/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "target-def.h"
#include "common/common-target.h"
#include "cfglayout.h"
#include "langhooks.h"
#include "dwarf2out.h"
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
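/* Illustrative note, not in the original file: COSTS_N_INSNS (N) is
   defined in rtl.h as (N) * 4, i.e. costs are expressed in quarter-insn
   units.  So the cypress fdivs entry above, COSTS_N_INSNS (37), tells
   the RTL cost hooks that one single-precision divide is worth roughly
   37 simple instructions when deciding, for example, whether to expand
   a division into a cheaper multiply/shift sequence.  */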
static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs leon_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (15), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara3_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  COSTS_N_INSNS (9), /* fadd, fsub */
  COSTS_N_INSNS (9), /* fcmp */
  COSTS_N_INSNS (9), /* fmov, fmovr */
  COSTS_N_INSNS (9), /* fmul */
  COSTS_N_INSNS (23), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (9), /* imul */
  COSTS_N_INSNS (9), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
  COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
const struct processor_costs *sparc_costs = &cypress_costs;

#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out if
   somebody does not branch between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100, 101, 102};
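/* Worked example (illustrative, not in the original source): in a leaf
   function the register window is not shifted, so an incoming argument
   that would normally arrive in %i0 (hard reg 24) is remapped by the
   table above to %o0 (hard reg 8): leaf_reg_remap[24] == 8.  Entries
   holding -1 (e.g. the %o registers 8-13 themselves) must not appear in
   a leaf-function candidate, except that %sp (14) maps to itself.  */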
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1};
struct GTY(()) machine_function
{
  /* Size of the frame of the function.  */
  HOST_WIDE_INT frame_size;

  /* Size of the frame of the function minus the register window save area
     and the outgoing argument area.  */
  HOST_WIDE_INT apparent_frame_size;

  /* Register we pretend the frame pointer is allocated to.  Normally, this
     is %fp, but if we are in a leaf procedure, this is (%sp + offset).  We
     record "offset" separately as it may be too big for (reg + disp).  */
  rtx frame_base_reg;
  HOST_WIDE_INT frame_base_offset;

  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* Number of global or FP registers to be saved (as 4-byte quantities).  */
  int n_global_fp_regs;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the prologue saves local or in registers.  */
  bool save_local_in_regs_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_frame_size		cfun->machine->frame_size
#define sparc_apparent_frame_size	cfun->machine->apparent_frame_size
#define sparc_frame_base_reg		cfun->machine->frame_base_reg
#define sparc_frame_base_offset		cfun->machine->frame_base_offset
#define sparc_n_global_fp_regs		cfun->machine->n_global_fp_regs
#define sparc_leaf_function_p		cfun->machine->leaf_function_p
#define sparc_save_local_in_regs_p	cfun->machine->save_local_in_regs_p
#define sparc_prologue_data_valid_p	cfun->machine->prologue_data_valid_p
/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static void sparc_option_override (void);
static void sparc_init_modes (void);
static void scan_record_type (const_tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				const_tree, bool, bool, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static bool sparc_legitimate_constant_p (enum machine_mode, rtx);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef TARGET_SOLARIS
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
						 tree) ATTRIBUTE_UNUSED;
#endif
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (enum machine_mode, rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static int sparc_register_move_cost (enum machine_mode,
				     reg_class_t, reg_class_t);
static bool sparc_rtx_costs (rtx, int, int, int, int *, bool);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (enum machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static rtx sparc_struct_value_rtx (tree, int);
static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
						      int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (cumulative_args_t);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx sparc_legitimize_tls_address (rtx);
static rtx sparc_legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static rtx sparc_delegitimize_address (rtx);
static bool sparc_mode_dependent_address_p (const_rtx);
static bool sparc_pass_by_reference (cumulative_args_t,
				     enum machine_mode, const_tree, bool);
static void sparc_function_arg_advance (cumulative_args_t,
					enum machine_mode, const_tree, bool);
static rtx sparc_function_arg_1 (cumulative_args_t,
				 enum machine_mode, const_tree, bool, bool);
static rtx sparc_function_arg (cumulative_args_t,
			       enum machine_mode, const_tree, bool);
static rtx sparc_function_incoming_arg (cumulative_args_t,
					enum machine_mode, const_tree, bool);
static unsigned int sparc_function_arg_boundary (enum machine_mode,
						 const_tree);
static int sparc_arg_partial_bytes (cumulative_args_t,
				    enum machine_mode, tree, bool);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
static rtx sparc_builtin_setjmp_frame_value (void);
static void sparc_conditional_register_usage (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
static enum machine_mode sparc_preferred_simd_mode (enum machine_mode);
static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
static bool sparc_print_operand_punct_valid_p (unsigned char);
static void sparc_print_operand (FILE *, rtx, int);
static void sparc_print_operand_address (FILE *, rtx);
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       do_diagnostic } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL, false }
};
#endif
/* Option handling.  */

enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];
/* Initialize the GCC target structure.  */

/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sparc_register_move_cost

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sparc_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sparc_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sparc_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode
#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif
#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sparc_option_override
#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sparc_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address

struct gcc_target targetm = TARGET_INITIALIZER;
/* Validate and override various options, and do some machine dependent
   initialization.  */

static void
sparc_option_override (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const enum processor_type processor;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
    { TARGET_CPU_v8, PROCESSOR_V8 },
    { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
    { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
    { TARGET_CPU_leon, PROCESSOR_LEON },
    { TARGET_CPU_sparclite, PROCESSOR_F930 },
    { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
    { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
    { TARGET_CPU_v9, PROCESSOR_V9 },
    { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
    { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
    { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
    { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
    { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
    { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },
    { -1, PROCESSOR_V7 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  This must match the order of
     the PROCESSOR_* enumeration.  */
  static struct cpu_table {
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { MASK_ISA, 0 },
    { MASK_ISA, 0 },
    { MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { MASK_ISA, MASK_V8 },
    { MASK_ISA, MASK_V8|MASK_FPU },
    /* LEON */
    { MASK_ISA, MASK_V8|MASK_FPU },
    { MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.  */
    { MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { MASK_ISA, MASK_SPARCLET },
    { MASK_ISA, MASK_V9 },
    /* UltraSPARC I, II, IIi */
    /* Although insns using %y are deprecated, it is a clear win.  */
    { MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC III */
    /* ??? Check if %y issue still holds true.  */
    { MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2},
    /* UltraSPARC T1 */
    { MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T2 */
    { MASK_ISA, MASK_V9|MASK_POPC|MASK_VIS2},
    /* UltraSPARC T3 */
    { MASK_ISA, MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF},
    /* UltraSPARC T4 */
    { MASK_ISA, MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF},
  };
  const struct cpu_table *cpu;
  int fpu;
  int i;
#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif
  /* We force all 64bit archs to use 128 bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }
  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }
  /* Check that -fcall-saved-REG wasn't specified for out registers.  */
  for (i = 8; i < 16; i++)
    if (!call_used_regs [i])
      {
	error ("-fcall-saved-REG is not supported for out registers");
	call_used_regs [i] = 1;
      }

  fpu = target_flags & MASK_FPU; /* save current -mfpu status */
  /* Set the default CPU.  */
  if (!global_options_set.x_sparc_cpu_and_features)
    {
      for (def = &cpu_default[0]; def->cpu != -1; ++def)
	if (def->cpu == TARGET_CPU_DEFAULT)
	  break;
      gcc_assert (def->cpu != -1);
      sparc_cpu_and_features = def->processor;
    }

  if (!global_options_set.x_sparc_cpu)
    sparc_cpu = sparc_cpu_and_features;
  cpu = &cpu_table[(int) sparc_cpu_and_features];
  target_flags &= ~cpu->disable;
  target_flags |= (cpu->enable
#ifndef HAVE_AS_FMAF_HPC_VIS3
		   & ~(MASK_FMAF | MASK_VIS3)
#endif
		   );
  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (target_flags_explicit & MASK_FPU)
    target_flags = (target_flags & ~MASK_FPU) | fpu;
  /* -mvis2 implies -mvis.  */
  if (TARGET_VIS2)
    target_flags |= MASK_VIS;

  /* -mvis3 implies -mvis2 and -mvis.  */
  if (TARGET_VIS3)
    target_flags |= MASK_VIS2 | MASK_VIS;

  /* Don't allow -mvis, -mvis2, -mvis3, or -mfmaf if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_FMAF);
  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* -mvis also implies -mv8plus on 32-bit.  */
  if (TARGET_VIS && ! TARGET_ARCH64)
    target_flags |= MASK_V8PLUS;
  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;
  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA
	  || sparc_cpu == PROCESSOR_NIAGARA2
	  || sparc_cpu == PROCESSOR_NIAGARA3
	  || sparc_cpu == PROCESSOR_NIAGARA4))
    align_functions = 32;
  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (! TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;
  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;
  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_LEON:
      sparc_costs = &leon_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    case PROCESSOR_NIAGARA3:
    case PROCESSOR_NIAGARA4:
      sparc_costs = &niagara3_costs;
      break;
    case PROCESSOR_NATIVE:
      gcc_unreachable ();
    };
#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
			 ((sparc_cpu == PROCESSOR_ULTRASPARC
			   || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2
			   || sparc_cpu == PROCESSOR_NIAGARA3
			   || sparc_cpu == PROCESSOR_NIAGARA4)
			  ? 2
			  : (sparc_cpu == PROCESSOR_ULTRASPARC3
			     ? 8 : 3)),
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
			 ((sparc_cpu == PROCESSOR_ULTRASPARC
			   || sparc_cpu == PROCESSOR_ULTRASPARC3
			   || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2
			   || sparc_cpu == PROCESSOR_NIAGARA3
			   || sparc_cpu == PROCESSOR_NIAGARA4)
			  ? 64 : 32),
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  /* Disable save slot sharing for call-clobbered registers by default.
     The IRA sharing algorithm works on single registers only and this
     pessimizes for double floating-point registers.  */
  if (!global_options_set.x_flag_ira_share_save_slots)
    flag_ira_share_save_slots = 0;
}

/* Miscellaneous utilities.  */
/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
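/* Illustrative examples (assumed values, not from the original source):
   the SF image of 1.0 is 0x3f800000, whose low 10 bits are clear and
   which is not a simm13, so fp_sethi_p accepts it (one sethi).  A tiny
   image such as 0x00000042 satisfies SPARC_SIMM13_P, so fp_mov_p
   accepts it (one mov).  An image with both high and low bits set,
   e.g. 0x3f800001 (the SF number just above 1.0), needs the sethi+or
   pair that fp_high_losum_p reports.  */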
/* Return true if the address of LABEL can be loaded by means of the
   mov{si,di}_pic_label_ref patterns in PIC mode.  */

static bool
can_use_mov_pic_label_ref (rtx label)
{
  /* VxWorks does not impose a fixed gap between segments; the run-time
     gap can be different from the object-file gap.  We therefore can't
     assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
     are absolutely sure that X is in the same segment as the GOT.
     Unfortunately, the flexibility of linker scripts means that we
     can't be sure of that in general, so assume that GOT-relative
     accesses are never valid on VxWorks.  */
  if (TARGET_VXWORKS_RTP)
    return false;

  /* Similarly, if the label is non-local, it might end up being placed
     in a different section than the current one; now mov_pic_label_ref
     requires the label and the code to be in the same section.  */
  if (LABEL_REF_NONLOCAL_P (label))
    return false;

  /* Finally, if we are reordering basic blocks and partition into hot
     and cold sections, this might happen for any label.  */
  if (flag_reorder_blocks_and_partition)
    return false;

  return true;
}
/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && sparc_tls_referenced_p (operands [1]))
    {
      operands[1] = sparc_legitimize_tls_address (operands[1]);
      return false;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);

      /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases.  */
      if (GET_CODE (operands[1]) == LABEL_REF
	  && can_use_mov_pic_label_ref (operands[1]))
	{
	  if (mode == SImode)
	    {
	      emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }

	  if (mode == DImode)
	    {
	      gcc_assert (TARGET_ARCH64);
	      emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1]
	    = sparc_legitimize_pic_address (operands[1],
					    reload_in_progress
					    ? operands[0] : NULL_RTX);
	  return false;
	}
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
	 not storing directly into memory.  So fix this up to avoid
	 crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear or set to all-ones FP registers if TARGET_VIS, and
	 always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && (const_zero_operand (operands[1], mode)
	      || const_all_ones_operand (operands[1], mode)))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && (reload_completed || reload_in_progress))))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
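/* Example flow (illustrative, not in the original source): for
   (set (reg:SI %g1) (const_int 0x12345678)) the source is neither a
   small immediate nor a HIGH, so input_operand rejects it and the
   SImode case above hands it to sparc_emit_set_const32, which emits the
   familiar sethi/or pair.  A DFmode constant headed for an %f register
   instead takes the force_const_mem path and is loaded from the
   constant pool.  */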
/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

static void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
		  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      GEN_INT (INTVAL (op1)
				       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
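/* Worked example (illustrative, not in the original source): loading
   0x12345678 becomes

	sethi	%hi(0x12345400), %temp	! 0x12345678 & ~0x3ff
	or	%temp, 0x278, %reg	! 0x12345678 & 0x3ff == 0x278

   The two halves are emitted as plain constant moves precisely so that
   CSE can value-number and reuse the intermediate.  */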
/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

static void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp1
	 or	%temp1, %lo(symbol), %reg  */
      if (temp)
	temp1 = temp;  /* op0 is allowed.  */
      else
	temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      if (temp)
	{
	  temp1 = op0;
	  temp2 = op0;
	  temp3 = temp;  /* op0 is allowed.  */
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
			      gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;
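      /* Worked example (illustrative, not from the original source):
	 the three relocations slice a 44-bit address A as
	   %h44(A) = bits 43-22 (filled in by the 22-bit sethi field),
	   %m44(A) = bits 21-12 (10-bit or immediate),
	   %l44(A) = bits 11-0  (12-bit or immediate after the sllx by 12),
	 which together reassemble all 44 bits: 22 + 10 + 12 = 44.  */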
    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 sllx	%temp3, 32, %temp4
	 or	%temp4, %temp2, %temp5
	 or	%temp5, %lo(symbol), %reg  */
      if (temp)
	{
	  /* It is possible that one of the registers we got for operands[2]
	     might coincide with that of operands[0] (which is why we made
	     it TImode).  Pick the other one to use as our scratch.  */
	  if (rtx_equal_p (temp, op0))
	    {
	      gcc_assert (ti_temp);
	      temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
	    }
	  temp1 = op0;
	  temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	  temp3 = op0;
	  temp4 = op0;
	  temp5 = op0;
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	  temp4 = gen_reg_rtx (DImode);
	  temp5 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
			      gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;
    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			add	%temp1, EMBMEDANY_BASE_REG, %temp2
			or	%temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  if (temp)
	    {
	      temp1 = temp;  /* op0 is allowed.  */
	      temp2 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (temp2, temp1));
	  emit_insn (gen_embmedany_losum (op0, temp2, op1));
	}
      else
	{
	  /* Text segment:	sethi	%uhi(symbol), %temp1
				sethi	%hi(symbol), %temp2
				or	%temp1, %ulo(symbol), %temp3
				sllx	%temp3, 32, %temp4
				or	%temp4, %temp2, %temp5
				or	%temp5, %lo(symbol), %reg  */
	  if (temp)
	    {
	      /* It is possible that one of the registers we got for operands[2]
		 might coincide with that of operands[0] (which is why we made
		 it TImode).  Pick the other one to use as our scratch.  */
	      if (rtx_equal_p (temp, op0))
		{
		  gcc_assert (ti_temp);
		  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
		}
	      temp1 = op0;
	      temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	      temp3 = op0;
	      temp4 = op0;
	      temp5 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	      temp3 = gen_reg_rtx (DImode);
	      temp4 = gen_reg_rtx (DImode);
	      temp5 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_textuhi (temp1, op1));
	  emit_insn (gen_embmedany_texthi  (temp2, op1));
	  emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, temp4,
				  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, temp5,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	  emit_insn (gen_embmedany_textlo  (op0, temp5, op1));
	}
      break;

    default:
      gcc_unreachable ();
    }
}
#if HOST_BITS_PER_WIDE_INT == 32
static void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else

/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH, they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits, so that the
   result matches a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}
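/* For illustration (not in the original source): gen_safe_HIGH64 (reg,
   0x12345678) yields (set reg (const_int 0x12345400)), i.e. exactly the
   bits a sethi would set, rather than (set reg (high ...)) whose low
   bits RTL leaves unspecified.  CSE can therefore value-number the
   result like any other constant move.  */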
/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
					   unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-(HOST_WIDE_INT)0x400
						   | (low_bits & 0x3ff)))));
	}
    }
}
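/* Worked example (illustrative, not from the original source): for the
   negative constant 0xffffffff88888bff, low_bits == 0x88888bff and
   is_neg != 0, so high_bits == ~low_bits & 0xffffffff == 0x77777400.
   The low 10 bits of low_bits are 0x3ff, so the XOR with -1 collapses
   into a one's complement:

	sethi	%hi(0x77777400), %temp
	not	%temp, %reg		! ~0x77777400 == 0xffffffff88888bff  */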
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
					   unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT high_bits,
			       unsigned HOST_WIDE_INT low_immediate,
			       int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
			  gen_rtx_ASHIFT (DImode, temp2,
					  GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
			    gen_safe_OR64 (op0, low_immediate)));
}
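/* Worked example (illustrative): quick2 with high_bits == 0x12345678,
   low_immediate == 0 and shift_count == 32 loads 0x1234567800000000:

	sethi	%hi(0x12345400), %temp
	or	%temp, 0x278, %reg
	sllx	%reg, 32, %reg  */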
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
				unsigned HOST_WIDE_INT high_bits,
				unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				sub_temp,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp3,
				  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp3)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	}
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
	 painful.  However we do still make an attempt to
	 avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low1)));
	  sub_temp = op0;
	  to_shift = 12;
	}
      else
	{
	  to_shift += 12;
	}
      if (low2 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low2)));
	  sub_temp = op0;
	  to_shift = 8;
	}
      else
	{
	  to_shift += 8;
	}
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (to_shift))));
      if (low3 != const0_rtx)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}
/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
			unsigned HOST_WIDE_INT low_bits,
			int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
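/* For illustration (not from the original source): for the constant
   0x000000000ffff000, low_bits == 0x0ffff000 and high_bits == 0, so the
   routine reports lowest_bit_set == 12, highest_bit_set == 27 and
   all_bits_between_are_set == 1.  A gappy mask such as 0x0ff0f000 keeps
   the same endpoints but reports all_bits_between_are_set == 0.  */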
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
		   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
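/* Illustrative examples (not in the original source):
   0x00000000deadbeef (high_bits == 0) and 0xffffffffdeadbeef
   (high_bits == 0xffffffff) are 2-insn, handled by quick1; so is
   0x0000001ffff00000, whose set bits span fewer than 21 positions and
   can be built with a sethi followed by one shift.  */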
static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
							unsigned HOST_WIDE_INT,
							int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
			  unsigned HOST_WIDE_INT low_bits,
			  int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
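/* Worked example (illustrative, not from the original source): for
   0x0000001ffff00000 the field of set bits starts at
   lowest_bit_set == 20, so a caller asking for the field re-based at
   bit 10 gets lo == (0xfff00000 >> 20) << 10 == 0x3ffc00 and
   hi == ((0x1f << 12) << 10) == 0x7c00000; the result 0x7fffc00 is a
   valid sethi immediate, and a later sllx by 10 puts it back in
   place.  */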
1835 /* Here we are sure to be arch64 and this is an integer constant
1836 being loaded into a register. Emit the most efficient
1837 insn sequence possible. Detection of all the 1-insn cases
1838 has been done already. */
1840 sparc_emit_set_const64 (rtx op0, rtx op1)
1842 unsigned HOST_WIDE_INT high_bits, low_bits;
1843 int lowest_bit_set, highest_bit_set;
1844 int all_bits_between_are_set;
1847 /* Sanity check that we know what we are working with. */
1848 gcc_assert (TARGET_ARCH64
1849 && (GET_CODE (op0) == SUBREG
1850 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1852 if (reload_in_progress || reload_completed)
1855 if (GET_CODE (op1) != CONST_INT)
1857 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1862 temp = gen_reg_rtx (DImode);
1864 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1865 low_bits = (INTVAL (op1) & 0xffffffff);
1867 /* low_bits bits 0 --> 31
1868 high_bits bits 32 --> 63 */
1870 analyze_64bit_constant (high_bits, low_bits,
1871 &highest_bit_set, &lowest_bit_set,
1872 &all_bits_between_are_set);
1874 /* First try for a 2-insn sequence. */
1876 /* These situations are preferred because the optimizer can
1877 * do more things with them:
1879 * sllx %reg, shift, %reg
1881 * srlx %reg, shift, %reg
1882 * 3) mov some_small_const, %reg
1883 * sllx %reg, shift, %reg
1885 if (((highest_bit_set == 63
1886 || lowest_bit_set == 0)
1887 && all_bits_between_are_set != 0)
1888 || ((highest_bit_set - lowest_bit_set) < 12))
1890 HOST_WIDE_INT the_const = -1;
1891 int shift = lowest_bit_set;
1893 if ((highest_bit_set != 63
1894 && lowest_bit_set != 0)
1895 || all_bits_between_are_set == 0)
1898 create_simple_focus_bits (high_bits, low_bits,
1901 else if (lowest_bit_set == 0)
1902 shift = -(63 - highest_bit_set);
1904 gcc_assert (SPARC_SIMM13_P (the_const));
1905 gcc_assert (shift != 0);
1907 emit_insn (gen_safe_SET64 (temp, the_const));
1909 emit_insn (gen_rtx_SET (VOIDmode,
1911 gen_rtx_ASHIFT (DImode,
1915 emit_insn (gen_rtx_SET (VOIDmode,
1917 gen_rtx_LSHIFTRT (DImode,
1919 GEN_INT (-shift))));
1923 /* Now a range of 22 or less bits set somewhere.
1924 * 1) sethi %hi(focus_bits), %reg
1925 * sllx %reg, shift, %reg
1926 * 2) sethi %hi(focus_bits), %reg
1927 * srlx %reg, shift, %reg
1929 if ((highest_bit_set - lowest_bit_set) < 21)
1931 unsigned HOST_WIDE_INT focus_bits =
1932 create_simple_focus_bits (high_bits, low_bits,
1933 lowest_bit_set, 10);
1935 gcc_assert (SPARC_SETHI_P (focus_bits));
1936 gcc_assert (lowest_bit_set != 10);
1938 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1940 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1941 if (lowest_bit_set < 10)
1942 emit_insn (gen_rtx_SET (VOIDmode,
1944 gen_rtx_LSHIFTRT (DImode, temp,
1945 GEN_INT (10 - lowest_bit_set))));
1946 else if (lowest_bit_set > 10)
1947 emit_insn (gen_rtx_SET (VOIDmode,
1949 gen_rtx_ASHIFT (DImode, temp,
1950 GEN_INT (lowest_bit_set - 10))));
1954 /* 1) sethi %hi(low_bits), %reg
1955 * or %reg, %lo(low_bits), %reg
1956 * 2) sethi %hi(~low_bits), %reg
1957 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1959 if (high_bits == 0
1960 || high_bits == 0xffffffff)
1962 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1963 (high_bits == 0xffffffff));
1967 /* Now, try 3-insn sequences. */
1969 /* 1) sethi %hi(high_bits), %reg
1970 * or %reg, %lo(high_bits), %reg
1971 * sllx %reg, 32, %reg
1974 if (low_bits == 0)
1975 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1979 /* We may be able to do something quick
1980 when the constant is negated, so try that. */
1981 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1982 (~low_bits) & 0xfffffc00))
1984 /* NOTE: The trailing bits get XOR'd so we need the
1985 non-negated bits, not the negated ones. */
1986 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1988 if ((((~high_bits) & 0xffffffff) == 0
1989 && ((~low_bits) & 0x80000000) == 0)
1990 || (((~high_bits) & 0xffffffff) == 0xffffffff
1991 && ((~low_bits) & 0x80000000) != 0))
1993 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1995 if ((SPARC_SETHI_P (fast_int)
1996 && (~high_bits & 0xffffffff) == 0)
1997 || SPARC_SIMM13_P (fast_int))
1998 emit_insn (gen_safe_SET64 (temp, fast_int));
1999 else
2000 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
2004 rtx negated_const;
2005 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
2006 (((HOST_WIDE_INT) ((~high_bits) & 0xffffffff)) << 32));
2007 sparc_emit_set_const64 (temp, negated_const);
2010 /* If we are XOR'ing with -1, then we should emit a one's complement
2011 instead. This way the combiner will notice logical operations
2012 such as ANDN later on and substitute. */
2013 if (trailing_bits == 0x3ff)
2015 emit_insn (gen_rtx_SET (VOIDmode, op0,
2016 gen_rtx_NOT (DImode, temp)));
2018 else
2020 emit_insn (gen_rtx_SET (VOIDmode, op0,
2022 gen_safe_XOR64 (temp,
2023 (-0x400 | trailing_bits))));
2028 /* 1) sethi %hi(xxx), %reg
2029 * or %reg, %lo(xxx), %reg
2030 * sllx %reg, yyy, %reg
2032 * ??? This is just a generalized version of the low_bits==0
2033 * thing above, FIXME...
2035 if ((highest_bit_set - lowest_bit_set) < 32)
2037 unsigned HOST_WIDE_INT focus_bits =
2038 create_simple_focus_bits (high_bits, low_bits,
2039 lowest_bit_set, 0);
2041 /* We can't get here in this state. */
2042 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
2044 /* So what we know is that the set bits straddle the
2045 middle of the 64-bit word. */
2046 sparc_emit_set_const64_quick2 (op0, temp, focus_bits, 0,
2048 lowest_bit_set);
2052 /* 1) sethi %hi(high_bits), %reg
2053 * or %reg, %lo(high_bits), %reg
2054 * sllx %reg, 32, %reg
2055 * or %reg, low_bits, %reg
2057 if (SPARC_SIMM13_P(low_bits)
2058 && ((int)low_bits > 0))
2060 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
2064 /* The easiest way when all else fails is full decomposition. */
2065 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
2067 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
2069 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2070 return the mode to be used for the comparison. For floating-point,
2071 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
2072 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2073 processing is needed. */
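/* (Editor's note: for example, comparing (plus:SI a b) against zero
   yields CC_NOOVmode, because the condition codes to be tested are the
   ones set by the addcc instruction itself; CCX_NOOVmode is the DImode
   analogue when TARGET_ARCH64.)  */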
2076 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
2078 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2104 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2105 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2107 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2108 return CCX_NOOVmode;
2109 else
2110 return CC_NOOVmode;
2112 else
2114 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2115 return CCXmode;
2116 else
2117 return CCmode;
2121 /* Emit the compare insn and return the CC reg for a CODE comparison
2122 with operands X and Y. */
2125 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2127 enum machine_mode mode;
2130 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2131 return x;
2133 mode = SELECT_CC_MODE (code, x, y);
2135 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2136 fcc regs (cse can't tell they're really call clobbered regs and will
2137 remove a duplicate comparison even if there is an intervening function
2138 call - it will then try to reload the cc reg via an int reg which is why
2139 we need the movcc patterns). It is possible to provide the movcc
2140 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2141 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2142 to tell cse that CCFPE mode registers (even pseudos) are call
2143 clobbered. */
2145 /* ??? This is an experiment. Rather than making changes to cse which may
2146 or may not be easy/clean, we do our own cse. This is possible because
2147 we will generate hard registers. Cse knows they're call clobbered (it
2148 doesn't know the same thing about pseudos). If we guess wrong, no big
2149 deal, but if we win, great! */
2151 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2152 #if 1 /* experiment */
2155 /* We cycle through the registers to ensure they're all exercised. */
2156 static int next_fcc_reg = 0;
2157 /* Previous x,y for each fcc reg. */
2158 static rtx prev_args[4][2];
2160 /* Scan prev_args for x,y. */
2161 for (reg = 0; reg < 4; reg++)
2162 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2163 break;
2164 if (reg == 4)
2166 reg = next_fcc_reg;
2167 prev_args[reg][0] = x;
2168 prev_args[reg][1] = y;
2169 next_fcc_reg = (next_fcc_reg + 1) & 3;
2171 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2173 #else
2174 cc_reg = gen_reg_rtx (mode);
2175 #endif /* ! experiment */
2176 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2177 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2178 else
2179 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2181 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
2182 will only result in an unrecognizable insn so no point in asserting. */
2183 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2185 return cc_reg;
2189 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2192 gen_compare_reg (rtx cmp)
2194 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2197 /* This function is used for v9 only.
2198 DEST is the target of the Scc insn.
2199 CODE is the code for an Scc's comparison.
2200 X and Y are the values we compare.
2202 This function is needed to turn
2204 (set (reg:SI 110)
2205 (gt (reg:CCX 100 %icc)
2206 (const_int 0)))
2207 into
2208 (set (reg:SI 110)
2209 (gt:DI (reg:CCX 100 %icc)
2210 (const_int 0)))
2212 I.e., the instruction recognizer needs to see the mode of the comparison to
2213 find the right instruction. We could use "gt:DI" right in the
2214 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2217 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2219 if (! TARGET_ARCH64
2220 && (GET_MODE (x) == DImode
2221 || GET_MODE (dest) == DImode))
2222 return 0;
2224 /* Try to use the movrCC insns. */
2225 if (TARGET_V9
2226 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2227 && y == const0_rtx
2228 && v9_regcmp_p (compare_code))
2233 /* Special case for op0 != 0. This can be done with one instruction if
2234 dest == x. */
2236 if (compare_code == NE
2237 && GET_MODE (dest) == DImode
2238 && rtx_equal_p (op0, dest))
2240 emit_insn (gen_rtx_SET (VOIDmode, dest,
2241 gen_rtx_IF_THEN_ELSE (DImode,
2242 gen_rtx_fmt_ee (compare_code, DImode,
2249 if (reg_overlap_mentioned_p (dest, op0))
2251 /* Handle the case where dest == x.
2252 We "early clobber" the result. */
2253 op0 = gen_reg_rtx (GET_MODE (x));
2254 emit_move_insn (op0, x);
2257 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2258 if (GET_MODE (op0) != DImode)
2260 temp = gen_reg_rtx (DImode);
2261 convert_move (temp, op0, 0);
2265 emit_insn (gen_rtx_SET (VOIDmode, dest,
2266 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2267 gen_rtx_fmt_ee (compare_code, DImode,
2275 x = gen_compare_reg_1 (compare_code, x, y);
2278 gcc_assert (GET_MODE (x) != CC_NOOVmode
2279 && GET_MODE (x) != CCX_NOOVmode);
2281 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2282 emit_insn (gen_rtx_SET (VOIDmode, dest,
2283 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2284 gen_rtx_fmt_ee (compare_code,
2285 GET_MODE (x), x, y),
2286 const1_rtx, dest)));
2292 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2293 without jumps using the addx/subx instructions. */
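/* (Editor's illustration: "dest = (x != 0)" can be computed without a
   branch as roughly

       subcc %g0, %x, %g0    ! carry is set iff x != 0
       addx  %g0, 0, %dest   ! dest = carry

   and seq/sgeu/sltu fall out of the carry bit of a subcc in the same
   way, using subx where the complement of the carry is needed.)  */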
2296 emit_scc_insn (rtx operands[])
2303 /* The quad-word fp compare library routines all return nonzero to indicate
2304 true, which is different from the equivalent libgcc routines, so we must
2305 handle them specially here. */
2306 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2308 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2309 GET_CODE (operands[1]));
2310 operands[2] = XEXP (operands[1], 0);
2311 operands[3] = XEXP (operands[1], 1);
2314 code = GET_CODE (operands[1]);
2315 x = operands[2];
2316 y = operands[3];
2318 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2319 more applications). The exception to this is "reg != 0" which can
2320 be done in one instruction on v9 (so we do it). */
2321 if (code == EQ)
2323 if (GET_MODE (x) == SImode)
2325 rtx pat = gen_seqsi_special (operands[0], x, y);
2326 emit_insn (pat);
2327 return true;
2329 else if (GET_MODE (x) == DImode)
2331 rtx pat = gen_seqdi_special (operands[0], x, y);
2332 emit_insn (pat);
2333 return true;
2337 if (code == NE)
2339 if (GET_MODE (x) == SImode)
2341 rtx pat = gen_snesi_special (operands[0], x, y);
2342 emit_insn (pat);
2343 return true;
2345 else if (GET_MODE (x) == DImode)
2347 rtx pat = gen_snedi_special (operands[0], x, y);
2348 emit_insn (pat);
2349 return true;
2353 /* For the rest, on v9 we can use conditional moves. */
2357 if (gen_v9_scc (operands[0], code, x, y))
2358 return true;
2361 /* We can do LTU and GEU using the addx/subx instructions too. And
2362 for GTU/LEU, if both operands are registers swap them and fall
2363 back to the easy case. */
2364 if (code == GTU || code == LEU)
2366 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2367 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2369 rtx t = x; x = y; y = t;
2372 code = swap_condition (code);
2376 if (code == LTU || code == GEU)
2378 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2379 gen_rtx_fmt_ee (code, SImode,
2380 gen_compare_reg_1 (code, x, y),
2381 const0_rtx)));
2382 return true;
2385 /* Nope, do branches. */
2386 return false;
2389 /* Emit a conditional jump insn for the v9 architecture using comparison code
2390 CODE and jump target LABEL.
2391 This function exists to take advantage of the v9 brxx insns. */
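/* (Editor's note: on v9 this allows e.g. "if (x != 0)" on a DImode
   register to compile to a single "brnz %reg, label" instead of a
   subcc followed by a conditional branch on %xcc.)  */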
2394 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2396 emit_jump_insn (gen_rtx_SET (VOIDmode,
2398 gen_rtx_IF_THEN_ELSE (VOIDmode,
2399 gen_rtx_fmt_ee (code, GET_MODE (op0),
2401 gen_rtx_LABEL_REF (VOIDmode, label),
2406 emit_conditional_branch_insn (rtx operands[])
2408 /* The quad-word fp compare library routines all return nonzero to indicate
2409 true, which is different from the equivalent libgcc routines, so we must
2410 handle them specially here. */
2411 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2413 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2414 GET_CODE (operands[0]));
2415 operands[1] = XEXP (operands[0], 0);
2416 operands[2] = XEXP (operands[0], 1);
2419 if (TARGET_ARCH64 && operands[2] == const0_rtx
2420 && GET_CODE (operands[1]) == REG
2421 && GET_MODE (operands[1]) == DImode)
2423 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2427 operands[1] = gen_compare_reg (operands[0]);
2428 operands[2] = const0_rtx;
2429 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2430 operands[1], operands[2]);
2431 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2432 operands[3]));
2436 /* Generate a DFmode part of a hard TFmode register.
2437 REG is the TFmode hard register, LOW is 1 for the
2438 low 64 bits of the register and 0 otherwise. */
2441 gen_df_reg (rtx reg, int low)
2443 int regno = REGNO (reg);
2445 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2446 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2447 return gen_rtx_REG (DFmode, regno);
2450 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2451 Unlike normal calls, TFmode operands are passed by reference. It is
2452 assumed that no more than 3 operands are required. */
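/* (Editor's illustration: without hardware quad support, a TFmode
   addition ends up as a call in the style of the 64-bit SPARC ABI,
   e.g. _Qp_add (&result, &a, &b), where all three TFmode values live
   in memory and only their addresses are passed.)  */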
2455 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2457 rtx ret_slot = NULL, arg[3], func_sym;
2460 /* We only expect to be called for conversions, unary, and binary ops. */
2461 gcc_assert (nargs == 2 || nargs == 3);
2463 for (i = 0; i < nargs; ++i)
2465 rtx this_arg = operands[i];
2468 /* TFmode arguments and return values are passed by reference. */
2469 if (GET_MODE (this_arg) == TFmode)
2471 int force_stack_temp;
2473 force_stack_temp = 0;
2474 if (TARGET_BUGGY_QP_LIB && i == 0)
2475 force_stack_temp = 1;
2477 if (GET_CODE (this_arg) == MEM
2478 && ! force_stack_temp)
2479 this_arg = XEXP (this_arg, 0);
2480 else if (CONSTANT_P (this_arg)
2481 && ! force_stack_temp)
2483 this_slot = force_const_mem (TFmode, this_arg);
2484 this_arg = XEXP (this_slot, 0);
2488 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2490 /* Operand 0 is the return value. We'll copy it out later. */
2491 if (i > 0)
2492 emit_move_insn (this_slot, this_arg);
2493 else
2494 ret_slot = this_slot;
2496 this_arg = XEXP (this_slot, 0);
2503 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2505 if (GET_MODE (operands[0]) == TFmode)
2507 if (nargs == 2)
2508 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2509 arg[0], GET_MODE (arg[0]),
2510 arg[1], GET_MODE (arg[1]));
2511 else
2512 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2513 arg[0], GET_MODE (arg[0]),
2514 arg[1], GET_MODE (arg[1]),
2515 arg[2], GET_MODE (arg[2]));
2517 if (ret_slot)
2518 emit_move_insn (operands[0], ret_slot);
2520 else
2522 rtx ret;
2524 gcc_assert (nargs == 2);
2526 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2527 GET_MODE (operands[0]), 1,
2528 arg[1], GET_MODE (arg[1]));
2530 if (ret != operands[0])
2531 emit_move_insn (operands[0], ret);
2535 /* Expand soft-float TFmode calls to SPARC ABI routines. */
2538 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2560 emit_soft_tfmode_libcall (func, 3, operands);
2564 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2568 gcc_assert (code == SQRT);
2569 func = "_Qp_sqrt";
2571 emit_soft_tfmode_libcall (func, 2, operands);
2575 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2582 switch (GET_MODE (operands[1]))
2595 case FLOAT_TRUNCATE:
2596 switch (GET_MODE (operands[0]))
2610 switch (GET_MODE (operands[1]))
2615 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2625 case UNSIGNED_FLOAT:
2626 switch (GET_MODE (operands[1]))
2631 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2642 switch (GET_MODE (operands[0]))
2656 switch (GET_MODE (operands[0]))
2673 emit_soft_tfmode_libcall (func, 2, operands);
2676 /* Expand a hard-float TFmode operation. All arguments must be in
2677 registers. */
2680 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2684 if (GET_RTX_CLASS (code) == RTX_UNARY)
2686 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2687 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2690 else
2691 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2692 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2693 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2694 operands[1], operands[2]);
2697 if (register_operand (operands[0], VOIDmode))
2698 dest = operands[0];
2699 else
2700 dest = gen_reg_rtx (GET_MODE (operands[0]));
2702 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2704 if (dest != operands[0])
2705 emit_move_insn (operands[0], dest);
2709 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2711 if (TARGET_HARD_QUAD)
2712 emit_hard_tfmode_operation (code, operands);
2713 else
2714 emit_soft_tfmode_binop (code, operands);
2718 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2720 if (TARGET_HARD_QUAD)
2721 emit_hard_tfmode_operation (code, operands);
2722 else
2723 emit_soft_tfmode_unop (code, operands);
2727 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2729 if (TARGET_HARD_QUAD)
2730 emit_hard_tfmode_operation (code, operands);
2731 else
2732 emit_soft_tfmode_cvt (code, operands);
2735 /* Return nonzero if a branch/jump/call instruction will have a nop emitted
2736 into its delay slot. */
2739 empty_delay_slot (rtx insn)
2743 /* If no previous instruction (should not happen), return true. */
2744 if (PREV_INSN (insn) == NULL)
2745 return 1;
2747 seq = NEXT_INSN (PREV_INSN (insn));
2748 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2749 return 0;
2751 return 1;
2754 /* Return nonzero if TRIAL can go into the call delay slot. */
2757 tls_call_delay (rtx trial)
2762 call __tls_get_addr, %tgd_call (foo)
2763 add %l7, %o0, %o0, %tgd_add (foo)
2764 while Sun as/ld does not. */
2765 if (TARGET_GNU_TLS || !TARGET_TLS)
2766 return 1;
2768 pat = PATTERN (trial);
2770 /* We must reject tgd_add{32|64}, i.e.
2771 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2772 and tldm_add{32|64}, i.e.
2773 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2775 if (GET_CODE (pat) == SET
2776 && GET_CODE (SET_SRC (pat)) == PLUS)
2778 rtx unspec = XEXP (SET_SRC (pat), 1);
2780 if (GET_CODE (unspec) == UNSPEC
2781 && (XINT (unspec, 1) == UNSPEC_TLSGD
2782 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2783 return 0;
2786 return 1;
2789 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2790 instruction. RETURN_P is true if the v9 variant 'return' is to be
2791 considered in the test too.
2793 TRIAL must be a SET whose destination is a REG appropriate for the
2794 'restore' instruction or, if RETURN_P is true, for the 'return'
2795 instruction. */
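/* (Editor's illustration: a final "result = x + y" sitting in the
   delay slot can be folded into the window restore itself, e.g.
   "restore %o1, %o2, %o0", which performs the addition in the old
   window and leaves the sum in the caller's %o0.)  */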
2798 eligible_for_restore_insn (rtx trial, bool return_p)
2800 rtx pat = PATTERN (trial);
2801 rtx src = SET_SRC (pat);
2803 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2804 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2805 && arith_operand (src, GET_MODE (src)))
2807 if (TARGET_ARCH64)
2808 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2809 else
2810 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2813 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2814 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2815 && arith_double_operand (src, GET_MODE (src)))
2816 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2818 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2819 else if (! TARGET_FPU && register_operand (src, SFmode))
2820 return 1;
2822 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2823 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2824 return 1;
2826 /* If we have the 'return' instruction, anything that does not use
2827 local or output registers and can go into a delay slot wins. */
2829 else if (return_p
2830 && !epilogue_renumber (&pat, 1)
2831 && get_attr_in_uncond_branch_delay (trial)
2832 == IN_UNCOND_BRANCH_DELAY_TRUE)
2833 return 1;
2835 /* The 'restore src1,src2,dest' pattern for SImode. */
2836 else if (GET_CODE (src) == PLUS
2837 && register_operand (XEXP (src, 0), SImode)
2838 && arith_operand (XEXP (src, 1), SImode))
2839 return 1;
2841 /* The 'restore src1,src2,dest' pattern for DImode. */
2842 else if (GET_CODE (src) == PLUS
2843 && register_operand (XEXP (src, 0), DImode)
2844 && arith_double_operand (XEXP (src, 1), DImode))
2845 return 1;
2847 /* The 'restore src1,%lo(src2),dest' pattern. */
2848 else if (GET_CODE (src) == LO_SUM
2849 && ! TARGET_CM_MEDMID
2850 && ((register_operand (XEXP (src, 0), SImode)
2851 && immediate_operand (XEXP (src, 1), SImode))
2852 || (TARGET_ARCH64
2853 && register_operand (XEXP (src, 0), DImode)
2854 && immediate_operand (XEXP (src, 1), DImode))))
2855 return 1;
2857 /* The 'restore src,src,dest' pattern. */
2858 else if (GET_CODE (src) == ASHIFT
2859 && (register_operand (XEXP (src, 0), SImode)
2860 || register_operand (XEXP (src, 0), DImode))
2861 && XEXP (src, 1) == const1_rtx)
2862 return 1;
2864 return 0;
2867 /* Return nonzero if TRIAL can go into the function return's delay slot. */
2870 eligible_for_return_delay (rtx trial)
2875 if (GET_CODE (trial) != INSN)
2876 return 0;
2878 if (get_attr_length (trial) != 1)
2879 return 0;
2881 /* If the function uses __builtin_eh_return, the eh_return machinery
2882 occupies the delay slot. */
2883 if (crtl->calls_eh_return)
2884 return 0;
2886 /* In the case of a leaf or flat function, anything can go into the slot. */
2887 if (sparc_leaf_function_p || TARGET_FLAT)
2888 return
2889 get_attr_in_uncond_branch_delay (trial) == IN_UNCOND_BRANCH_DELAY_TRUE;
2891 pat = PATTERN (trial);
2892 if (GET_CODE (pat) == PARALLEL)
2898 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
2900 rtx expr = XVECEXP (pat, 0, i);
2901 if (GET_CODE (expr) != SET)
2902 return 0;
2903 if (GET_CODE (SET_DEST (expr)) != REG)
2904 return 0;
2905 regno = REGNO (SET_DEST (expr));
2906 if (regno >= 8 && regno < 24)
2907 return 0;
2909 return !epilogue_renumber (&pat, 1)
2910 && (get_attr_in_uncond_branch_delay (trial)
2911 == IN_UNCOND_BRANCH_DELAY_TRUE);
2914 if (GET_CODE (pat) != SET)
2915 return 0;
2917 if (GET_CODE (SET_DEST (pat)) != REG)
2918 return 0;
2920 regno = REGNO (SET_DEST (pat));
2922 /* Otherwise, only operations which can be done in tandem with
2923 a `restore' or `return' insn can go into the delay slot. */
2924 if (regno >= 8 && regno < 24)
2925 return 0;
2927 /* If this instruction sets up a floating point register and we have a
2928 return instruction, it can probably go in. But restore will not work
2929 with FP_REGS. */
2930 if (regno >= 32)
2931 return (TARGET_V9
2932 && !epilogue_renumber (&pat, 1)
2933 && get_attr_in_uncond_branch_delay (trial)
2934 == IN_UNCOND_BRANCH_DELAY_TRUE);
2936 return eligible_for_restore_insn (trial, true);
2939 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
2942 eligible_for_sibcall_delay (rtx trial)
2946 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2947 return 0;
2949 if (get_attr_length (trial) != 1)
2950 return 0;
2952 pat = PATTERN (trial);
2954 if (sparc_leaf_function_p || TARGET_FLAT)
2956 /* If the tail call is done using the call instruction,
2957 we have to restore %o7 in the delay slot. */
2958 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2959 return 0;
2961 /* %g1 is used to build the function address */
2962 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2963 return 0;
2965 return 1;
2968 /* Otherwise, only operations which can be done in tandem with
2969 a `restore' insn can go into the delay slot. */
2970 if (GET_CODE (SET_DEST (pat)) != REG
2971 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2972 || REGNO (SET_DEST (pat)) >= 32)
2973 return 0;
2975 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2976 before it is restored. */
2977 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2978 return 0;
2980 return eligible_for_restore_insn (trial, false);
2984 short_branch (int uid1, int uid2)
2986 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2988 /* Leave a few words of "slop". */
2989 if (delta >= -1023 && delta <= 1022)
2990 return 1;
2992 return 0;
2995 /* Return nonzero if REG is not used after INSN.
2996 We assume REG is a reload reg, and therefore does
2997 not live past labels or calls or jumps. */
2999 reg_unused_after (rtx reg, rtx insn)
3001 enum rtx_code code, prev_code = UNKNOWN;
3003 while ((insn = NEXT_INSN (insn)))
3005 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
3006 return 1;
3008 code = GET_CODE (insn);
3009 if (GET_CODE (insn) == CODE_LABEL)
3010 return 1;
3012 if (INSN_P (insn))
3014 rtx set = single_set (insn);
3015 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
3016 if (set && in_src)
3017 return 0;
3018 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
3019 return 1;
3020 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
3021 return 0;
3023 prev_code = code;
3025 return 1;
3028 /* Determine if it's legal to put X into the constant pool. This
3029 is not possible if X contains the address of a symbol that is
3030 not constant (TLS) or not known at final link time (PIC). */
3033 sparc_cannot_force_const_mem (enum machine_mode mode, rtx x)
3035 switch (GET_CODE (x))
3037 case CONST_INT:
3038 case CONST_DOUBLE:
3039 case CONST_VECTOR:
3040 /* Accept all non-symbolic constants. */
3041 return false;
3043 case LABEL_REF:
3044 /* Labels are OK iff we are non-PIC. */
3045 return flag_pic != 0;
3047 case SYMBOL_REF:
3048 /* 'Naked' TLS symbol references are never OK,
3049 non-TLS symbols are OK iff we are non-PIC. */
3050 if (SYMBOL_REF_TLS_MODEL (x))
3051 return true;
3053 return flag_pic != 0;
3055 case CONST:
3056 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
3057 case PLUS:
3058 case MINUS:
3059 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
3060 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
3068 /* Global Offset Table support. */
3069 static GTY(()) rtx got_helper_rtx = NULL_RTX;
3070 static GTY(()) rtx global_offset_table_rtx = NULL_RTX;
3072 /* Return the SYMBOL_REF for the Global Offset Table. */
3074 static GTY(()) rtx sparc_got_symbol = NULL_RTX;
3076 static rtx
3077 sparc_got (void)
3079 if (!sparc_got_symbol)
3080 sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3082 return sparc_got_symbol;
3085 /* Ensure that we are not using patterns that are not OK with PIC. */
3087 int
3088 check_pic (int i)
3090 rtx op;
3092 switch (flag_pic)
3094 case 1:
3095 op = recog_data.operand[i];
3096 gcc_assert (GET_CODE (op) != SYMBOL_REF
3097 && (GET_CODE (op) != CONST
3098 || (GET_CODE (XEXP (op, 0)) == MINUS
3099 && XEXP (XEXP (op, 0), 0) == sparc_got ()
3100 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
3102 case 2:
3103 default:
3104 return 1;
3107 /* Return true if X is an address which needs a temporary register when
3108 reloaded while generating PIC code. */
3111 pic_address_needs_scratch (rtx x)
3113 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
3114 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
3115 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
3116 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3117 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
3118 return 1;
3120 return 0;
3123 /* Determine if a given RTX is a valid constant. We already know this
3124 satisfies CONSTANT_P. */
3127 sparc_legitimate_constant_p (enum machine_mode mode, rtx x)
3129 switch (GET_CODE (x))
3131 case CONST:
3132 case SYMBOL_REF:
3133 if (sparc_tls_referenced_p (x))
3134 return false;
3135 break;
3137 case CONST_DOUBLE:
3138 if (GET_MODE (x) == VOIDmode)
3139 return true;
3141 /* Floating point constants are generally not ok.
3142 The only exception is 0.0 and all-ones in VIS. */
3143 if (TARGET_VIS
3144 && SCALAR_FLOAT_MODE_P (mode)
3145 && (const_zero_operand (x, mode)
3146 || const_all_ones_operand (x, mode)))
3147 return true;
3149 return false;
3151 case CONST_VECTOR:
3152 /* Vector constants are generally not ok.
3153 The only exception is 0 or -1 in VIS. */
3154 if (TARGET_VIS
3155 && (const_zero_operand (x, mode)
3156 || const_all_ones_operand (x, mode)))
3157 return true;
3159 return false;
3161 default:
3162 break;
3165 return true;
3168 /* Determine if a given RTX is a valid constant address. */
3171 constant_address_p (rtx x)
3173 switch (GET_CODE (x))
3175 case CONST_INT:
3176 case HIGH:
3177 return true;
3180 case CONST:
3181 if (flag_pic && pic_address_needs_scratch (x))
3182 return false;
3183 return sparc_legitimate_constant_p (Pmode, x);
3184 case SYMBOL_REF:
3185 case LABEL_REF:
3186 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
3193 /* Nonzero if the constant value X is a legitimate general operand
3194 when generating PIC code. It is given that flag_pic is on and
3195 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3198 legitimate_pic_operand_p (rtx x)
3200 if (pic_address_needs_scratch (x))
3201 return false;
3202 if (sparc_tls_referenced_p (x))
3203 return false;
3205 return true;
3207 #define RTX_OK_FOR_OFFSET_P(X, MODE) \
3208 (CONST_INT_P (X) \
3209 && INTVAL (X) >= -0x1000 \
3210 && INTVAL (X) < (0x1000 - GET_MODE_SIZE (MODE)))
3212 #define RTX_OK_FOR_OLO10_P(X, MODE) \
3213 (CONST_INT_P (X) \
3214 && INTVAL (X) >= -0x1000 \
3215 && INTVAL (X) < (0xc00 - GET_MODE_SIZE (MODE)))
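/* (Editor's note: both macros keep the offset within the 13-bit signed
   immediate of a memory access.  The OLO10 variant is applied on top of
   a %lo() term, which can itself contribute up to 0x3ff, so the extra
   offset is capped at 0xc00 rather than 0x1000 to keep the combined
   value inside simm13.)  */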
3217 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
3219 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
3220 ordinarily. This changes a bit when generating PIC. */
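/* (Editor's illustration: typical addresses accepted below are
   [%reg1 + %reg2], [%reg + simm13] such as [%fp - 8], and the
   [%reg + %lo(sym)] form left behind by sethi/%lo splitting of
   absolute addresses.)  */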
3223 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3225 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3227 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3229 else if (GET_CODE (addr) == PLUS)
3231 rs1 = XEXP (addr, 0);
3232 rs2 = XEXP (addr, 1);
3234 /* Canonicalize. REG comes first; if there are no regs,
3235 LO_SUM comes first. */
3236 if (!REG_P (rs1)
3237 && GET_CODE (rs1) != SUBREG
3238 && (REG_P (rs2)
3239 || GET_CODE (rs2) == SUBREG
3240 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3242 rs1 = XEXP (addr, 1);
3243 rs2 = XEXP (addr, 0);
3246 if ((flag_pic == 1
3247 && rs1 == pic_offset_table_rtx
3248 && !REG_P (rs2)
3249 && GET_CODE (rs2) != SUBREG
3250 && GET_CODE (rs2) != LO_SUM
3251 && GET_CODE (rs2) != MEM
3252 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
3253 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3254 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3255 || ((REG_P (rs1)
3256 || GET_CODE (rs1) == SUBREG)
3257 && RTX_OK_FOR_OFFSET_P (rs2, mode)))
3259 imm1 = rs2;
3260 rs2 = NULL;
3262 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3263 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3265 /* We prohibit REG + REG for TFmode when there are no quad move insns
3266 and we consequently need to split. We do this because REG+REG
3267 is not an offsettable address. If we get the situation in reload
3268 where source and destination of a movtf pattern are both MEMs with
3269 REG+REG address, then only one of them gets converted to an
3270 offsettable address. */
3271 if (mode == TFmode
3272 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3273 return 0;
3275 /* We prohibit REG + REG on ARCH32 if not optimizing for
3276 DFmode/DImode because then mem_min_alignment is likely to be zero
3277 after reload and the forced split would lack a matching splitter
3278 pattern. */
3279 if (TARGET_ARCH32 && !optimize
3280 && (mode == DFmode || mode == DImode))
3281 return 0;
3283 else if (USE_AS_OFFSETABLE_LO10
3284 && GET_CODE (rs1) == LO_SUM
3285 && TARGET_ARCH64
3286 && ! TARGET_CM_MEDMID
3287 && RTX_OK_FOR_OLO10_P (rs2, mode))
3290 imm1 = XEXP (rs1, 1);
3291 rs1 = XEXP (rs1, 0);
3292 if (!CONSTANT_P (imm1)
3293 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3294 return 0;
3297 else if (GET_CODE (addr) == LO_SUM)
3299 rs1 = XEXP (addr, 0);
3300 imm1 = XEXP (addr, 1);
3302 if (!CONSTANT_P (imm1)
3303 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3304 return 0;
3306 /* We can't allow TFmode in 32-bit mode, because an offset greater
3307 than the alignment (8) may cause the LO_SUM to overflow. */
3308 if (mode == TFmode && TARGET_ARCH32)
3309 return 0;
3311 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3312 ;
3313 else
3314 return 0;
3316 if (GET_CODE (rs1) == SUBREG)
3317 rs1 = SUBREG_REG (rs1);
3323 if (GET_CODE (rs2) == SUBREG)
3324 rs2 = SUBREG_REG (rs2);
3328 if (strict)
3331 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3332 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3333 return 0;
3335 else
3337 if ((REGNO (rs1) >= 32
3338 && REGNO (rs1) != FRAME_POINTER_REGNUM
3339 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3340 || (rs2
3341 && (REGNO (rs2) >= 32
3342 && REGNO (rs2) != FRAME_POINTER_REGNUM
3343 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3344 return 0;
3346 return 1;
3349 /* Return the SYMBOL_REF for the tls_get_addr function. */
3351 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
3354 sparc_tls_get_addr (void)
3356 if (!sparc_tls_symbol)
3357 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3359 return sparc_tls_symbol;
3362 /* Return the Global Offset Table to be used in TLS mode. */
3365 sparc_tls_got (void)
3367 /* In PIC mode, this is just the PIC offset table. */
3368 if (flag_pic)
3370 crtl->uses_pic_offset_table = 1;
3371 return pic_offset_table_rtx;
3374 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
3375 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
3376 if (TARGET_SUN_TLS && TARGET_ARCH32)
3378 load_got_register ();
3379 return global_offset_table_rtx;
3382 /* In all other cases, we load a new pseudo with the GOT symbol. */
3383 return copy_to_reg (sparc_got ());
3386 /* Return true if X contains a thread-local symbol. */
3389 sparc_tls_referenced_p (rtx x)
3391 if (!TARGET_HAVE_TLS)
3392 return false;
3394 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3395 x = XEXP (XEXP (x, 0), 0);
3397 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3398 return true;
3400 /* That's all we handle in sparc_legitimize_tls_address for now. */
3401 return false;
3404 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3405 this (thread-local) address. */
3408 sparc_legitimize_tls_address (rtx addr)
3410 rtx temp1, temp2, temp3, ret, o0, got, insn;
3412 gcc_assert (can_create_pseudo_p ());
3414 if (GET_CODE (addr) == SYMBOL_REF)
3415 switch (SYMBOL_REF_TLS_MODEL (addr))
3417 case TLS_MODEL_GLOBAL_DYNAMIC:
3419 temp1 = gen_reg_rtx (SImode);
3420 temp2 = gen_reg_rtx (SImode);
3421 ret = gen_reg_rtx (Pmode);
3422 o0 = gen_rtx_REG (Pmode, 8);
3423 got = sparc_tls_got ();
3424 emit_insn (gen_tgd_hi22 (temp1, addr));
3425 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3426 if (TARGET_ARCH32)
3428 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3429 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3430 addr, const1_rtx));
3432 else
3434 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3435 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3436 addr, const1_rtx));
3438 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3439 insn = get_insns ();
3441 emit_libcall_block (insn, ret, o0, addr);
3444 case TLS_MODEL_LOCAL_DYNAMIC:
3446 temp1 = gen_reg_rtx (SImode);
3447 temp2 = gen_reg_rtx (SImode);
3448 temp3 = gen_reg_rtx (Pmode);
3449 ret = gen_reg_rtx (Pmode);
3450 o0 = gen_rtx_REG (Pmode, 8);
3451 got = sparc_tls_got ();
3452 emit_insn (gen_tldm_hi22 (temp1));
3453 emit_insn (gen_tldm_lo10 (temp2, temp1));
3454 if (TARGET_ARCH32)
3456 emit_insn (gen_tldm_add32 (o0, got, temp2));
3457 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3458 const1_rtx));
3460 else
3462 emit_insn (gen_tldm_add64 (o0, got, temp2));
3463 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3464 const1_rtx));
3466 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3467 insn = get_insns ();
3469 emit_libcall_block (insn, temp3, o0,
3470 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3471 UNSPEC_TLSLD_BASE));
3472 temp1 = gen_reg_rtx (SImode);
3473 temp2 = gen_reg_rtx (SImode);
3474 emit_insn (gen_tldo_hix22 (temp1, addr));
3475 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3476 if (TARGET_ARCH32)
3477 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3478 else
3479 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3482 case TLS_MODEL_INITIAL_EXEC:
3483 temp1 = gen_reg_rtx (SImode);
3484 temp2 = gen_reg_rtx (SImode);
3485 temp3 = gen_reg_rtx (Pmode);
3486 got = sparc_tls_got ();
3487 emit_insn (gen_tie_hi22 (temp1, addr));
3488 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3489 if (TARGET_ARCH32)
3490 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3491 else
3492 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3495 ret = gen_reg_rtx (Pmode);
3496 if (TARGET_ARCH32)
3497 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3498 temp3, addr));
3499 else
3500 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3501 temp3, addr));
3504 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3507 case TLS_MODEL_LOCAL_EXEC:
3508 temp1 = gen_reg_rtx (Pmode);
3509 temp2 = gen_reg_rtx (Pmode);
3511 if (TARGET_ARCH32)
3512 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3513 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3515 else
3517 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3518 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3520 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3527 else if (GET_CODE (addr) == CONST)
3531 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
3533 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
3534 offset = XEXP (XEXP (addr, 0), 1);
3536 base = force_operand (base, NULL_RTX);
3537 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
3538 offset = force_reg (Pmode, offset);
3539 ret = gen_rtx_PLUS (Pmode, base, offset);
3543 gcc_unreachable (); /* for now ... */
3545 return ret;
3548 /* Legitimize PIC addresses. If the address is already position-independent,
3549 we return ORIG. Newly generated position-independent addresses go into a
3550 reg. This is REG if nonzero, otherwise we allocate register(s) as
3554 sparc_legitimize_pic_address (rtx orig, rtx reg)
3556 bool gotdata_op = false;
3558 if (GET_CODE (orig) == SYMBOL_REF
3559 /* See the comment in sparc_expand_move. */
3560 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
3562 rtx pic_ref, address;
3567 gcc_assert (! reload_in_progress && ! reload_completed);
3568 reg = gen_reg_rtx (Pmode);
3573 /* If not during reload, allocate another temp reg here for loading
3574 in the address, so that these instructions can be optimized
3576 rtx temp_reg = ((reload_in_progress || reload_completed)
3577 ? reg : gen_reg_rtx (Pmode));
3579 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3580 won't get confused into thinking that these two instructions
3581 are loading in the true address of the symbol. If in the
3582 future a PIC rtx exists, that should be used instead. */
3584 if (TARGET_ARCH64)
3585 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3586 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3588 else
3590 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3591 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3599 crtl->uses_pic_offset_table = 1;
3602 if (TARGET_ARCH64)
3603 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
3604 pic_offset_table_rtx,
3606 else
3607 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
3608 pic_offset_table_rtx,
3612 else
3613 pic_ref
3614 = gen_const_mem (Pmode,
3615 gen_rtx_PLUS (Pmode,
3616 pic_offset_table_rtx, address));
3617 insn = emit_move_insn (reg, pic_ref);
3620 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3622 set_unique_reg_note (insn, REG_EQUAL, orig);
3624 return reg;
3625 else if (GET_CODE (orig) == CONST)
3629 if (GET_CODE (XEXP (orig, 0)) == PLUS
3630 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3635 gcc_assert (! reload_in_progress && ! reload_completed);
3636 reg = gen_reg_rtx (Pmode);
3639 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3640 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
3641 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
3642 base == reg ? NULL_RTX : reg);
3644 if (GET_CODE (offset) == CONST_INT)
3646 if (SMALL_INT (offset))
3647 return plus_constant (base, INTVAL (offset));
3648 else if (! reload_in_progress && ! reload_completed)
3649 offset = force_reg (Pmode, offset);
3650 else
3651 /* If we reach here, then something is seriously wrong. */
3652 gcc_unreachable ();
3654 return gen_rtx_PLUS (Pmode, base, offset);
3656 else if (GET_CODE (orig) == LABEL_REF)
3657 /* ??? We ought to be checking that the register is live instead, in case
3658 it is eliminated. */
3659 crtl->uses_pic_offset_table = 1;
3661 return orig;
3664 /* Try machine-dependent ways of modifying an illegitimate address X
3665 to be legitimate. If we find one, return the new, valid address.
3667 OLDX is the address as it was before break_out_memory_refs was called.
3668 In some cases it is useful to look at this to decide what needs to be done.
3670 MODE is the mode of the operand pointed to by X.
3672 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
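/* (Editor's illustration: an address such as (plus (reg) (const_int
   0x12345)) has an offset outside the simm13 range, so the constant is
   copied into a fresh register and the REG+REG form is used instead;
   likewise (plus (reg) (mult ...)) first forces the product into a
   register.)  */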
3675 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3676 enum machine_mode mode)
3678 rtx orig_x = x;
3680 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3681 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3682 force_operand (XEXP (x, 0), NULL_RTX));
3683 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3684 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3685 force_operand (XEXP (x, 1), NULL_RTX));
3686 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3687 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3689 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3690 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3691 force_operand (XEXP (x, 1), NULL_RTX));
3693 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3694 return x;
3696 if (sparc_tls_referenced_p (x))
3697 x = sparc_legitimize_tls_address (x);
3698 else if (flag_pic)
3699 x = sparc_legitimize_pic_address (x, NULL_RTX);
3700 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3701 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3702 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3703 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3704 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3705 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3706 else if (GET_CODE (x) == SYMBOL_REF
3707 || GET_CODE (x) == CONST
3708 || GET_CODE (x) == LABEL_REF)
3709 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3711 return x;
3714 /* Delegitimize an address that was legitimized by the above function. */
3717 sparc_delegitimize_address (rtx x)
3719 x = delegitimize_mem_from_attrs (x);
3721 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 1)) == UNSPEC)
3722 switch (XINT (XEXP (x, 1), 1))
3724 case UNSPEC_MOVE_PIC:
3726 x = XVECEXP (XEXP (x, 1), 0, 0);
3727 gcc_assert (GET_CODE (x) == SYMBOL_REF);
3733 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
3734 if (GET_CODE (x) == MINUS
3735 && REG_P (XEXP (x, 0))
3736 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
3737 && GET_CODE (XEXP (x, 1)) == LO_SUM
3738 && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
3739 && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
3741 x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
3742 gcc_assert (GET_CODE (x) == LABEL_REF);
3745 return x;
3748 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
3749 replace the input X, or the original X if no replacement is called for.
3750 The output parameter *WIN is 1 if the calling macro should goto WIN,
3753 For SPARC, we wish to handle addresses by splitting them into
3754 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
3755 This cuts the number of extra insns by one.
3757 Do nothing when generating PIC code and the address is a symbolic
3758 operand or requires a scratch register. */
3761 sparc_legitimize_reload_address (rtx x, enum machine_mode mode,
3762 int opnum, int type,
3763 int ind_levels ATTRIBUTE_UNUSED, int *win)
3765 /* Decompose SImode constants into HIGH+LO_SUM. */
3766 if (CONSTANT_P (x)
3767 && (mode != TFmode || TARGET_ARCH64)
3768 && GET_MODE (x) == SImode
3769 && GET_CODE (x) != LO_SUM
3770 && GET_CODE (x) != HIGH
3771 && sparc_cmodel <= CM_MEDLOW
3772 && !(flag_pic
3773 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
3775 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
3776 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3777 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3778 opnum, (enum reload_type)type);
3780 *win = 1;
3781 return x;
3783 /* We have to recognize what we have already generated above. */
3784 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
3786 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3787 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3788 opnum, (enum reload_type)type);
3790 *win = 1;
3791 return x;
3794 *win = 0;
3795 return x;
3797 /* Return true if ADDR (a legitimate address expression)
3798 has an effect that depends on the machine mode it is used for.
3800 In PIC mode,
3802 (mem:HI [%l7+a])
3804 is not equivalent to
3806 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
3808 because [%l7+a+1] is interpreted as the address of (a+1). */
3812 sparc_mode_dependent_address_p (const_rtx addr)
3814 if (flag_pic && GET_CODE (addr) == PLUS)
3816 rtx op0 = XEXP (addr, 0);
3817 rtx op1 = XEXP (addr, 1);
3818 if (op0 == pic_offset_table_rtx
3819 && symbolic_operand (op1, VOIDmode))
3820 return true;
3823 return false;
3826 #ifdef HAVE_GAS_HIDDEN
3827 # define USE_HIDDEN_LINKONCE 1
3829 # define USE_HIDDEN_LINKONCE 0
3833 get_pc_thunk_name (char name[32], unsigned int regno)
3835 const char *reg_name = reg_names[regno];
3837 /* Skip the leading '%' as that cannot be used in a
3838 symbol name. */
3839 reg_name += 1;
3841 if (USE_HIDDEN_LINKONCE)
3842 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
3844 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
3847 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
3850 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2, rtx op3)
3852 int orig_flag_pic = flag_pic;
3853 rtx insn;
3855 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
3856 flag_pic = 0;
3857 if (TARGET_ARCH64)
3858 insn = gen_load_pcrel_symdi (op0, op1, op2, op3);
3859 else
3860 insn = gen_load_pcrel_symsi (op0, op1, op2, op3);
3861 flag_pic = orig_flag_pic;
3863 return insn;
3866 /* Emit code to load the GOT register. */
3869 load_got_register (void)
3871 /* In PIC mode, this will retrieve pic_offset_table_rtx. */
3872 if (!global_offset_table_rtx)
3873 global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
3875 if (TARGET_VXWORKS_RTP)
3876 emit_insn (gen_vxworks_load_got ());
3877 else
3879 /* The GOT symbol is subject to a PC-relative relocation so we need a
3880 helper function to add the PC value and thus get the final value. */
3881 if (!got_helper_rtx)
3884 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
3885 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
3888 emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
3889 got_helper_rtx,
3890 GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM)));
3893 /* Need to emit this whether or not we obey regdecls,
3894 since setjmp/longjmp can cause life info to screw up.
3895 ??? In the case where we don't obey regdecls, this is not sufficient
3896 since we may not fall out the bottom. */
3897 emit_use (global_offset_table_rtx);
3900 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3901 address of the call target. */
3904 sparc_emit_call_insn (rtx pat, rtx addr)
3908 insn = emit_call_insn (pat);
3910 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3911 if (TARGET_VXWORKS_RTP
3913 && GET_CODE (addr) == SYMBOL_REF
3914 && (SYMBOL_REF_DECL (addr)
3915 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3916 : !SYMBOL_REF_LOCAL_P (addr)))
3918 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3919 crtl->uses_pic_offset_table = 1;
3923 /* Return 1 if RTX is a MEM which is known to be aligned to at
3924 least a DESIRED byte boundary. */
3927 mem_min_alignment (rtx mem, int desired)
3929 rtx addr, base, offset;
3931 /* If it's not a MEM we can't accept it. */
3932 if (GET_CODE (mem) != MEM)
3933 return 0;
3936 if (!TARGET_UNALIGNED_DOUBLES
3937 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3938 return 1;
3940 /* ??? The rest of the function predates MEM_ALIGN so
3941 there is probably a bit of redundancy. */
3942 addr = XEXP (mem, 0);
3943 base = offset = NULL_RTX;
3944 if (GET_CODE (addr) == PLUS)
3946 if (GET_CODE (XEXP (addr, 0)) == REG)
3948 base = XEXP (addr, 0);
3950 /* What we are saying here is that if the base
3951 REG is aligned properly, the compiler will make
3952 sure any REG based index upon it will be so
3953 as well. */
3954 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3955 offset = XEXP (addr, 1);
3957 offset = const0_rtx;
3960 else if (GET_CODE (addr) == REG)
3962 base = addr;
3963 offset = const0_rtx;
3966 if (base != NULL_RTX)
3968 int regno = REGNO (base);
3970 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3972 /* Check if the compiler has recorded some information
3973 about the alignment of the base REG. If reload has
3974 completed, we already matched with proper alignments.
3975 If not running global_alloc, reload might give us
3976 unaligned pointer to local stack though. */
3977 if (((cfun != 0
3978 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3979 || (optimize && reload_completed))
3980 && (INTVAL (offset) & (desired - 1)) == 0)
3981 return 1;
3983 else
3985 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3986 return 1;
3989 else if (! TARGET_UNALIGNED_DOUBLES
3990 || CONSTANT_P (addr)
3991 || GET_CODE (addr) == LO_SUM)
3993 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3994 is true, in which case we can only assume that an access is aligned if
3995 it is to a constant address, or the address involves a LO_SUM. */
3996 return 1;
3999 /* An obviously unaligned address. */
4000 return 0;
4004 /* Vectors to keep interesting information about registers where it can easily
4005 be got. We used to use the actual mode value as the bit number, but there
4006 are more than 32 modes now. Instead we use two tables: one indexed by
4007 hard register number, and one indexed by mode. */
4009 /* The purpose of sparc_mode_class is to shrink the range of modes so that
4010 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
4011 mapped into one sparc_mode_class mode. */
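/* (Editor's note: for instance SImode and smaller integer modes map to
   S_MODE, DImode to D_MODE, TImode to T_MODE, and SFmode/DFmode/TFmode
   to SF_MODE/DF_MODE/TF_MODE respectively; the mapping is computed in
   sparc_init_modes below.)  */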
4013 enum sparc_mode_class {
4014 S_MODE, D_MODE, T_MODE, O_MODE,
4015 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
4016 CC_MODE, CCFP_MODE
4017 };
4019 /* Modes for single-word and smaller quantities. */
4020 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
4022 /* Modes for double-word and smaller quantities. */
4023 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
4025 /* Modes for quad-word and smaller quantities. */
4026 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
4028 /* Modes for 8-word and smaller quantities. */
4029 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
4031 /* Modes for single-float quantities. We must allow any single word or
4032 smaller quantity. This is because the fix/float conversion instructions
4033 take integer inputs/outputs from the float registers. */
4034 #define SF_MODES (S_MODES)
4036 /* Modes for double-float and smaller quantities. */
4037 #define DF_MODES (D_MODES)
4039 /* Modes for quad-float and smaller quantities. */
4040 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
4042 /* Modes for quad-float pairs and smaller quantities. */
4043 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
4045 /* Modes for double-float only quantities. */
4046 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
4048 /* Modes for quad-float and double-float only quantities. */
4049 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
4051 /* Modes for quad-float pairs and double-float only quantities. */
4052 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
4054 /* Modes for condition codes. */
4055 #define CC_MODES (1 << (int) CC_MODE)
4056 #define CCFP_MODES (1 << (int) CCFP_MODE)
4058 /* Value is 1 if register/mode pair is acceptable on sparc.
4059 The funny mixture of D and T modes is because integer operations
4060 do not specially operate on tetra quantities, so non-quad-aligned
4061 registers can hold quadword quantities (except %o4 and %i4 because
4062 they cross fixed registers). */
4064 /* This points to either the 32 bit or the 64 bit version. */
4065 const int *hard_regno_mode_classes;
4067 static const int hard_32bit_mode_classes[] = {
4068 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4069 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4070 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4071 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4073 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4074 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4075 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4076 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4078 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4079 and none can hold SFmode/SImode values. */
4080 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4081 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4082 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4083 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4086 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4088 /* %icc, %sfp, %gsr */
4089 CC_MODES, 0, D_MODES
4092 static const int hard_64bit_mode_classes[] = {
4093 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4094 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4095 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4096 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4098 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4099 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4100 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4101 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4103 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4104 and none can hold SFmode/SImode values. */
4105 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4106 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4107 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4108 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4111 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4113 /* %icc, %sfp, %gsr */
4114 CC_MODES, 0, D_MODES
4117 int sparc_mode_class [NUM_MACHINE_MODES];
4119 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
4122 sparc_init_modes (void)
4126 for (i = 0; i < NUM_MACHINE_MODES; i++)
4128 switch (GET_MODE_CLASS (i))
4130 case MODE_INT:
4131 case MODE_PARTIAL_INT:
4132 case MODE_COMPLEX_INT:
4133 if (GET_MODE_SIZE (i) <= 4)
4134 sparc_mode_class[i] = 1 << (int) S_MODE;
4135 else if (GET_MODE_SIZE (i) == 8)
4136 sparc_mode_class[i] = 1 << (int) D_MODE;
4137 else if (GET_MODE_SIZE (i) == 16)
4138 sparc_mode_class[i] = 1 << (int) T_MODE;
4139 else if (GET_MODE_SIZE (i) == 32)
4140 sparc_mode_class[i] = 1 << (int) O_MODE;
4142 sparc_mode_class[i] = 0;
4144 case MODE_VECTOR_INT:
4145 if (GET_MODE_SIZE (i) <= 4)
4146 sparc_mode_class[i] = 1 << (int)SF_MODE;
4147 else if (GET_MODE_SIZE (i) == 8)
4148 sparc_mode_class[i] = 1 << (int) DF_MODE;
4149 break;
4150 case MODE_FLOAT:
4151 case MODE_COMPLEX_FLOAT:
4152 if (GET_MODE_SIZE (i) <= 4)
4153 sparc_mode_class[i] = 1 << (int) SF_MODE;
4154 else if (GET_MODE_SIZE (i) == 8)
4155 sparc_mode_class[i] = 1 << (int) DF_MODE;
4156 else if (GET_MODE_SIZE (i) == 16)
4157 sparc_mode_class[i] = 1 << (int) TF_MODE;
4158 else if (GET_MODE_SIZE (i) == 32)
4159 sparc_mode_class[i] = 1 << (int) OF_MODE;
4161 sparc_mode_class[i] = 0;
4162 break;
4163 case MODE_CC:
4164 if (i == (int) CCFPmode || i == (int) CCFPEmode)
4165 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
4166 else
4167 sparc_mode_class[i] = 1 << (int) CC_MODE;
4168 break;
4169 default:
4170 sparc_mode_class[i] = 0;
4171 break;
4175 if (TARGET_ARCH64)
4176 hard_regno_mode_classes = hard_64bit_mode_classes;
4177 else
4178 hard_regno_mode_classes = hard_32bit_mode_classes;
4180 /* Initialize the array used by REGNO_REG_CLASS. */
4181 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4183 if (i < 16 && TARGET_V8PLUS)
4184 sparc_regno_reg_class[i] = I64_REGS;
4185 else if (i < 32 || i == FRAME_POINTER_REGNUM)
4186 sparc_regno_reg_class[i] = GENERAL_REGS;
4187 else if (i < 64)
4188 sparc_regno_reg_class[i] = FP_REGS;
4189 else if (i < 96)
4190 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
4191 else if (i < 100)
4192 sparc_regno_reg_class[i] = FPCC_REGS;
4193 else
4194 sparc_regno_reg_class[i] = NO_REGS;
4198 /* Return whether REGNO, a global or FP register, must be saved/restored. */
4201 save_global_or_fp_reg_p (unsigned int regno,
4202 int leaf_function ATTRIBUTE_UNUSED)
4204 return !call_used_regs[regno] && df_regs_ever_live_p (regno);
4207 /* Return whether the return address register (%i7) is needed. */
4210 return_addr_reg_needed_p (int leaf_function)
4212 /* If it is live, for example because of __builtin_return_address (0). */
4213 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
4214 return true;
4216 /* Otherwise, it is needed as save register if %o7 is clobbered. */
4217 return (!leaf_function
4218 /* Loading the GOT register clobbers %o7. */
4219 || crtl->uses_pic_offset_table
4220 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM));
4226 /* Return whether REGNO, a local or in register, must be saved/restored. */
4229 save_local_or_in_reg_p (unsigned int regno, int leaf_function)
4231 /* General case: call-saved registers live at some point. */
4232 if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
4233 return true;
4235 /* Frame pointer register (%fp) if needed. */
4236 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
4237 return true;
4239 /* Return address register (%i7) if needed. */
4240 if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
4241 return true;
4243 /* GOT register (%l7) if needed. */
4244 if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
4245 return true;
4247 /* If the function accesses prior frames, the frame pointer and the return
4248 address of the previous frame must be saved on the stack. */
4249 if (crtl->accesses_prior_frames
4250 && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
4251 return true;
4253 return false;
4256 /* Compute the frame size required by the function. This function is called
4257 during the reload pass and also by sparc_expand_prologue. */
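/* (Editor's sketch of the computation below: the frame consists of the
   rounded local-variable area plus the save area for global/FP
   registers (the "apparent" size), the outgoing argument area, and the
   register window save area, with the total aligned via
   SPARC_STACK_ALIGN; a leaf function that needs none of these gets a
   frame size of 0.)  */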
4260 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
4262 HOST_WIDE_INT frame_size, apparent_frame_size;
4263 int args_size, n_global_fp_regs = 0;
4264 bool save_local_in_regs_p = false;
4267 /* If the function allocates dynamic stack space, the dynamic offset is
4268 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
4269 if (leaf_function && !cfun->calls_alloca)
4270 args_size = 0;
4271 else
4272 args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
4274 /* Calculate space needed for global registers. */
4275 if (TARGET_ARCH64)
4276 for (i = 0; i < 8; i++)
4277 if (save_global_or_fp_reg_p (i, 0))
4278 n_global_fp_regs += 2;
4279 else
4280 for (i = 0; i < 8; i += 2)
4281 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
4282 n_global_fp_regs += 2;
4284 /* In the flat window model, find out which local and in registers need to
4285 be saved. We don't reserve space in the current frame for them as they
4286 will be spilled into the register window save area of the caller's frame.
4287 However, as soon as we use this register window save area, we must create
4288 that of the current frame to make it the live one. */
4289 if (TARGET_FLAT)
4290 for (i = 16; i < 32; i++)
4291 if (save_local_or_in_reg_p (i, leaf_function))
4293 save_local_in_regs_p = true;
4297 /* Calculate space needed for FP registers. */
4297 if (TARGET_FPU)
4298 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
4299 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
4300 n_global_fp_regs += 2;
4302 if (size == 0
4303 && n_global_fp_regs == 0
4304 && args_size == 0
4305 && !save_local_in_regs_p)
4306 frame_size = apparent_frame_size = 0;
4307 else
4309 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
4310 apparent_frame_size = (size - STARTING_FRAME_OFFSET + 7) & -8;
4311 apparent_frame_size += n_global_fp_regs * 4;
4313 /* We need to add the size of the outgoing argument area. */
4314 frame_size = apparent_frame_size + ((args_size + 7) & -8);
4316 /* And that of the register window save area. */
4317 frame_size += FIRST_PARM_OFFSET (cfun->decl);
4319 /* Finally, bump to the appropriate alignment. */
4320 frame_size = SPARC_STACK_ALIGN (frame_size);
4323 /* Set up values for use in prologue and epilogue. */
4324 sparc_frame_size = frame_size;
4325 sparc_apparent_frame_size = apparent_frame_size;
4326 sparc_n_global_fp_regs = n_global_fp_regs;
4327 sparc_save_local_in_regs_p = save_local_in_regs_p;
4329 return frame_size;
4332 /* Output any necessary .register pseudo-ops. */
4335 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
4337 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
4343 /* Check if %g[2367] were used without
4344 .register being printed for them already. */
4345 for (i = 2; i < 8; i++)
4347 if (df_regs_ever_live_p (i)
4348 && ! sparc_hard_reg_printed [i])
      {
        sparc_hard_reg_printed [i] = 1;
4351 /* %g7 is used as TLS base register, use #ignore
4352 for it instead of #scratch. */
4353 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
                 i == 7 ? "ignore" : "scratch");
      }
#endif
}
4361 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
4363 #if PROBE_INTERVAL > 4096
#error Cannot use indexed addressing mode for stack probing
#endif
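/* The probes are emitted as "st %g0, [reg+imm]" stores, and SPARC store
   immediates are 13-bit signed values (simm13, -4096..4095), hence the
   4096 limit on the interval above.  */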
4367 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
4368 inclusive. These are offsets from the current stack pointer.
4370 Note that we don't use the REG+REG addressing mode for the probes because
4371 of the stack bias in 64-bit mode. And it doesn't really buy us anything
   so the advantages of having a single code path win here.  */
static void
sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
{
4377 rtx g1 = gen_rtx_REG (Pmode, 1);
4379 /* See if we have a constant small number of probes to generate. If so,
4380 that's the easy case. */
4381 if (size <= PROBE_INTERVAL)
    {
      emit_move_insn (g1, GEN_INT (first));
4384 emit_insn (gen_rtx_SET (VOIDmode, g1,
4385 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
      emit_stack_probe (plus_constant (g1, -size));
    }

4389 /* The run-time loop is made up of 10 insns in the generic case while the
4390 compile-time loop is made up of 4+2*(n-2) insns for n # of intervals. */
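  /* For instance, with n == 4 intervals the unrolled sequence costs
     4 + 2*2 = 8 insns, which still beats the 10-insn run-time loop.  */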
4391 else if (size <= 5 * PROBE_INTERVAL)
    {
      HOST_WIDE_INT i;

      emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
4396 emit_insn (gen_rtx_SET (VOIDmode, g1,
4397 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4398 emit_stack_probe (g1);
4400 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
4401 it exceeds SIZE. If only two probes are needed, this will not
4402 generate any code. Then probe at FIRST + SIZE. */
4403 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
        {
          emit_insn (gen_rtx_SET (VOIDmode, g1,
4406 plus_constant (g1, -PROBE_INTERVAL)));
4407 emit_stack_probe (g1);
        }

      emit_stack_probe (plus_constant (g1, (i - PROBE_INTERVAL) - size));
    }
4413 /* Otherwise, do the same as above, but in a loop. Note that we must be
4414 extra careful with variables wrapping around because we might be at
4415 the very top (or the very bottom) of the address space and we have
4416 to be able to handle this case properly; in particular, we use an
4417 equality test for the loop condition. */
  else
    {
      HOST_WIDE_INT rounded_size;
4421 rtx g4 = gen_rtx_REG (Pmode, 4);
4423 emit_move_insn (g1, GEN_INT (first));
4426 /* Step 1: round SIZE to the previous multiple of the interval. */
4428 rounded_size = size & -PROBE_INTERVAL;
4429 emit_move_insn (g4, GEN_INT (rounded_size));
4432 /* Step 2: compute initial and final value of the loop counter. */
4434 /* TEST_ADDR = SP + FIRST. */
4435 emit_insn (gen_rtx_SET (VOIDmode, g1,
4436 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4438 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
4439 emit_insn (gen_rtx_SET (VOIDmode, g4, gen_rtx_MINUS (Pmode, g1, g4)));
      /* Step 3: the loop

           while (TEST_ADDR != LAST_ADDR)
             {
               TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
               probe at TEST_ADDR
             }

         probes at FIRST + N * PROBE_INTERVAL for values of N from 1
         until it is equal to ROUNDED_SIZE.  */
      if (TARGET_ARCH64)
        emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
      else
        emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
4459 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
4460 that SIZE is equal to ROUNDED_SIZE. */
4462 if (size != rounded_size)
        emit_stack_probe (plus_constant (g4, rounded_size - size));
    }
4466 /* Make sure nothing is scheduled before we are done. */
  emit_insn (gen_blockage ());
}
4470 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
4471 absolute addresses. */
const char *
output_probe_stack_range (rtx reg1, rtx reg2)
{
4476 static int labelno = 0;
  char loop_lab[32], end_lab[32];
  rtx xops[2];
4480 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
4481 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
4483 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
  /* Jump to END_LAB if TEST_ADDR == LAST_ADDR.  */
  xops[0] = reg1;
  xops[1] = reg2;
  output_asm_insn ("cmp\t%0, %1", xops);
  if (TARGET_ARCH64)
    fputs ("\tbe,pn\t%xcc,", asm_out_file);
  else
    fputs ("\tbe\t", asm_out_file);
4493 assemble_name_raw (asm_out_file, end_lab);
4494 fputc ('\n', asm_out_file);
4496 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
4497 xops[1] = GEN_INT (-PROBE_INTERVAL);
4498 output_asm_insn (" add\t%0, %1, %0", xops);
4500 /* Probe at TEST_ADDR and branch. */
  if (TARGET_ARCH64)
    fputs ("\tba,pt\t%xcc,", asm_out_file);
  else
    fputs ("\tba\t", asm_out_file);
4505 assemble_name_raw (asm_out_file, loop_lab);
4506 fputc ('\n', asm_out_file);
4507 xops[1] = GEN_INT (SPARC_STACK_BIAS);
4508 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);

  return "";
}
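/* For illustration, with a 4096-byte PROBE_INTERVAL and no stack bias the
   loop emitted above looks roughly like this (label numbers and registers
   depend on the actual operands):

        .LPSRL0:
                cmp     %g1, %g4
                be      .LPSRE0
                 add    %g1, -4096, %g1
                ba      .LPSRL0
                 st     %g0, [%g1+0]
        .LPSRE0:
   */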
4515 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
4516 needed. LOW is supposed to be double-word aligned for 32-bit registers.
4517 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
4518 is the action to be performed if SAVE_P returns true and ACTION_FALSE
4519 the action to be performed if it returns false. Return the new offset. */
4521 typedef bool (*sorr_pred_t) (unsigned int, int);
4522 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
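/* For example, emit_save_or_restore_regs (16, 32, base, offset, leaf_p,
   save_local_or_in_reg_p, SORR_SAVE, SORR_ADVANCE) saves every live
   local/in register and merely advances the offset past the slot of the
   dead ones, as done by emit_save_or_restore_local_in_regs below.  */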
static int
emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
4526 int offset, int leaf_function, sorr_pred_t save_p,
4527 sorr_act_t action_true, sorr_act_t action_false)
{
  unsigned int i;
  rtx mem, insn;

  if (TARGET_ARCH64 && high <= 32)
    {
      int fp_offset = -1;

4536 for (i = low; i < high; i++)
        {
          if (save_p (i, leaf_function))
            {
4540 mem = gen_frame_mem (DImode, plus_constant (base, offset));
4541 if (action_true == SORR_SAVE)
                {
                  insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
                  RTX_FRAME_RELATED_P (insn) = 1;
                }
              else  /* action_true == SORR_RESTORE */
                {
4548 /* The frame pointer must be restored last since its old
4549 value may be used as base address for the frame. This
4550 is problematic in 64-bit mode only because of the lack
4551 of double-word load instruction. */
                  if (i == HARD_FRAME_POINTER_REGNUM)
                    fp_offset = offset;
                  else
                    emit_move_insn (gen_rtx_REG (DImode, i), mem);
                }

              offset += 8;
            }
          else if (action_false == SORR_ADVANCE)
            offset += 8;
        }

      if (fp_offset >= 0)
        {
4565 mem = gen_frame_mem (DImode, plus_constant (base, fp_offset));
          emit_move_insn (hard_frame_pointer_rtx, mem);
        }
    }
  else
    {
4571 for (i = low; i < high; i += 2)
        {
          bool reg0 = save_p (i, leaf_function);
4574 bool reg1 = save_p (i + 1, leaf_function);
          enum machine_mode mode;
          int regno;

          if (reg0 && reg1)
            {
              mode = i < 32 ? DImode : DFmode;
              regno = i;
            }
          else if (reg0)
            {
              mode = i < 32 ? SImode : SFmode;
              regno = i;
            }
          else if (reg1)
            {
              mode = i < 32 ? SImode : SFmode;
              regno = i + 1;
              offset += 4;
            }
          else
            {
              if (action_false == SORR_ADVANCE)
                offset += 8;
              continue;
            }

4601 mem = gen_frame_mem (mode, plus_constant (base, offset));
4602 if (action_true == SORR_SAVE)
            {
              insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4605 RTX_FRAME_RELATED_P (insn) = 1;
              if (mode == DImode)
                {
                  rtx set1, set2;

                  mem = gen_frame_mem (SImode, plus_constant (base, offset));
4610 set1 = gen_rtx_SET (VOIDmode, mem,
4611 gen_rtx_REG (SImode, regno));
4612 RTX_FRAME_RELATED_P (set1) = 1;
                  mem
                    = gen_frame_mem (SImode, plus_constant (base, offset + 4));
4615 set2 = gen_rtx_SET (VOIDmode, mem,
4616 gen_rtx_REG (SImode, regno + 1));
4617 RTX_FRAME_RELATED_P (set2) = 1;
4618 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4619 gen_rtx_PARALLEL (VOIDmode,
4620 gen_rtvec (2, set1, set2)));
                }
            }
          else  /* action_true == SORR_RESTORE */
4624 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4626 /* Always preserve double-word alignment. */
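          /* E.g. (4 + 8) & -8 == 8 and (8 + 8) & -8 == 16, so even a
             single 4-byte save consumes a full 8-byte slot.  */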
          offset = (offset + 8) & -8;
        }
    }

  return offset;
}
4634 /* Emit code to adjust BASE to OFFSET. Return the new base. */
static rtx
emit_adjust_base_to_offset (rtx base, int offset)
{
4639 /* ??? This might be optimized a little as %g1 might already have a
4640 value close enough that a single add insn will do. */
4641 /* ??? Although, all of this is probably only a temporary fix because
4642 if %g1 can hold a function result, then sparc_expand_epilogue will
4643 lose (the result will be clobbered). */
4644 rtx new_base = gen_rtx_REG (Pmode, 1);
4645 emit_move_insn (new_base, GEN_INT (offset));
4646 emit_insn (gen_rtx_SET (VOIDmode,
                          new_base, gen_rtx_PLUS (Pmode, base, new_base)));
  return new_base;
}
4651 /* Emit code to save/restore call-saved global and FP registers. */
static void
emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
{
4656 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
    {
      base = emit_adjust_base_to_offset (base, offset);
      offset = 0;
    }

  offset
4663 = emit_save_or_restore_regs (0, 8, base, offset, 0,
4664 save_global_or_fp_reg_p, action, SORR_NONE);
4665 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
                             save_global_or_fp_reg_p, action, SORR_NONE);
}
4669 /* Emit code to save/restore call-saved local and in registers. */
static void
emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
{
4674 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
    {
      base = emit_adjust_base_to_offset (base, offset);
      offset = 0;
    }

4680 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
                             save_local_or_in_reg_p, action, SORR_ADVANCE);
}
4684 /* Emit a window_save insn. */
static void
emit_window_save (rtx increment)
{
4689 rtx insn = emit_insn (gen_window_save (increment));
4690 RTX_FRAME_RELATED_P (insn) = 1;
4692 /* The incoming return address (%o7) is saved in %i7. */
4693 add_reg_note (insn, REG_CFA_REGISTER,
4694 gen_rtx_SET (VOIDmode,
4695 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
                             gen_rtx_REG (Pmode,
                                          INCOMING_RETURN_ADDR_REGNUM)));
4699 /* The window save event. */
4700 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
4702 /* The CFA is %fp, the hard frame pointer. */
4703 add_reg_note (insn, REG_CFA_DEF_CFA,
4704 plus_constant (hard_frame_pointer_rtx,
                               INCOMING_FRAME_SP_OFFSET));
}
/* Generate an increment for the stack pointer.  */

static rtx
gen_stack_pointer_inc (rtx increment)
{
  return gen_rtx_SET (VOIDmode,
                      stack_pointer_rtx,
                      gen_rtx_PLUS (Pmode,
                                    stack_pointer_rtx,
                                    increment));
}

/* Generate a decrement for the stack pointer.  */

static rtx
gen_stack_pointer_dec (rtx decrement)
{
  return gen_rtx_SET (VOIDmode,
                      stack_pointer_rtx,
                      gen_rtx_MINUS (Pmode,
                                     stack_pointer_rtx,
                                     decrement));
}
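/* Note that the epilogue code below passes the negated frame size to
   gen_stack_pointer_dec, so subtracting it actually releases the frame.  */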
4734 /* Expand the function prologue. The prologue is responsible for reserving
4735 storage for the frame, saving the call-saved registers and loading the
4736 GOT register if needed. */
static void
sparc_expand_prologue (void)
{
  HOST_WIDE_INT size;
  rtx insn;

4744 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4745 on the final value of the flag means deferring the prologue/epilogue
4746 expansion until just before the second scheduling pass, which is too
4747 late to emit multiple epilogues or return insns.
4749 Of course we are making the assumption that the value of the flag
4750 will not change between now and its final value. Of the three parts
4751 of the formula, only the last one can reasonably vary. Let's take a
4752 closer look, after assuming that the first two ones are set to true
4753 (otherwise the last value is effectively silenced).
4755 If only_leaf_regs_used returns false, the global predicate will also
4756 be false so the actual frame size calculated below will be positive.
4757 As a consequence, the save_register_window insn will be emitted in
4758 the instruction stream; now this insn explicitly references %fp
4759 which is not a leaf register so only_leaf_regs_used will always
4760 return false subsequently.
4762 If only_leaf_regs_used returns true, we hope that the subsequent
4763 optimization passes won't cause non-leaf registers to pop up. For
4764 example, the regrename pass has special provisions to not rename to
4765 non-leaf registers in a leaf function. */
4766 sparc_leaf_function_p
4767 = optimize > 0 && current_function_is_leaf && only_leaf_regs_used ();
4769 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4771 if (flag_stack_usage_info)
4772 current_function_static_stack_size = size;
4774 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
4775 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
  if (size == 0)
    ;  /* do nothing.  */
  else if (sparc_leaf_function_p)
    {
4781 rtx size_int_rtx = GEN_INT (-size);
      if (size <= 4096)
        insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
4785 else if (size <= 8192)
        {
          insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4788 /* %sp is still the CFA register. */
4789 RTX_FRAME_RELATED_P (insn) = 1;
4790 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
        }
      else
        {
          rtx size_rtx = gen_rtx_REG (Pmode, 1);
4795 emit_move_insn (size_rtx, size_int_rtx);
4796 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
4797 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4798 gen_stack_pointer_inc (size_int_rtx));
        }

      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    {
4805 rtx size_int_rtx = GEN_INT (-size);
      if (size <= 4096)
        emit_window_save (size_int_rtx);
4809 else if (size <= 8192)
        {
          emit_window_save (GEN_INT (-4096));
4812 /* %sp is not the CFA register anymore. */
4813 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
        }
      else
        {
          rtx size_rtx = gen_rtx_REG (Pmode, 1);
4818 emit_move_insn (size_rtx, size_int_rtx);
          emit_window_save (size_rtx);
        }
    }

4823 if (sparc_leaf_function_p)
    {
      sparc_frame_base_reg = stack_pointer_rtx;
4826 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
    }
  else
    {
      sparc_frame_base_reg = hard_frame_pointer_rtx;
      sparc_frame_base_offset = SPARC_STACK_BIAS;
    }
4834 if (sparc_n_global_fp_regs > 0)
4835 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
4836 sparc_frame_base_offset
                                         - sparc_apparent_frame_size,
                                         SORR_SAVE);
4840 /* Load the GOT register if needed. */
4841 if (crtl->uses_pic_offset_table)
4842 load_got_register ();
4844 /* Advertise that the data calculated just above are now valid. */
  sparc_prologue_data_valid_p = true;
}
4848 /* Expand the function prologue. The prologue is responsible for reserving
4849 storage for the frame, saving the call-saved registers and loading the
4850 GOT register if needed. */
static void
sparc_flat_expand_prologue (void)
{
  HOST_WIDE_INT size;
  rtx insn;

4858 sparc_leaf_function_p = optimize > 0 && current_function_is_leaf;
4860 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4862 if (flag_stack_usage_info)
4863 current_function_static_stack_size = size;
4865 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
4866 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
4868 if (sparc_save_local_in_regs_p)
4869 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
                                        SORR_SAVE);

  if (size == 0)
    ;  /* do nothing.  */
  else
    {
      rtx size_int_rtx, size_rtx;
4878 size_rtx = size_int_rtx = GEN_INT (-size);
4880 /* We establish the frame (i.e. decrement the stack pointer) first, even
4881 if we use a frame pointer, because we cannot clobber any call-saved
4882 registers, including the frame pointer, if we haven't created a new
4883 register save area, for the sake of compatibility with the ABI. */
      if (size <= 4096)
        insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
4886 else if (size <= 8192 && !frame_pointer_needed)
        {
          insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4889 RTX_FRAME_RELATED_P (insn) = 1;
4890 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
        }
      else
        {
          size_rtx = gen_rtx_REG (Pmode, 1);
4895 emit_move_insn (size_rtx, size_int_rtx);
4896 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
4897 add_reg_note (insn, REG_CFA_ADJUST_CFA,
4898 gen_stack_pointer_inc (size_int_rtx));
        }

      RTX_FRAME_RELATED_P (insn) = 1;
4902 /* Ensure nothing is scheduled until after the frame is established. */
4903 emit_insn (gen_blockage ());
4905 if (frame_pointer_needed)
        {
          insn = emit_insn (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
                                         gen_rtx_MINUS (Pmode,
                                                        stack_pointer_rtx,
                                                        size_int_rtx)));
4911 RTX_FRAME_RELATED_P (insn) = 1;
4913 add_reg_note (insn, REG_CFA_ADJUST_CFA,
4914 gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
                                     plus_constant (stack_pointer_rtx,
                                                    size)));
        }
    }

4919 if (return_addr_reg_needed_p (sparc_leaf_function_p))
    {
      rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
4922 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
4924 insn = emit_move_insn (i7, o7);
4925 RTX_FRAME_RELATED_P (insn) = 1;
4927 add_reg_note (insn, REG_CFA_REGISTER,
4928 gen_rtx_SET (VOIDmode, i7, o7));
4930 /* Prevent this instruction from ever being considered dead,
4931 even if this function has no epilogue. */
      emit_insn (gen_rtx_USE (VOIDmode, i7));
    }

4936 if (frame_pointer_needed)
    {
      sparc_frame_base_reg = hard_frame_pointer_rtx;
4939 sparc_frame_base_offset = SPARC_STACK_BIAS;
    }
  else
    {
      sparc_frame_base_reg = stack_pointer_rtx;
      sparc_frame_base_offset = size + SPARC_STACK_BIAS;
    }
4947 if (sparc_n_global_fp_regs > 0)
4948 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
4949 sparc_frame_base_offset
                                         - sparc_apparent_frame_size,
                                         SORR_SAVE);
4953 /* Load the GOT register if needed. */
4954 if (crtl->uses_pic_offset_table)
4955 load_got_register ();
4957 /* Advertise that the data calculated just above are now valid. */
  sparc_prologue_data_valid_p = true;
}
4961 /* This function generates the assembly code for function entry, which boils
4962 down to emitting the necessary .register directives. */
static void
sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
4967 /* Check that the assumption we made in sparc_expand_prologue is valid. */
  if (!TARGET_FLAT)
    gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
  sparc_output_scratch_registers (file);
}
4974 /* Expand the function epilogue, either normal or part of a sibcall.
4975 We emit all the instructions except the return or the call. */
static void
sparc_expand_epilogue (bool for_eh)
{
4980 HOST_WIDE_INT size = sparc_frame_size;
4982 if (sparc_n_global_fp_regs > 0)
4983 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
4984 sparc_frame_base_offset
                                         - sparc_apparent_frame_size,
                                         SORR_RESTORE);

  if (size == 0 || for_eh)
    ;  /* do nothing.  */
4990 else if (sparc_leaf_function_p)
    {
      if (size <= 4096)
        emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
4994 else if (size <= 8192)
        {
          emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4997 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
        }
      else
        {
          rtx reg = gen_rtx_REG (Pmode, 1);
5002 emit_move_insn (reg, GEN_INT (-size));
          emit_insn (gen_stack_pointer_dec (reg));
        }
    }
}
5008 /* Expand the function epilogue, either normal or part of a sibcall.
5009 We emit all the instructions except the return or the call. */
static void
sparc_flat_expand_epilogue (bool for_eh)
{
5014 HOST_WIDE_INT size = sparc_frame_size;
5016 if (sparc_n_global_fp_regs > 0)
5017 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5018 sparc_frame_base_offset
                                         - sparc_apparent_frame_size,
                                         SORR_RESTORE);

5022 /* If we have a frame pointer, we'll need both to restore it before the
5023 frame is destroyed and use its current value in destroying the frame.
5024 Since we don't have an atomic way to do that in the flat window model,
5025 we save the current value into a temporary register (%g1). */
5026 if (frame_pointer_needed && !for_eh)
5027 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
5029 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5030 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
5031 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
5033 if (sparc_save_local_in_regs_p)
5034 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
                                        sparc_frame_base_offset,
                                        SORR_RESTORE);

  if (size == 0 || for_eh)
    ;  /* do nothing.  */
5040 else if (frame_pointer_needed)
    {
      /* Make sure the frame is destroyed after everything else is done.  */
5043 emit_insn (gen_blockage ());
      emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
    }
  else
    {
      /* Likewise.  */
5050 emit_insn (gen_blockage ());
      if (size <= 4096)
        emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
5054 else if (size <= 8192)
        {
          emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
5057 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
        }
      else
        {
          rtx reg = gen_rtx_REG (Pmode, 1);
5062 emit_move_insn (reg, GEN_INT (-size));
          emit_insn (gen_stack_pointer_dec (reg));
        }
    }
}
5068 /* Return true if it is appropriate to emit `return' instructions in the
5069 body of a function. */
bool
sparc_can_use_return_insn_p (void)
{
5074 return sparc_prologue_data_valid_p
5075 && sparc_n_global_fp_regs == 0
         && (TARGET_FLAT
             ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
             : (sparc_frame_size == 0 || !sparc_leaf_function_p));
}
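/* That is, in the non-flat model a 'return' can be used either when there
   is no frame at all or when the function is not a leaf, since the register
   window mechanism then restores the caller's context by itself; in the
   flat model the frame must be entirely empty.  */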
5081 /* This function generates the assembly code for function exit. */
static void
sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
5086 /* If the last two instructions of a function are "call foo; dslot;"
5087 the return address might point to the first instruction in the next
5088 function and we have to output a dummy nop for the sake of sane
5089 backtraces in such cases. This is pointless for sibling calls since
5090 the return address is explicitly adjusted. */
5092 rtx insn, last_real_insn;
5094 insn = get_last_insn ();
5096 last_real_insn = prev_real_insn (insn);
  if (last_real_insn
      && GET_CODE (last_real_insn) == INSN
5099 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
5100 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
  if (last_real_insn
      && CALL_P (last_real_insn)
5104 && !SIBLING_CALL_P (last_real_insn))
5105 fputs("\tnop\n", file);
  sparc_output_deferred_case_vectors ();
}
5110 /* Output a 'restore' instruction. */
static void
output_restore (rtx pat)
{
  rtx operands[3];

  if (! pat)
    {
      fputs ("\t restore\n", asm_out_file);
      return;
    }
5123 gcc_assert (GET_CODE (pat) == SET);
5125 operands[0] = SET_DEST (pat);
5126 pat = SET_SRC (pat);
  switch (GET_CODE (pat))
    {
    case PLUS:
      operands[1] = XEXP (pat, 0);
      operands[2] = XEXP (pat, 1);
      output_asm_insn (" restore %r1, %2, %Y0", operands);
      break;

    case LO_SUM:
      operands[1] = XEXP (pat, 0);
      operands[2] = XEXP (pat, 1);
      output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
      break;

    case ASHIFT:
      operands[1] = XEXP (pat, 0);
      gcc_assert (XEXP (pat, 1) == const1_rtx);
      output_asm_insn (" restore %r1, %r1, %Y0", operands);
      break;

    default:
      output_asm_insn (" restore %%g0, %1, %Y0", operands);
      break;
    }
}
5152 /* Output a return. */
const char *
output_return (rtx insn)
{
5157 if (crtl->calls_eh_return)
    {
      /* If the function uses __builtin_eh_return, the eh_return
5160 machinery occupies the delay slot. */
5161 gcc_assert (!final_sequence);
5163 if (flag_delayed_branch)
        {
          if (!TARGET_FLAT && TARGET_V9)
5166 fputs ("\treturn\t%i7+8\n", asm_out_file);
          else
            {
              if (!TARGET_FLAT)
                fputs ("\trestore\n", asm_out_file);

              fputs ("\tjmp\t%o7+8\n", asm_out_file);
            }

          fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
        }
      else
        {
          if (!TARGET_FLAT)
            fputs ("\trestore\n", asm_out_file);

          fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
          fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
        }

      return "";
    }
5186 else if (sparc_leaf_function_p || TARGET_FLAT)
    {
      /* This is a leaf or flat function so we don't have to bother restoring
5189 the register window, which frees us from dealing with the convoluted
5190 semantics of restore/return. We simply output the jump to the
5191 return address and the insn in the delay slot (if any). */
      return "jmp\t%%o7+%)%#";
    }
  else
    {
5197 /* This is a regular function so we have to restore the register window.
5198 We may have a pending insn for the delay slot, which will be either
5199 combined with the 'restore' instruction or put in the delay slot of
5200 the 'return' instruction. */
      if (final_sequence)
        {
          rtx delay, pat;

          delay = NEXT_INSN (insn);
          gcc_assert (delay);
5209 pat = PATTERN (delay);
5211 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
            {
              epilogue_renumber (&pat, 0);
              return "return\t%%i7+%)%#";
            }
          else
            {
5218 output_asm_insn ("jmp\t%%i7+%)", NULL);
5219 output_restore (pat);
5220 PATTERN (delay) = gen_blockage ();
              INSN_CODE (delay) = -1;
            }
        }
      else
        {
          /* The delay slot is empty.  */
          if (TARGET_V9)
            return "return\t%%i7+%)\n\t nop";
          else if (flag_delayed_branch)
            return "jmp\t%%i7+%)\n\t restore";
          else
            return "restore\n\tjmp\t%%o7+%)\n\t nop";
        }
    }

  return "";
}
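/* For illustration: a V9 non-flat function with an empty delay slot thus
   returns with "return %i7+offset ; nop", while a pre-V9 one with delayed
   branches uses "jmp %i7+offset ; restore", where the '%)' sequence
   expands to the proper return offset.  */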
5239 /* Output a sibling call. */
const char *
output_sibcall (rtx insn, rtx call_operand)
{
  rtx operands[1];

5246 gcc_assert (flag_delayed_branch);
5248 operands[0] = call_operand;
5250 if (sparc_leaf_function_p || TARGET_FLAT)
    {
      /* This is a leaf or flat function so we don't have to bother restoring
5253 the register window. We simply output the jump to the function and
5254 the insn in the delay slot (if any). */
5256 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
      if (final_sequence)
        output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
      else
        /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
5263 it into branch if possible. */
        output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
                         operands);
    }
  else
    {
5269 /* This is a regular function so we have to restore the register window.
5270 We may have a pending insn for the delay slot, which will be combined
5271 with the 'restore' instruction. */
5273 output_asm_insn ("call\t%a0, 0", operands);
      if (final_sequence)
        {
          rtx delay = NEXT_INSN (insn);
          gcc_assert (delay);

5280 output_restore (PATTERN (delay));
5282 PATTERN (delay) = gen_blockage ();
5283 INSN_CODE (delay) = -1;
        }
      else
        output_restore (NULL_RTX);
    }

  return "";
}
5292 /* Functions for handling argument passing.
5294 For 32-bit, the first 6 args are normally in registers and the rest are
5295 pushed. Any arg that starts within the first 6 words is at least
5296 partially passed in a register unless its data type forbids.
5298 For 64-bit, the argument registers are laid out as an array of 16 elements
5299 and arguments are added sequentially. The first 6 int args and up to the
5300 first 16 fp args (depending on size) are passed in regs.
5302 Slot Stack Integral Float Float in structure Double Long Double
5303 ---- ----- -------- ----- ------------------ ------ -----------
5304 15 [SP+248] %f31 %f30,%f31 %d30
5305 14 [SP+240] %f29 %f28,%f29 %d28 %q28
5306 13 [SP+232] %f27 %f26,%f27 %d26
5307 12 [SP+224] %f25 %f24,%f25 %d24 %q24
5308 11 [SP+216] %f23 %f22,%f23 %d22
5309 10 [SP+208] %f21 %f20,%f21 %d20 %q20
5310 9 [SP+200] %f19 %f18,%f19 %d18
5311 8 [SP+192] %f17 %f16,%f17 %d16 %q16
5312 7 [SP+184] %f15 %f14,%f15 %d14
5313 6 [SP+176] %f13 %f12,%f13 %d12 %q12
5314 5 [SP+168] %o5 %f11 %f10,%f11 %d10
5315 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
5316 3 [SP+152] %o3 %f7 %f6,%f7 %d6
5317 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
5318 1 [SP+136] %o1 %f3 %f2,%f3 %d2
5319 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
5321 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
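   For example, reading row 1 of the table above: the argument in slot 1
   sits at [SP+136] on the stack, in %o1 if integral, in %f3 if float and
   in %d2 if double.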
   Integral arguments are always passed as 64-bit quantities appropriately
   extended.
5326 Passing of floating point values is handled as follows.
5327 If a prototype is in scope:
5328 If the value is in a named argument (i.e. not a stdarg function or a
     value not part of the `...') then the value is passed in the appropriate
     fp reg.
5331 If the value is part of the `...' and is passed in one of the first 6
5332 slots then the value is passed in the appropriate int reg.
5333 If the value is part of the `...' and is not passed in one of the first 6
5334 slots then the value is passed in memory.
5335 If a prototype is not in scope:
5336 If the value is one of the first 6 arguments the value is passed in the
5337 appropriate integer reg and the appropriate fp reg.
5338 If the value is not one of the first 6 arguments the value is passed in
5339 the appropriate fp reg and in memory.
5342 Summary of the calling conventions implemented by GCC on the SPARC:
   32-bit ABI:
                                size      argument     return value
5347 small integer <4 int. reg. int. reg.
5348 word 4 int. reg. int. reg.
5349 double word 8 int. reg. int. reg.
5351 _Complex small integer <8 int. reg. int. reg.
5352 _Complex word 8 int. reg. int. reg.
5353 _Complex double word 16 memory int. reg.
5355 vector integer <=8 int. reg. FP reg.
5356 vector integer >8 memory memory
5358 float 4 int. reg. FP reg.
5359 double 8 int. reg. FP reg.
5360 long double 16 memory memory
5362 _Complex float 8 memory FP reg.
5363 _Complex double 16 memory FP reg.
5364 _Complex long double 32 memory FP reg.
5366 vector float any memory memory
5368 aggregate any memory memory
   64-bit ABI:
                                size      argument     return value
5375 small integer <8 int. reg. int. reg.
5376 word 8 int. reg. int. reg.
5377 double word 16 int. reg. int. reg.
5379 _Complex small integer <16 int. reg. int. reg.
5380 _Complex word 16 int. reg. int. reg.
5381 _Complex double word 32 memory int. reg.
5383 vector integer <=16 FP reg. FP reg.
5384 vector integer 16<s<=32 memory FP reg.
5385 vector integer >32 memory memory
5387 float 4 FP reg. FP reg.
5388 double 8 FP reg. FP reg.
5389 long double 16 FP reg. FP reg.
5391 _Complex float 8 FP reg. FP reg.
5392 _Complex double 16 FP reg. FP reg.
5393 _Complex long double 32 memory FP reg.
5395 vector float <=16 FP reg. FP reg.
5396 vector float 16<s<=32 memory FP reg.
5397 vector float >32 memory memory
5399 aggregate <=16 reg. reg.
5400 aggregate 16<s<=32 memory reg.
5401 aggregate >32 memory memory
5405 Note #1: complex floating-point types follow the extended SPARC ABIs as
5406 implemented by the Sun compiler.
5408 Note #2: integral vector types follow the scalar floating-point types
5409 conventions to match what is implemented by the Sun VIS SDK.
   Note #3: floating-point vector types follow the aggregate types
            conventions.  */
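/* As an illustrative sketch of the 64-bit rules (not part of the ABI text
   above): a hypothetical

       struct s { float f; int i; };      (8 bytes, one slot)

   is passed in registers, the float field ending up in a floating-point
   register and the int field in the matching slice of the integer argument
   register, via the PARALLEL built by function_arg_record_value below.  */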
5415 /* Maximum number of int regs for args. */
5416 #define SPARC_INT_ARG_MAX 6
5417 /* Maximum number of fp regs for args. */
5418 #define SPARC_FP_ARG_MAX 16
5420 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
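/* E.g. with UNITS_PER_WORD == 8, ROUND_ADVANCE (10) is (10 + 7) / 8 == 2:
   sizes are rounded up to a whole number of words.  */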
5422 /* Handle the INIT_CUMULATIVE_ARGS macro.
5423 Initialize a variable CUM of type CUMULATIVE_ARGS
5424 for a call to a function whose data type is FNTYPE.
5425 For a library call, FNTYPE is 0. */
void
init_cumulative_args (struct sparc_args *cum, tree fntype,
5429 rtx libname ATTRIBUTE_UNUSED,
5430 tree fndecl ATTRIBUTE_UNUSED)
{
  cum->words = 0;
  cum->prototype_p = fntype && prototype_p (fntype);
  cum->libcall_p = fntype == 0;
}
5437 /* Handle promotion of pointer and integer arguments. */
5439 static enum machine_mode
5440 sparc_promote_function_mode (const_tree type,
5441 enum machine_mode mode,
                             int *punsignedp,
                             const_tree fntype ATTRIBUTE_UNUSED,
                             int for_return ATTRIBUTE_UNUSED)
{
5446 if (type != NULL_TREE && POINTER_TYPE_P (type))
    {
      *punsignedp = POINTERS_EXTEND_UNSIGNED;
      return Pmode;
    }
5452 /* Integral arguments are passed as full words, as per the ABI. */
5453 if (GET_MODE_CLASS (mode) == MODE_INT
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
    return word_mode;

  return mode;
}
5460 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
5463 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return TARGET_ARCH64 ? true : false;
}
5468 /* Scan the record type TYPE and return the following predicates:
5469 - INTREGS_P: the record contains at least one field or sub-field
5470 that is eligible for promotion in integer registers.
5471 - FP_REGS_P: the record contains at least one field or sub-field
5472 that is eligible for promotion in floating-point registers.
5473 - PACKED_P: the record contains at least one field that is packed.
5475 Sub-fields are not taken into account for the PACKED_P predicate. */
5478 scan_record_type (const_tree type, int *intregs_p, int *fpregs_p,
                  int *packed_p)
{
  tree field;

  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
    {
      if (TREE_CODE (field) == FIELD_DECL)
        {
5487 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5488 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
5489 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5490 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
                   && TARGET_FPU)
            *fpregs_p = 1;
          else
            *intregs_p = 1;

          if (packed_p && DECL_PACKED (field))
            *packed_p = 1;
        }
    }
}
5502 /* Compute the slot number to pass an argument in.
5503 Return the slot number or -1 if passing on the stack.
5505 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5506 the preceding args and about the function being called.
5507 MODE is the argument's machine mode.
5508 TYPE is the data type of the argument (as a tree).
   This is null for libcalls where that information may
   not be available.
5511 NAMED is nonzero if this argument is a named parameter
5512 (otherwise it is an extra parameter matching an ellipsis).
5513 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
5514 *PREGNO records the register number to use if scalar type.
5515 *PPADDING records the amount of padding needed in words. */
5518 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
5519 const_tree type, bool named, bool incoming_p,
                     int *pregno, int *ppadding)
{
5522 int regbase = (incoming_p
5523 ? SPARC_INCOMING_INT_ARG_FIRST
5524 : SPARC_OUTGOING_INT_ARG_FIRST);
5525 int slotno = cum->words;
  enum mode_class mclass;
  int regno;

  *ppadding = 0;

  if (type && TREE_ADDRESSABLE (type))
    return -1;

  if (TARGET_ARCH32
      && mode == BLKmode
      && type
      && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
    return -1;

5540 /* For SPARC64, objects requiring 16-byte alignment get it. */
  if (TARGET_ARCH64
      && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
5543 && (slotno & 1) != 0)
5544 slotno++, *ppadding = 1;
5546 mclass = GET_MODE_CLASS (mode);
  if (type && TREE_CODE (type) == VECTOR_TYPE)
    {
5549 /* Vector types deserve special treatment because they are
5550 polymorphic wrt their mode, depending upon whether VIS
5551 instructions are enabled. */
5552 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5554 /* The SPARC port defines no floating-point vector modes. */
          gcc_assert (mode == BLKmode);
        }
      else
        {
5559 /* Integral vector types should either have a vector
5560 mode or an integral mode, because we are guaranteed
5561 by pass_by_reference that their size is not greater
5562 than 16 bytes and TImode is 16-byte wide. */
5563 gcc_assert (mode != BLKmode);
          /* Vector integers are handled like floats according to
             the calling convention.  */
          mclass = MODE_FLOAT;
        }
    }

  switch (mclass)
    {
    case MODE_FLOAT:
5574 case MODE_COMPLEX_FLOAT:
5575 case MODE_VECTOR_INT:
5576 if (TARGET_ARCH64 && TARGET_FPU && named)
        {
          if (slotno >= SPARC_FP_ARG_MAX)
            return -1;
5580 regno = SPARC_FP_ARG_FIRST + slotno * 2;
5581 /* Arguments filling only one single FP register are
5582 right-justified in the outer double FP register. */
          if (GET_MODE_SIZE (mode) <= 4)
            regno++;
          break;
        }
      /* fallthrough */

    case MODE_INT:
5590 case MODE_COMPLEX_INT:
      if (slotno >= SPARC_INT_ARG_MAX)
        return -1;
      regno = regbase + slotno;
      break;

    case MODE_RANDOM:
5597 if (mode == VOIDmode)
        /* MODE is VOIDmode when generating the actual call.  */
        return -1;

5601 gcc_assert (mode == BLKmode);
      if (TARGET_ARCH32
          || !type
          || (TREE_CODE (type) != VECTOR_TYPE
5606 && TREE_CODE (type) != RECORD_TYPE))
        {
          if (slotno >= SPARC_INT_ARG_MAX)
            return -1;
          regno = regbase + slotno;
        }
5612 else /* TARGET_ARCH64 && type */
        {
          int intregs_p = 0, fpregs_p = 0, packed_p = 0;
5616 /* First see what kinds of registers we would need. */
5617 if (TREE_CODE (type) == VECTOR_TYPE)
            fpregs_p = 1;
          else
            scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
5622 /* The ABI obviously doesn't specify how packed structures
5623 are passed. These are defined to be passed in int regs
5624 if possible, otherwise memory. */
5625 if (packed_p || !named)
5626 fpregs_p = 0, intregs_p = 1;
5628 /* If all arg slots are filled, then must pass on stack. */
          if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
            return -1;
5632 /* If there are only int args and all int arg slots are filled,
5633 then must pass on stack. */
          if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
            return -1;
5637 /* Note that even if all int arg slots are filled, fp members may
5638 still be passed in regs if such regs are available.
5639 *PREGNO isn't set because there may be more than one, it's up
             to the caller to compute them.  */
        }
      break;

    default:
      gcc_unreachable ();
    }

  *pregno = regno;

  return slotno;
}
5653 /* Handle recursive register counting for structure field layout. */
5655 struct function_arg_record_value_parms
5657 rtx ret; /* return expression being built. */
5658 int slotno; /* slot number of the argument. */
5659 int named; /* whether the argument is named. */
5660 int regbase; /* regno of the base register. */
5661 int stack; /* 1 if part of the argument is on the stack. */
5662 int intoffset; /* offset of the first pending integer field. */
5663 unsigned int nregs; /* number of words passed in registers. */
5666 static void function_arg_record_value_3
5667 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
5668 static void function_arg_record_value_2
5669 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5670 static void function_arg_record_value_1
5671 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5672 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
5673 static rtx function_arg_union_value (int, enum machine_mode, int, int);
5675 /* A subroutine of function_arg_record_value. Traverse the structure
5676 recursively and determine how many registers will be required. */
5679 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
                             struct function_arg_record_value_parms *parms,
                             bool packed_p)
{
  tree field;

5685 /* We need to compute how many registers are needed so we can
5686 allocate the PARALLEL but before we can do that we need to know
5687 whether there are any packed fields. The ABI obviously doesn't
5688 specify how structures are passed in this case, so they are
5689 defined to be passed in int regs if possible, otherwise memory,
5690 regardless of whether there are fp values present. */
  if (!packed_p)
    for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
      {
        if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
          {
            packed_p = true;
            break;
          }
      }

5702 /* Compute how many registers we need. */
  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
    {
      if (TREE_CODE (field) == FIELD_DECL)
        {
5707 HOST_WIDE_INT bitpos = startbitpos;
5709 if (DECL_SIZE (field) != 0)
            {
              if (integer_zerop (DECL_SIZE (field)))
                continue;

5714 if (host_integerp (bit_position (field), 1))
5715 bitpos += int_bit_position (field);
            }
          /* ??? FIXME: else assume zero offset.  */
5720 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
            function_arg_record_value_1 (TREE_TYPE (field),
                                         bitpos,
                                         parms,
                                         packed_p);
5725 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5726 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
                   && TARGET_FPU
                   && parms->named
                   && ! packed_p)
            {
              if (parms->intoffset != -1)
                {
5733 unsigned int startbit, endbit;
5734 int intslots, this_slotno;
5736 startbit = parms->intoffset & -BITS_PER_WORD;
5737 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5739 intslots = (endbit - startbit) / BITS_PER_WORD;
5740 this_slotno = parms->slotno + parms->intoffset
5743 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
                    {
                      intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
                      /* We need to pass this field on the stack.  */
                      parms->stack = 1;
                    }

5750 parms->nregs += intslots;
                  parms->intoffset = -1;
                }

              /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
                 If it wasn't true we wouldn't be here.  */
5756 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5757 && DECL_MODE (field) == BLKmode)
5758 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
              else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
                parms->nregs += 2;
              else
                parms->nregs += 1;
            }
          else
            {
5766 if (parms->intoffset == -1)
                parms->intoffset = bitpos;
            }
        }
    }
}
5773 /* A subroutine of function_arg_record_value. Assign the bits of the
5774 structure between parms->intoffset and bitpos to integer registers. */
5777 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
                             struct function_arg_record_value_parms *parms)
{
  enum machine_mode mode;
  unsigned int regno;
  rtx reg;
5782 unsigned int startbit, endbit;
5783 int this_slotno, intslots, intoffset;
  if (parms->intoffset == -1)
    return;

5789 intoffset = parms->intoffset;
5790 parms->intoffset = -1;
5792 startbit = intoffset & -BITS_PER_WORD;
5793 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5794 intslots = (endbit - startbit) / BITS_PER_WORD;
5795 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
  intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
  if (intslots <= 0)
    return;

5801 /* If this is the trailing part of a word, only load that much into
5802 the register. Otherwise load the whole register. Note that in
5803 the latter case we may pick up unwanted bits. It's not a problem
5804 at the moment but may wish to revisit. */
5806 if (intoffset % BITS_PER_WORD != 0)
5807 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
                                   MODE_INT);
  else
    mode = word_mode;

  intoffset /= BITS_PER_UNIT;
  do
    {
      regno = parms->regbase + this_slotno;
5816 reg = gen_rtx_REG (mode, regno);
5817 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5818 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
      this_slotno += 1;
      intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
      mode = word_mode;
      parms->nregs += 1;
      intslots -= 1;
    }
  while (intslots > 0);
}
5829 /* A subroutine of function_arg_record_value. Traverse the structure
5830 recursively and assign bits to floating point registers. Track which
5831 bits in between need integer registers; invoke function_arg_record_value_3
5832 to make that happen. */
5835 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
                             struct function_arg_record_value_parms *parms,
                             bool packed_p)
{
  tree field;

  if (!packed_p)
    for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
      {
        if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
          {
            packed_p = true;
            break;
          }
      }

5851 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
    {
      if (TREE_CODE (field) == FIELD_DECL)
        {
5855 HOST_WIDE_INT bitpos = startbitpos;
5857 if (DECL_SIZE (field) != 0)
            {
              if (integer_zerop (DECL_SIZE (field)))
                continue;

5862 if (host_integerp (bit_position (field), 1))
5863 bitpos += int_bit_position (field);
            }
          /* ??? FIXME: else assume zero offset.  */
5868 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
            function_arg_record_value_2 (TREE_TYPE (field),
                                         bitpos,
                                         parms,
                                         packed_p);
5873 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5874 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
                   && TARGET_FPU
                   && parms->named
                   && ! packed_p)
            {
              int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5880 int regno, nregs, pos;
5881 enum machine_mode mode = DECL_MODE (field);
              rtx reg;

              function_arg_record_value_3 (bitpos, parms);
5886 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
                  && mode == BLKmode)
                {
                  mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
                  nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
                }
5892 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
                {
                  mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
                  nregs = 2;
                }
              else
                nregs = 1;

5900 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
              if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
                regno++;
5903 reg = gen_rtx_REG (mode, regno);
5904 pos = bitpos / BITS_PER_UNIT;
5905 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5906 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
              parms->nregs += 1;
              while (--nregs > 0)
                {
                  regno += GET_MODE_SIZE (mode) / 4;
5911 reg = gen_rtx_REG (mode, regno);
5912 pos += GET_MODE_SIZE (mode);
5913 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
                    = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
                  parms->nregs += 1;
                }
            }
          else
            {
5920 if (parms->intoffset == -1)
                parms->intoffset = bitpos;
            }
        }
    }
}
5927 /* Used by function_arg and sparc_function_value_1 to implement the complex
5928 conventions of the 64-bit ABI for passing and returning structures.
5929 Return an expression valid as a return value for the FUNCTION_ARG
5930 and TARGET_FUNCTION_VALUE.
5932 TYPE is the data type of the argument (as a tree).
   This is null for libcalls where that information may
   not be available.
5935 MODE is the argument's machine mode.
5936 SLOTNO is the index number of the argument's slot in the parameter array.
5937 NAMED is nonzero if this argument is a named parameter
5938 (otherwise it is an extra parameter matching an ellipsis).
5939 REGBASE is the regno of the base register for the parameter array. */
5942 function_arg_record_value (const_tree type, enum machine_mode mode,
                           int slotno, int named, int regbase)
{
5945 HOST_WIDE_INT typesize = int_size_in_bytes (type);
  struct function_arg_record_value_parms parms;
  int nregs;

5949 parms.ret = NULL_RTX;
5950 parms.slotno = slotno;
5951 parms.named = named;
  parms.regbase = regbase;
  parms.stack = 0;
5955 /* Compute how many registers we need. */
  parms.nregs = 0;
  parms.intoffset = 0;
5958 function_arg_record_value_1 (type, 0, &parms, false);
5960 /* Take into account pending integer fields. */
5961 if (parms.intoffset != -1)
    {
      unsigned int startbit, endbit;
5964 int intslots, this_slotno;
5966 startbit = parms.intoffset & -BITS_PER_WORD;
5967 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5968 intslots = (endbit - startbit) / BITS_PER_WORD;
5969 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5971 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
        {
          intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
          /* We need to pass this field on the stack.  */
          parms.stack = 1;
        }

      parms.nregs += intslots;
    }

5980 nregs = parms.nregs;
5982 /* Allocate the vector and handle some annoying special cases. */
  if (nregs == 0)
    {
      /* ??? Empty structure has no value?  Duh?  */
      if (typesize <= 0)
        {
5988 /* Though there's nothing really to store, return a word register
5989 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5990 leads to breakage due to the fact that there are zero bytes to
          return gen_rtx_REG (mode, regbase);
        }
      else
        {
5996 /* ??? C++ has structures with no fields, and yet a size. Give up
5997 for now and pass everything back in integer registers. */
5998 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6000 if (nregs + slotno > SPARC_INT_ARG_MAX)
            nregs = SPARC_INT_ARG_MAX - slotno;
        }
      gcc_assert (nregs != 0);
    }

6005 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
6007 /* If at least one field must be passed on the stack, generate
6008 (parallel [(expr_list (nil) ...) ...]) so that all fields will
6009 also be passed on the stack. We can't do much better because the
6010 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
6011 of structures for which the fields passed exclusively in registers
6012 are not at the beginning of the structure. */
  if (parms.stack)
    XVECEXP (parms.ret, 0, 0)
6015 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6017 /* Fill in the entries. */
  parms.nregs = 0;
  parms.intoffset = 0;
6020 function_arg_record_value_2 (type, 0, &parms, false);
6021 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
  gcc_assert (parms.nregs == nregs);

  return parms.ret;
}
6028 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6029 of the 64-bit ABI for passing and returning unions.
6030 Return an expression valid as a return value for the FUNCTION_ARG
6031 and TARGET_FUNCTION_VALUE.
6033 SIZE is the size in bytes of the union.
6034 MODE is the argument's machine mode.
6035 REGNO is the hard register the union will be passed in. */
6038 function_arg_union_value (int size, enum machine_mode mode, int slotno,
                          int regno)
{
  int nwords = ROUND_ADVANCE (size), i;
  rtx regs;
6044 /* See comment in previous function for empty structures. */
  if (size == 0)
    return gen_rtx_REG (mode, regno);

  if (slotno == SPARC_INT_ARG_MAX - 1)
    nwords = 1;

6051 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
  for (i = 0; i < nwords; i++)
    {
6055 /* Unions are passed left-justified. */
6056 XVECEXP (regs, 0, i)
6057 = gen_rtx_EXPR_LIST (VOIDmode,
6058 gen_rtx_REG (word_mode, regno),
                             GEN_INT (UNITS_PER_WORD * i));
      regno++;
    }

  return regs;
}
6066 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6067 for passing and returning large (BLKmode) vectors.
6068 Return an expression valid as a return value for the FUNCTION_ARG
6069 and TARGET_FUNCTION_VALUE.
6071 SIZE is the size in bytes of the vector (at least 8 bytes).
6072 REGNO is the FP hard register the vector will be passed in. */
static rtx
function_arg_vector_value (int size, int regno)
{
  int i, nregs = size / 8;
  rtx regs;

6080 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
6082 for (i = 0; i < nregs; i++)
    {
      XVECEXP (regs, 0, i)
6085 = gen_rtx_EXPR_LIST (VOIDmode,
                             gen_rtx_REG (DImode, regno + 2*i),
                             GEN_INT (i*8));
    }

  return regs;
}
6093 /* Determine where to put an argument to a function.
6094 Value is zero to push the argument on the stack,
6095 or a hard register in which to store the argument.
6097 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6098 the preceding args and about the function being called.
6099 MODE is the argument's machine mode.
6100 TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
6103 NAMED is true if this argument is a named parameter
6104 (otherwise it is an extra parameter matching an ellipsis).
6105 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
6106 TARGET_FUNCTION_INCOMING_ARG. */
6109 sparc_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
                      const_tree type, bool named, bool incoming_p)
{
6112 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6114 int regbase = (incoming_p
6115 ? SPARC_INCOMING_INT_ARG_FIRST
6116 : SPARC_OUTGOING_INT_ARG_FIRST);
6117 int slotno, regno, padding;
6118 enum mode_class mclass = GET_MODE_CLASS (mode);
  slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
                                &regno, &padding);
  if (slotno == -1)
    return 0;
6125 /* Vector types deserve special treatment because they are polymorphic wrt
6126 their mode, depending upon whether VIS instructions are enabled. */
6127 if (type && TREE_CODE (type) == VECTOR_TYPE)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
6130 gcc_assert ((TARGET_ARCH32 && size <= 8)
6131 || (TARGET_ARCH64 && size <= 16));
6133 if (mode == BLKmode)
6134 return function_arg_vector_value (size,
6135 SPARC_FP_ARG_FIRST + 2*slotno);
      mclass = MODE_FLOAT;
    }

  if (TARGET_ARCH32)
    return gen_rtx_REG (mode, regno);
6143 /* Structures up to 16 bytes in size are passed in arg slots on the stack
6144 and are promoted to registers if possible. */
6145 if (type && TREE_CODE (type) == RECORD_TYPE)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
6148 gcc_assert (size <= 16);
      return function_arg_record_value (type, mode, slotno, named, regbase);
    }
6153 /* Unions up to 16 bytes in size are passed in integer registers. */
6154 else if (type && TREE_CODE (type) == UNION_TYPE)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
6157 gcc_assert (size <= 16);
      return function_arg_union_value (size, mode, slotno, regno);
    }
6162 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
6163 but also have the slot allocated for them.
6164 If no prototype is in scope fp values in register slots get passed
6165 in two places, either fp regs and int regs or fp regs and memory. */
6166 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6167 && SPARC_FP_REG_P (regno))
    {
      rtx reg = gen_rtx_REG (mode, regno);
6170 if (cum->prototype_p || cum->libcall_p)
        {
          /* "* 2" because fp reg numbers are recorded in 4 byte
             vectors.  */
6175 /* ??? This will cause the value to be passed in the fp reg and
6176 in the stack. When a prototype exists we want to pass the
6177 value in the reg but reserve space on the stack. That's an
6178 optimization, and is deferred [for a bit]. */
6179 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
6180 return gen_rtx_PARALLEL (mode,
                                     gen_rtvec (2,
                                                gen_rtx_EXPR_LIST (VOIDmode,
6183 NULL_RTX, const0_rtx),
6184 gen_rtx_EXPR_LIST (VOIDmode,
                                                                   reg, const0_rtx)));
          else
            return reg;
        }
      else
        {
          /* ??? It seems that passing back a register even when past
6189 the area declared by REG_PARM_STACK_SPACE will allocate
6190 space appropriately, and will not copy the data onto the
6191 stack, exactly as we desire.
6193 This is due to locate_and_pad_parm being called in
6194 expand_call whenever reg_parm_stack_space > 0, which
6195 while beneficial to our example here, would seem to be
6196 in error from what had been intended. Ho hum... -- r~ */
          rtx v0, v1;

          if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
            {
              int intreg;

6208 /* On incoming, we don't need to know that the value
6209 is passed in %f0 and %i0, and it confuses other parts
6210 causing needless spillage even on the simplest cases. */
6214 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
6215 + (regno - SPARC_FP_ARG_FIRST) / 2);
6217 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6218 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
                                      const0_rtx);
              return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
            }
          else
            {
6224 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6225 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
              return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
            }
        }
    }
6231 /* All other aggregate types are passed in an integer register in a mode
6232 corresponding to the size of the type. */
6233 else if (type && AGGREGATE_TYPE_P (type))
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
6236 gcc_assert (size <= 16);
      mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
    }

  return gen_rtx_REG (mode, regno);
}
6244 /* Handle the TARGET_FUNCTION_ARG target hook. */
6247 sparc_function_arg (cumulative_args_t cum, enum machine_mode mode,
6248 const_tree type, bool named)
{
  return sparc_function_arg_1 (cum, mode, type, named, false);
}
6253 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
6256 sparc_function_incoming_arg (cumulative_args_t cum, enum machine_mode mode,
6257 const_tree type, bool named)
{
  return sparc_function_arg_1 (cum, mode, type, named, true);
}
6262 /* For sparc64, objects requiring 16 byte alignment are passed that way. */
static unsigned int
sparc_function_arg_boundary (enum machine_mode mode, const_tree type)
{
  return ((TARGET_ARCH64
6268 && (GET_MODE_ALIGNMENT (mode) == 128
           || (type && TYPE_ALIGN (type) == 128)))
          ? 128 : PARM_BOUNDARY);
}
6274 /* For an arg passed partly in registers and partly in memory,
6275 this is the number of bytes of registers used.
6276 For args passed entirely in registers or entirely in memory, zero.
6278 Any arg that starts in the first 6 regs but won't entirely fit in them
6279 needs partial registers on v8. On v9, structures with integer
6280 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
6281 values that begin in the last fp reg [where "last fp reg" varies with the
6282 mode] will be split between that reg and memory. */
6285 sparc_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
                         tree type, bool named)
{
6288 int slotno, regno, padding;
6290 /* We pass false for incoming_p here, it doesn't matter. */
6291 slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
                                false, &regno, &padding);
  if (slotno == -1)
    return 0;

  if (TARGET_ARCH32)
    {
6299 if ((slotno + (mode == BLKmode
6300 ? ROUND_ADVANCE (int_size_in_bytes (type))
6301 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
6302 > SPARC_INT_ARG_MAX)
        return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
    }
  else
    {
6307 /* We are guaranteed by pass_by_reference that the size of the
6308 argument is not greater than 16 bytes, so we only need to return
6309 one word if the argument is partially passed in registers. */
      if (type && AGGREGATE_TYPE_P (type))
        {
          int size = int_size_in_bytes (type);

6315 if (size > UNITS_PER_WORD
6316 && slotno == SPARC_INT_ARG_MAX - 1)
            return UNITS_PER_WORD;
        }
6319 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
6320 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
               && ! (TARGET_FPU && named)))
        {
6323 /* The complex types are passed as packed types. */
6324 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6325 && slotno == SPARC_INT_ARG_MAX - 1)
            return UNITS_PER_WORD;
        }
      else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
        {
          if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
              > SPARC_FP_ARG_MAX)
            return UNITS_PER_WORD;
        }
    }

  return 0;
}
6339 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
6340 Specify whether to pass the argument by reference. */
6343 sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
6344 enum machine_mode mode, const_tree type,
                         bool named ATTRIBUTE_UNUSED)
{
  if (TARGET_ARCH32)
6348 /* Original SPARC 32-bit ABI says that structures and unions,
6349 and quad-precision floats are passed by reference. For Pascal,
6350 also pass arrays by reference. All other base types are passed
6353 Extended ABI (as implemented by the Sun compiler) says that all
6354 complex floats are passed by reference. Pass complex integers
6355 in registers up to 8 bytes. More generally, enforce the 2-word
6356 cap for passing arguments in registers.
6358 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6359 integers are passed like floats of the same size, that is in
6360 registers up to 8 bytes. Pass all vector floats by reference
6361 like structure and unions. */
6362 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
6364 /* Catch CDImode, TFmode, DCmode and TCmode. */
6365 || GET_MODE_SIZE (mode) > 8
            || (type
                && TREE_CODE (type) == VECTOR_TYPE
6368 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
  else
    /* Original SPARC 64-bit ABI says that structures and unions
6371 smaller than 16 bytes are passed in registers, as well as
6372 all other base types.
6374 Extended ABI (as implemented by the Sun compiler) says that
6375 complex floats are passed in registers up to 16 bytes. Pass
6376 all complex integers in registers up to 16 bytes. More generally,
6377 enforce the 2-word cap for passing arguments in registers.
6379 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6380 integers are passed like floats of the same size, that is in
       registers (up to 16 bytes).  Pass all vector floats like structure
       and unions.  */
    return ((type
             && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
6385 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
6386 /* Catch CTImode and TCmode. */
            || GET_MODE_SIZE (mode) > 16);
}
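/* As concrete instances of the rules above: in 32-bit mode a 'long double'
   (TFmode, 16 bytes) or a '_Complex double' (DCmode, 16 bytes) argument is
   caught by the GET_MODE_SIZE (mode) > 8 test and passed by reference,
   whereas in 64-bit mode both fit under the 16-byte cap and are passed in
   registers.  */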
6390 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
6391 Update the data in CUM to advance over an argument
6392 of mode MODE and data type TYPE.
6393 TYPE is null for libcalls where that information may not be available. */
6396 sparc_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
6397 const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int regno, padding;

6402 /* We pass false for incoming_p here, it doesn't matter. */
  function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
6405 /* If argument requires leading padding, add it. */
6406 cum->words += padding;
  if (TARGET_ARCH32)
    {
      cum->words += (mode != BLKmode
6411 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
                     : ROUND_ADVANCE (int_size_in_bytes (type)));
    }
  else
    {
6416 if (type && AGGREGATE_TYPE_P (type))
6418 int size = int_size_in_bytes (type);
6422 else if (size <= 16)
6424 else /* passed by reference */
6429 cum->words += (mode != BLKmode
6430 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6431 : ROUND_ADVANCE (int_size_in_bytes (type)));
6436 /* Handle the FUNCTION_ARG_PADDING macro.
6437 For the 64-bit ABI, structs are always stored left shifted in their
6438 argument slot.  */
6440 enum direction
6441 function_arg_padding (enum machine_mode mode, const_tree type)
6443 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
6444   return upward;
6446 /* Fall back to the default. */
6447 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
6450 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
6451 Specify whether to return the return value in memory. */
6454 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6457 /* Original SPARC 32-bit ABI says that structures and unions,
6458 and quad-precision floats are returned in memory. All other
6459 base types are returned in registers.
6461 Extended ABI (as implemented by the Sun compiler) says that
6462 all complex floats are returned in registers (8 FP registers
6463 at most for '_Complex long double'). Return all complex integers
6464 in registers (4 at most for '_Complex long long').
6466 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6467 integers are returned like floats of the same size, that is in
6468 registers up to 8 bytes and in memory otherwise. Return all
6469 vector floats in memory like structures and unions; note that
6470 they always have BLKmode like the latter. */
6471 return (TYPE_MODE (type) == BLKmode
6472 || TYPE_MODE (type) == TFmode
6473 || (TREE_CODE (type) == VECTOR_TYPE
6474 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6476 /* Original SPARC 64-bit ABI says that structures and unions
6477 smaller than 32 bytes are returned in registers, as well as
6478 all other base types.
6480 Extended ABI (as implemented by the Sun compiler) says that all
6481 complex floats are returned in registers (8 FP registers at most
6482 for '_Complex long double'). Return all complex integers in
6483 registers (4 at most for '_Complex TItype').
6485 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6486 integers are returned like floats of the same size, that is in
6487 registers.  Return all vector floats like structures and unions;
6488 note that they always have BLKmode like the latter. */
6489 return (TYPE_MODE (type) == BLKmode
6490 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
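/* For illustration: in 64-bit mode a 40-byte struct is returned in
   memory, while a 24-byte struct comes back in registers.  */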
6493 /* Handle the TARGET_STRUCT_VALUE target hook.
6494 Return where to find the structure return value address. */
6497 sparc_struct_value_rtx (tree fndecl, int incoming)
6506 mem = gen_frame_mem (Pmode, plus_constant (frame_pointer_rtx,
6507 STRUCT_VALUE_OFFSET));
6509 mem = gen_frame_mem (Pmode, plus_constant (stack_pointer_rtx,
6510 STRUCT_VALUE_OFFSET));
6512 /* Only follow the SPARC ABI for fixed-size structure returns.
6513 Variable-sized structure returns are handled per the normal
6514 procedures in GCC.  This is enabled by -mstd-struct-return.  */
6516 && sparc_std_struct_return
6517 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
6518 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
6520 /* We must check and adjust the return address, as it is
6521 optional as to whether the return object is really
6522 provided or not.  */
6523 rtx ret_reg = gen_rtx_REG (Pmode, 31);
6524 rtx scratch = gen_reg_rtx (SImode);
6525 rtx endlab = gen_label_rtx ();
6527 /* Calculate the return object size.  */
6528 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
6529 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
6530 /* Construct a temporary return value.  */
6531 rtx temp_val
6532   = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
6534 /* Implement SPARC 32-bit psABI callee return struct checking:
6536 Fetch the instruction where we will return to and see if
6537 it's an unimp instruction (the most significant 10 bits
6538 will be zero).  */
6539 emit_move_insn (scratch, gen_rtx_MEM (SImode,
6540 plus_constant (ret_reg, 8)));
6541 /* Assume the size is valid and pre-adjust.  */
6542 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
6543 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
6545 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
6546 /* Write the address of the memory pointed to by temp_val into
6547 the memory pointed to by mem.  */
6548 emit_move_insn (mem, XEXP (temp_val, 0));
6549 emit_label (endlab);
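/* The caller-side sequence being tested for above looks roughly like
   (sketch):

	call	function
	 nop
	unimp	struct_size

   If the 'unimp' with the matching size is present, the callee returns
   to 'call + 12' instead of the usual 'call + 8'; otherwise the
   pre-adjustment of the return address is undone.  */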
6556 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
6557 For v9, function return values are subject to the same rules as arguments,
6558 except that up to 32 bytes may be returned in registers. */
6561 sparc_function_value_1 (const_tree type, enum machine_mode mode,
6564 /* Beware that the two values are swapped here wrt function_arg. */
6565 int regbase = (outgoing
6566 ? SPARC_INCOMING_INT_ARG_FIRST
6567 : SPARC_OUTGOING_INT_ARG_FIRST);
6568 enum mode_class mclass = GET_MODE_CLASS (mode);
6571 /* Vector types deserve special treatment because they are polymorphic wrt
6572 their mode, depending upon whether VIS instructions are enabled. */
6573 if (type && TREE_CODE (type) == VECTOR_TYPE)
6575 HOST_WIDE_INT size = int_size_in_bytes (type);
6576 gcc_assert ((TARGET_ARCH32 && size <= 8)
6577 || (TARGET_ARCH64 && size <= 32));
6579 if (mode == BLKmode)
6580 return function_arg_vector_value (size,
6581 SPARC_FP_ARG_FIRST);
6583 mclass = MODE_FLOAT;
6586 if (TARGET_ARCH64 && type)
6588 /* Structures up to 32 bytes in size are returned in registers. */
6589 if (TREE_CODE (type) == RECORD_TYPE)
6591 HOST_WIDE_INT size = int_size_in_bytes (type);
6592 gcc_assert (size <= 32);
6594 return function_arg_record_value (type, mode, 0, 1, regbase);
6597 /* Unions up to 32 bytes in size are returned in integer registers. */
6598 else if (TREE_CODE (type) == UNION_TYPE)
6600 HOST_WIDE_INT size = int_size_in_bytes (type);
6601 gcc_assert (size <= 32);
6603 return function_arg_union_value (size, mode, 0, regbase);
6606 /* Objects that require it are returned in FP registers. */
6607 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6610 /* All other aggregate types are returned in an integer register in a
6611 mode corresponding to the size of the type. */
6612 else if (AGGREGATE_TYPE_P (type))
6614 /* All other aggregate types are passed in an integer register
6615 in a mode corresponding to the size of the type. */
6616 HOST_WIDE_INT size = int_size_in_bytes (type);
6617 gcc_assert (size <= 32);
6619 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6621 /* ??? We probably should have made the same ABI change in
6622 3.4.0 as the one we made for unions. The latter was
6623 required by the SCD though, while the former is not
6624 specified, so we favored compatibility and efficiency.
6626 Now we're stuck for aggregates larger than 16 bytes,
6627 because OImode vanished in the meantime. Let's not
6628 try to be unduly clever, and simply follow the ABI
6629 for unions in that case. */
6630 if (mode == BLKmode)
6631 return function_arg_union_value (size, mode, 0, regbase);
6636 /* We should only have pointer and integer types at this point. This
6637 must match sparc_promote_function_mode. */
6638 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6642 /* We should only have pointer and integer types at this point. This must
6643 match sparc_promote_function_mode. */
6644 else if (TARGET_ARCH32
6645 && mclass == MODE_INT
6646 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6649 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
6650 regno = SPARC_FP_ARG_FIRST;
6654 return gen_rtx_REG (mode, regno);
6657 /* Handle TARGET_FUNCTION_VALUE.
6658 On the SPARC, the value is found in the first "output" register, but the
6659 called function leaves it in the first "input" register. */
6662 sparc_function_value (const_tree valtype,
6663 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
6666 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
6669 /* Handle TARGET_LIBCALL_VALUE. */
6672 sparc_libcall_value (enum machine_mode mode,
6673 const_rtx fun ATTRIBUTE_UNUSED)
6675 return sparc_function_value_1 (NULL_TREE, mode, false);
6678 /* Handle FUNCTION_VALUE_REGNO_P.
6679 On the SPARC, the first "output" reg is used for integer values, and the
6680 first floating point register is used for floating point values. */
6683 sparc_function_value_regno_p (const unsigned int regno)
6685 return (regno == 8 || regno == 32);
6688 /* Do what is necessary for `va_start'. We look at the current function
6689 to determine if stdarg or varargs is used and return the address of
6690 the first unnamed parameter. */
6693 sparc_builtin_saveregs (void)
6695 int first_reg = crtl->args.info.words;
6699 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
6700 emit_move_insn (gen_rtx_MEM (word_mode,
6701 gen_rtx_PLUS (Pmode,
6703 GEN_INT (FIRST_PARM_OFFSET (0)
6706 gen_rtx_REG (word_mode,
6707 SPARC_INCOMING_INT_ARG_FIRST + regno));
6709 address = gen_rtx_PLUS (Pmode,
6711 GEN_INT (FIRST_PARM_OFFSET (0)
6712 + UNITS_PER_WORD * first_reg));
6717 /* Implement `va_start' for stdarg. */
6720 sparc_va_start (tree valist, rtx nextarg)
6722 nextarg = expand_builtin_saveregs ();
6723 std_expand_builtin_va_start (valist, nextarg);
6726 /* Implement `va_arg' for stdarg. */
6729 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6730                        gimple_seq *post_p)
6732 HOST_WIDE_INT size, rsize, align;
6735 tree ptrtype = build_pointer_type (type);
6737 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6740 size = rsize = UNITS_PER_WORD;
6746 size = int_size_in_bytes (type);
6747 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6752 /* For SPARC64, objects requiring 16-byte alignment get it. */
6753 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
6754 align = 2 * UNITS_PER_WORD;
6756 /* SPARC-V9 ABI states that structures up to 16 bytes in size
6757 are left-justified in their slots. */
6758 if (AGGREGATE_TYPE_P (type))
6761 size = rsize = UNITS_PER_WORD;
6771 incr = fold_build_pointer_plus_hwi (incr, align - 1);
6772 incr = fold_convert (sizetype, incr);
6773 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
6775 incr = fold_convert (ptr_type_node, incr);
6778 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
6781 if (BYTES_BIG_ENDIAN && size < rsize)
6782 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
6786 addr = fold_convert (build_pointer_type (ptrtype), addr);
6787 addr = build_va_arg_indirect_ref (addr);
6790 /* If the address isn't aligned properly for the type, we need a temporary.
6791 FIXME: This is inefficient; usually we can do this in registers.  */
6792 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
6794 tree tmp = create_tmp_var (type, "va_arg_tmp");
6795 tree dest_addr = build_fold_addr_expr (tmp);
6796 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
6797 3, dest_addr, addr, size_int (rsize));
6798 TREE_ADDRESSABLE (tmp) = 1;
6799 gimplify_and_add (copy, pre_p);
6804 addr = fold_convert (ptrtype, addr);
6806 incr = fold_build_pointer_plus_hwi (incr, rsize);
6807 gimplify_assign (valist, incr, post_p);
6809 return build_va_arg_indirect_ref (addr);
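/* A sketch of what the code above generates for an aligned aggregate
   (names here are just for exposition):

     tmp  = (ap + align - 1) & -align;
     addr = tmp + (BYTES_BIG_ENDIAN && size < rsize ? rsize - size : 0);
     ap   = tmp + rsize;
     ...use *(type *) addr...
*/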
6812 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
6813 Specify whether the vector mode is supported by the hardware. */
6816 sparc_vector_mode_supported_p (enum machine_mode mode)
6818 return TARGET_VIS && VECTOR_MODE_P (mode);
6821 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
6823 static enum machine_mode
6824 sparc_preferred_simd_mode (enum machine_mode mode)
6842 /* Return the string to output an unconditional branch to LABEL, which is
6843 the operand number of the label.
6845 DEST is the destination insn (i.e. the label), INSN is the source. */
6848 output_ubranch (rtx dest, int label, rtx insn)
6850 static char string[64];
6851 bool v9_form = false;
6854 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
6856 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6857 - INSN_ADDRESSES (INSN_UID (insn)));
6858 /* Leave some instructions for "slop". */
6859 if (delta >= -260000 && delta < 260000)
6864 strcpy (string, "ba%*,pt\t%%xcc, ");
6866 strcpy (string, "b%*\t");
6868 p = strchr (string, '\0');
6879 /* Return the string to output a conditional branch to LABEL, which is
6880 the operand number of the label. OP is the conditional expression.
6881 XEXP (OP, 0) is assumed to be a condition code register (integer or
6882 floating point) and its mode specifies what kind of comparison we made.
6884 DEST is the destination insn (i.e. the label), INSN is the source.
6886 REVERSED is nonzero if we should reverse the sense of the comparison.
6888 ANNUL is nonzero if we should generate an annulling branch. */
6891 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
6892                 rtx insn)
6894 static char string[64];
6895 enum rtx_code code = GET_CODE (op);
6896 rtx cc_reg = XEXP (op, 0);
6897 enum machine_mode mode = GET_MODE (cc_reg);
6898 const char *labelno, *branch;
6899 int spaces = 8, far;
6902 /* v9 branches are limited to +-1MB. If it is too far away,
6915 fbne,a,pn %fcc2, .LC29
6923 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6926 /* Reversal of FP compares takes care -- an ordered compare
6927 becomes an unordered compare and vice versa. */
6928 if (mode == CCFPmode || mode == CCFPEmode)
6929 code = reverse_condition_maybe_unordered (code);
6931 code = reverse_condition (code);
6934 /* Start by writing the branch condition. */
6935 if (mode == CCFPmode || mode == CCFPEmode)
6986 /* ??? !v9: FP branches cannot be preceded by another floating point
6987 insn. Because there is currently no concept of pre-delay slots,
6988 we can fix this only by always emitting a nop before a floating
6989 point branch.  */
6991 string[0] = '\0';
6992 if (! TARGET_V9)
6993 strcpy (string, "nop\n\t");
6994 strcat (string, branch);
7007 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
7019 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
7040 strcpy (string, branch);
7042 spaces -= strlen (branch);
7043 p = strchr (string, '\0');
7045 /* Now add the annulling, the label, and a possible noop. */
7058 if (! far && insn && INSN_ADDRESSES_SET_P ())
7060 int delta = (INSN_ADDRESSES (INSN_UID (dest))
7061 - INSN_ADDRESSES (INSN_UID (insn)));
7062 /* Leave some instructions for "slop". */
7063 if (delta < -260000 || delta >= 260000)
7067 if (mode == CCFPmode || mode == CCFPEmode)
7069 static char v9_fcc_labelno[] = "%%fccX, ";
7070 /* Set the char indicating the number of the fcc reg to use. */
7071 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
7072 labelno = v9_fcc_labelno;
7075 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
7079 else if (mode == CCXmode || mode == CCX_NOOVmode)
7081 labelno = "%%xcc, ";
7086 labelno = "%%icc, ";
7091 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7094 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7107 strcpy (p, labelno);
7108 p = strchr (p, '\0');
7111 strcpy (p, ".+12\n\t nop\n\tb\t");
7112 /* Skip the next insn if requested or
7113 if we know that it will be a nop. */
7114 if (annul || ! final_sequence)
7128 /* Emit a library call comparison between floating point X and Y.
7129 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
7130 Return the new operator to be used in the comparison sequence.
7132 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
7133 values as arguments instead of the TFmode registers themselves,
7134 that's why we cannot call emit_float_lib_cmp. */
7137 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
7140 rtx slot0, slot1, result, tem, tem2, libfunc;
7141 enum machine_mode mode;
7142 enum rtx_code new_comparison;
7147 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
7151 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
7155 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
7159 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
7163 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
7167 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
7178 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
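      /* Note for the reader: the _Q_cmp/_Qp_cmp routines return 0 for
	 equal, 1 for less, 2 for greater and 3 for unordered; the code
	 below decodes that value into a condition on RESULT.  */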
7191 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
7192 emit_move_insn (slot0, x);
7199 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
7200 emit_move_insn (slot1, y);
7203 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7204 emit_library_call (libfunc, LCT_NORMAL,
7206 XEXP (slot0, 0), Pmode,
7207 XEXP (slot1, 0), Pmode);
7212 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7213 emit_library_call (libfunc, LCT_NORMAL,
7215 x, TFmode, y, TFmode);
7220 /* Immediately move the result of the libcall into a pseudo
7221 register so reload doesn't clobber the value if it needs
7222 the return register for a spill reg. */
7223 result = gen_reg_rtx (mode);
7224 emit_move_insn (result, hard_libcall_value (mode, libfunc));
7229 return gen_rtx_NE (VOIDmode, result, const0_rtx);
7232 new_comparison = (comparison == UNORDERED ? EQ : NE);
7233 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT (3));
7236 new_comparison = (comparison == UNGT ? GT : NE);
7237 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
7239 return gen_rtx_NE (VOIDmode, result, const2_rtx);
7241 tem = gen_reg_rtx (mode);
7243 emit_insn (gen_andsi3 (tem, result, const1_rtx));
7245 emit_insn (gen_anddi3 (tem, result, const1_rtx));
7246 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
7249 tem = gen_reg_rtx (mode);
7251 emit_insn (gen_addsi3 (tem, result, const1_rtx));
7253 emit_insn (gen_adddi3 (tem, result, const1_rtx));
7254 tem2 = gen_reg_rtx (mode);
7256 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
7258 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
7259 new_comparison = (comparison == UNEQ ? EQ : NE);
7260 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
7266 /* Generate an unsigned DImode to FP conversion. This is the same code
7267 optabs would emit if we didn't have TFmode patterns. */
7270 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
7272 rtx neglab, donelab, i0, i1, f0, in, out;
7275 in = force_reg (DImode, operands[1]);
7276 neglab = gen_label_rtx ();
7277 donelab = gen_label_rtx ();
7278 i0 = gen_reg_rtx (DImode);
7279 i1 = gen_reg_rtx (DImode);
7280 f0 = gen_reg_rtx (mode);
7282 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
7284 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
7285 emit_jump_insn (gen_jump (donelab));
7288 emit_label (neglab);
7290 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
7291 emit_insn (gen_anddi3 (i1, in, const1_rtx));
7292 emit_insn (gen_iordi3 (i0, i0, i1));
7293 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
7294 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
7296 emit_label (donelab);
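/* Equivalent C sketch of the code emitted above: a nonnegative input
   converts directly; otherwise halve with the low bit folded in (to
   keep rounding correct), convert, then double:

	long i = (x >> 1) | (x & 1);
	double d = (double) i;
	return d + d;
*/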
7299 /* Generate an FP to unsigned DImode conversion. This is the same code
7300 optabs would emit if we didn't have TFmode patterns. */
7303 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
7305 rtx neglab, donelab, i0, i1, f0, in, out, limit;
7308 in = force_reg (mode, operands[1]);
7309 neglab = gen_label_rtx ();
7310 donelab = gen_label_rtx ();
7311 i0 = gen_reg_rtx (DImode);
7312 i1 = gen_reg_rtx (DImode);
7313 limit = gen_reg_rtx (mode);
7314 f0 = gen_reg_rtx (mode);
7316 emit_move_insn (limit,
7317 CONST_DOUBLE_FROM_REAL_VALUE (
7318 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
7319 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
7321 emit_insn (gen_rtx_SET (VOIDmode,
7323 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
7324 emit_jump_insn (gen_jump (donelab));
7327 emit_label (neglab);
7329 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
7330 emit_insn (gen_rtx_SET (VOIDmode,
7332 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
7333 emit_insn (gen_movdi (i1, const1_rtx));
7334 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
7335 emit_insn (gen_xordi3 (out, i0, i1));
7337 emit_label (donelab);
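/* Equivalent C sketch of the code emitted above, with 2^63 as the limit:

	if (f < 9223372036854775808.0)
	  return (long) f;
	return (long) (f - 9223372036854775808.0) ^ ((unsigned long) 1 << 63);
*/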
7340 /* Return the string to output a conditional branch to LABEL, testing
7341 register REG. LABEL is the operand number of the label; REG is the
7342 operand number of the reg. OP is the conditional expression. The mode
7343 of REG says what kind of comparison we made.
7345 DEST is the destination insn (i.e. the label), INSN is the source.
7347 REVERSED is nonzero if we should reverse the sense of the comparison.
7349 ANNUL is nonzero if we should generate an annulling branch. */
7352 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
7353 int annul, rtx insn)
7355 static char string[64];
7356 enum rtx_code code = GET_CODE (op);
7357 enum machine_mode mode = GET_MODE (XEXP (op, 0));
7362 /* Branch-on-register instructions are limited to +-128KB.  If it is too far away,
7375 brgez,a,pn %o1, .LC29
7381 ba,pt %xcc, .LC29 */
7383 far = get_attr_length (insn) >= 3;
7385 /* If not floating-point or if EQ or NE, we can just reverse the code. */
7387 code = reverse_condition (code);
7389 /* Only 64-bit versions of these instructions exist.  */
7390 gcc_assert (mode == DImode);
7392 /* Start by writing the branch condition. */
7397 strcpy (string, "brnz");
7401 strcpy (string, "brz");
7405 strcpy (string, "brgez");
7409 strcpy (string, "brlz");
7413 strcpy (string, "brlez");
7417 strcpy (string, "brgz");
7424 p = strchr (string, '\0');
7426 /* Now add the annulling, reg, label, and nop. */
7433 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7436 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7441 *p = p < string + 8 ? '\t' : ' ';
7449 int veryfar = 1, delta;
7451 if (INSN_ADDRESSES_SET_P ())
7453 delta = (INSN_ADDRESSES (INSN_UID (dest))
7454 - INSN_ADDRESSES (INSN_UID (insn)));
7455 /* Leave some instructions for "slop". */
7456 if (delta >= -260000 && delta < 260000)
7460 strcpy (p, ".+12\n\t nop\n\t");
7461 /* Skip the next insn if requested or
7462 if we know that it will be a nop. */
7463 if (annul || ! final_sequence)
7473 strcpy (p, "ba,pt\t%%xcc, ");
7487 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
7488    Such instructions cannot be used in the delay slot of return insn on v9.
7489    If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
7490 */
7492 static int
7493 epilogue_renumber (register rtx *where, int test)
7495 register const char *fmt;
7497 register enum rtx_code code;
7502 code = GET_CODE (*where);
7507 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
7509 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
7510 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
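      /* For illustration: %i0 (regno 24) is rewritten to %o0 (regno 8),
	 since an insn placed in the return delay slot executes after the
	 register window has been restored to the caller's.  */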
7518 /* Do not replace the frame pointer with the stack pointer because
7519 it can cause the delayed instruction to load below the stack.
7520 This occurs when instructions like:
7522 (set (reg/i:SI 24 %i0)
7523 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
7524 (const_int -20 [0xffffffec])) 0))
7526 are in the return delayed slot. */
7528 if (GET_CODE (XEXP (*where, 0)) == REG
7529 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
7530 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
7531 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
7536 if (SPARC_STACK_BIAS
7537 && GET_CODE (XEXP (*where, 0)) == REG
7538 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
7546 fmt = GET_RTX_FORMAT (code);
7548 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7553 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
7554 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
7557 else if (fmt[i] == 'e'
7558 && epilogue_renumber (&(XEXP (*where, i)), test))
7564 /* Leaf functions and non-leaf functions have different needs. */
7566 static const int
7567 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
7569 static const int
7570 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
7572 static const int *const reg_alloc_orders[] = {
7573 reg_leaf_alloc_order,
7574 reg_nonleaf_alloc_order};
7577 order_regs_for_local_alloc (void)
7579 static int last_order_nonleaf = 1;
7581 if (df_regs_ever_live_p (15) != last_order_nonleaf)
7583 last_order_nonleaf = !last_order_nonleaf;
7584 memcpy ((char *) reg_alloc_order,
7585 (const char *) reg_alloc_orders[last_order_nonleaf],
7586 FIRST_PSEUDO_REGISTER * sizeof (int));
7590 /* Return 1 if REG and MEM are legitimate enough to allow the various
7591 mem<-->reg splits to be run. */
7594 sparc_splitdi_legitimate (rtx reg, rtx mem)
7596 /* Punt if we are here by mistake. */
7597 gcc_assert (reload_completed);
7599 /* We must have an offsettable memory reference. */
7600 if (! offsettable_memref_p (mem))
7603 /* If we have legitimate args for ldd/std, we do not want
7604 the split to happen. */
7605 if ((REGNO (reg) % 2) == 0
7606 && mem_min_alignment (mem, 8))
7613 /* Return 1 if x and y are some kind of REG and they refer to
7614 different hard registers. This test is guaranteed to be
7615 run after reload. */
7618 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
7620 if (GET_CODE (x) != REG)
7622 if (GET_CODE (y) != REG)
7624 if (REGNO (x) == REGNO (y))
7629 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
7630 This makes them candidates for using ldd and std insns.
7632 Note reg1 and reg2 *must* be hard registers. */
7635 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
7637 /* We might have been passed a SUBREG. */
7638 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
7641 if (REGNO (reg1) % 2 != 0)
7644 /* Integer ldd is deprecated in SPARC V9.  */
7645 if (TARGET_V9 && REGNO (reg1) < 32)
7648 return (REGNO (reg1) == REGNO (reg2) - 1);
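/* For illustration: %o0/%o1 (regnos 8 and 9) form a valid pair, while
   %o1/%o2 or %o2/%o1 do not; on V9, integer pairs are rejected and only
   FP register pairs qualify.  */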
7651 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
7652    an ldd or std insn.
7654 This can only happen when addr1 and addr2, the addresses in mem1
7655 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
7656 addr1 must also be aligned on a 64-bit boundary.
7658 Also, if dependent_reg_rtx is not null, it should not be used to
7659 compute the address for mem1, i.e. we cannot optimize a sequence
7660 like:
7661 	ld [%o0], %o0
7662 	ld [%o0 + 4], %o1
7663 to
7664 	ldd [%o0], %o0
7665 nor:
7666 	ld [%g3 + 4], %g3
7667 	ld [%g3], %g2
7668 to
7669 	ldd [%g3], %g2
7671 But, note that the transformation from:
7672 	ld [%g2 + 4], %g3
7673 	ld [%g2], %g2
7674 to
7675 	ldd [%g2], %g2
7676 is perfectly fine.  Thus, the peephole2 patterns always pass us
7677 the destination register of the first load, never the second one.
7679 For stores we don't have a similar problem, so dependent_reg_rtx is
7680 NULL_RTX.  */
7682 int
7683 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
7687 HOST_WIDE_INT offset1;
7689 /* The mems cannot be volatile. */
7690 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
7693 /* MEM1 should be aligned on a 64-bit boundary. */
7694 if (MEM_ALIGN (mem1) < 64)
7697 addr1 = XEXP (mem1, 0);
7698 addr2 = XEXP (mem2, 0);
7700 /* Extract a register number and offset (if used) from the first addr. */
7701 if (GET_CODE (addr1) == PLUS)
7703 /* If not a REG, return zero. */
7704 if (GET_CODE (XEXP (addr1, 0)) != REG)
7708 reg1 = REGNO (XEXP (addr1, 0));
7709 /* The offset must be constant! */
7710 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
7712 offset1 = INTVAL (XEXP (addr1, 1));
7715 else if (GET_CODE (addr1) != REG)
7719 reg1 = REGNO (addr1);
7720 /* This was a simple (mem (reg)) expression.  Offset is 0.  */
7721 offset1 = 0;
7724 /* Make sure the second address is a (mem (plus (reg) (const_int))).  */
7725 if (GET_CODE (addr2) != PLUS)
7728 if (GET_CODE (XEXP (addr2, 0)) != REG
7729 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
7732 if (reg1 != REGNO (XEXP (addr2, 0)))
7735 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
7738 /* The first offset must be evenly divisible by 8 to ensure the
7739 address is 64 bit aligned. */
7740 if (offset1 % 8 != 0)
7743 /* The offset for the second addr must be 4 more than the first addr. */
7744 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
7747 /* All the tests passed.  addr1 and addr2 are valid for ldd and std
7748    insns.  */
7749 return 1;
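/* For illustration: [%o0 + 8] and [%o0 + 12] form a valid pair (first
   offset divisible by 8, second address exactly 4 bytes higher), while
   [%o0 + 4] and [%o0 + 8] fail the alignment test.  */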
7752 /* Return 1 if reg is a pseudo, or is the first register in
7753 a hard register pair. This makes it suitable for use in
7754 ldd and std insns. */
7757 register_ok_for_ldd (rtx reg)
7759 /* We might have been passed a SUBREG. */
7763 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
7764 return (REGNO (reg) % 2 == 0);
7769 /* Return 1 if OP is a memory whose address is known to be
7770 aligned to 8-byte boundary, or a pseudo during reload.
7771 This makes it suitable for use in ldd and std insns. */
7774 memory_ok_for_ldd (rtx op)
7778 /* In 64-bit mode, we assume that the address is word-aligned. */
7779 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
7782 if ((reload_in_progress || reload_completed)
7783 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
7786 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
7788 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
7797 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
7800 sparc_print_operand_punct_valid_p (unsigned char code)
7813 /* Implement TARGET_PRINT_OPERAND.
7814 Print operand X (an rtx) in assembler syntax to file FILE.
7815 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
7816 For `%' followed by punctuation, CODE is the punctuation and X is null. */
7819 sparc_print_operand (FILE *file, rtx x, int code)
7824 /* Output an insn in a delay slot. */
7826 sparc_indent_opcode = 1;
7828 fputs ("\n\t nop", file);
7831 /* Output an annul flag if there's nothing for the delay slot and we
7832 are optimizing. This is always used with '(' below.
7833 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
7834 this is a dbx bug. So, we only do this when optimizing.
7835 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
7836 Always emit a nop in case the next instruction is a branch. */
7837 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
7841 /* Output a 'nop' if there's nothing for the delay slot and we are
7842 not optimizing. This is always used with '*' above. */
7843 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
7844 fputs ("\n\t nop", file);
7845 else if (final_sequence)
7846 sparc_indent_opcode = 1;
7849 /* Output the right displacement from the saved PC on function return.
7850 The caller may have placed an "unimp" insn immediately after the call
7851 so we have to account for it. This insn is used in the 32-bit ABI
7852 when calling a function that returns a non zero-sized structure. The
7853 64-bit ABI doesn't have it. Be careful to have this test be the same
7854 as that for the call. The exception is when sparc_std_struct_return
7855 is enabled, the psABI is followed exactly and the adjustment is made
7856 by the code in sparc_struct_value_rtx. The call emitted is the same
7857 when sparc_std_struct_return is enabled. */
7859 && cfun->returns_struct
7860 && !sparc_std_struct_return
7861 && DECL_SIZE (DECL_RESULT (current_function_decl))
7862 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
7864 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
7870 /* Output the Embedded Medium/Anywhere code model base register. */
7871 fputs (EMBMEDANY_BASE_REG, file);
7874 /* Print some local dynamic TLS name. */
7875 assemble_name (file, get_some_local_dynamic_name ());
7879 /* Adjust the operand to take into account a RESTORE operation. */
7880 if (GET_CODE (x) == CONST_INT)
7881   break;
7882 else if (GET_CODE (x) != REG)
7883 output_operand_lossage ("invalid %%Y operand");
7884 else if (REGNO (x) < 8)
7885 fputs (reg_names[REGNO (x)], file);
7886 else if (REGNO (x) >= 24 && REGNO (x) < 32)
7887 fputs (reg_names[REGNO (x)-16], file);
7889 output_operand_lossage ("invalid %%Y operand");
7892 /* Print out the low order register name of a register pair. */
7893 if (WORDS_BIG_ENDIAN)
7894 fputs (reg_names[REGNO (x)+1], file);
7896 fputs (reg_names[REGNO (x)], file);
7899 /* Print out the high order register name of a register pair. */
7900 if (WORDS_BIG_ENDIAN)
7901 fputs (reg_names[REGNO (x)], file);
7903 fputs (reg_names[REGNO (x)+1], file);
7906 /* Print out the second register name of a register pair or quad.
7907 I.e., R (%o0) => %o1. */
7908 fputs (reg_names[REGNO (x)+1], file);
7911 /* Print out the third register name of a register quad.
7912 I.e., S (%o0) => %o2. */
7913 fputs (reg_names[REGNO (x)+2], file);
7916 /* Print out the fourth register name of a register quad.
7917 I.e., T (%o0) => %o3. */
7918 fputs (reg_names[REGNO (x)+3], file);
7921 /* Print a condition code register. */
7922 if (REGNO (x) == SPARC_ICC_REG)
7924 /* We don't handle CC[X]_NOOVmode because they're not supposed
7925    to occur here.  */
7926 if (GET_MODE (x) == CCmode)
7927 fputs ("%icc", file);
7928 else if (GET_MODE (x) == CCXmode)
7929 fputs ("%xcc", file);
7934 /* %fccN register */
7935 fputs (reg_names[REGNO (x)], file);
7938 /* Print the operand's address only. */
7939 output_address (XEXP (x, 0));
7942 /* In this case we need a register. Use %g0 if the
7943 operand is const0_rtx. */
7944 if (x == const0_rtx
7945     || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7947 fputs ("%g0", file);
7954 switch (GET_CODE (x))
7956 case IOR: fputs ("or", file); break;
7957 case AND: fputs ("and", file); break;
7958 case XOR: fputs ("xor", file); break;
7959 default: output_operand_lossage ("invalid %%A operand");
7964 switch (GET_CODE (x))
7966 case IOR: fputs ("orn", file); break;
7967 case AND: fputs ("andn", file); break;
7968 case XOR: fputs ("xnor", file); break;
7969 default: output_operand_lossage ("invalid %%B operand");
7973 /* These are used by the conditional move instructions. */
7977 enum rtx_code rc = GET_CODE (x);
7981 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7982 if (mode == CCFPmode || mode == CCFPEmode)
7983 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7985 rc = reverse_condition (GET_CODE (x));
7989 case NE: fputs ("ne", file); break;
7990 case EQ: fputs ("e", file); break;
7991 case GE: fputs ("ge", file); break;
7992 case GT: fputs ("g", file); break;
7993 case LE: fputs ("le", file); break;
7994 case LT: fputs ("l", file); break;
7995 case GEU: fputs ("geu", file); break;
7996 case GTU: fputs ("gu", file); break;
7997 case LEU: fputs ("leu", file); break;
7998 case LTU: fputs ("lu", file); break;
7999 case LTGT: fputs ("lg", file); break;
8000 case UNORDERED: fputs ("u", file); break;
8001 case ORDERED: fputs ("o", file); break;
8002 case UNLT: fputs ("ul", file); break;
8003 case UNLE: fputs ("ule", file); break;
8004 case UNGT: fputs ("ug", file); break;
8005 case UNGE: fputs ("uge", file); break;
8006 case UNEQ: fputs ("ue", file); break;
8007 default: output_operand_lossage (code == 'c'
8008 ? "invalid %%c operand"
8009 : "invalid %%C operand");
8014 /* These are used by the movr instruction pattern. */
8018 enum rtx_code rc = (code == 'd'
8019 ? reverse_condition (GET_CODE (x))
8023 case NE: fputs ("ne", file); break;
8024 case EQ: fputs ("e", file); break;
8025 case GE: fputs ("gez", file); break;
8026 case LT: fputs ("lz", file); break;
8027 case LE: fputs ("lez", file); break;
8028 case GT: fputs ("gz", file); break;
8029 default: output_operand_lossage (code == 'd'
8030 ? "invalid %%d operand"
8031 : "invalid %%D operand");
8038 /* Print a sign-extended character. */
8039 int i = trunc_int_for_mode (INTVAL (x), QImode);
8040 fprintf (file, "%d", i);
8045 /* Operand must be a MEM; write its address. */
8046 if (GET_CODE (x) != MEM)
8047 output_operand_lossage ("invalid %%f operand");
8048 output_address (XEXP (x, 0));
8053 /* Print a sign-extended 32-bit value. */
8055 if (GET_CODE (x) == CONST_INT)
8056   i = INTVAL (x);
8057 else if (GET_CODE (x) == CONST_DOUBLE)
8058 i = CONST_DOUBLE_LOW (x);
8061 output_operand_lossage ("invalid %%s operand");
8064 i = trunc_int_for_mode (i, SImode);
8065 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
8070 /* Do nothing special. */
8074 /* Undocumented flag. */
8075 output_operand_lossage ("invalid operand output code");
8078 if (GET_CODE (x) == REG)
8079 fputs (reg_names[REGNO (x)], file);
8080 else if (GET_CODE (x) == MEM)
8083 /* Poor Sun assembler doesn't understand absolute addressing. */
8084 if (CONSTANT_P (XEXP (x, 0)))
8085 fputs ("%g0+", file);
8086 output_address (XEXP (x, 0));
8089 else if (GET_CODE (x) == HIGH)
8091 fputs ("%hi(", file);
8092 output_addr_const (file, XEXP (x, 0));
8095 else if (GET_CODE (x) == LO_SUM)
8097 sparc_print_operand (file, XEXP (x, 0), 0);
8098 if (TARGET_CM_MEDMID)
8099 fputs ("+%l44(", file);
8101 fputs ("+%lo(", file);
8102 output_addr_const (file, XEXP (x, 1));
8105 else if (GET_CODE (x) == CONST_DOUBLE
8106 && (GET_MODE (x) == VOIDmode
8107 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
8109 if (CONST_DOUBLE_HIGH (x) == 0)
8110 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
8111 else if (CONST_DOUBLE_HIGH (x) == -1
8112 && CONST_DOUBLE_LOW (x) < 0)
8113 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
8115 output_operand_lossage ("long long constant not a valid immediate operand");
8117 else if (GET_CODE (x) == CONST_DOUBLE)
8118 output_operand_lossage ("floating point constant not a valid immediate operand");
8119 else { output_addr_const (file, x); }
8122 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
8125 sparc_print_operand_address (FILE *file, rtx x)
8127 register rtx base, index = 0;
8128 int offset = 0;
8129 register rtx addr = x;
8131 if (REG_P (addr))
8132   fputs (reg_names[REGNO (addr)], file);
8133 else if (GET_CODE (addr) == PLUS)
8135 if (CONST_INT_P (XEXP (addr, 0)))
8136 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
8137 else if (CONST_INT_P (XEXP (addr, 1)))
8138 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
8140 base = XEXP (addr, 0), index = XEXP (addr, 1);
8141 if (GET_CODE (base) == LO_SUM)
8143 gcc_assert (USE_AS_OFFSETABLE_LO10
8145 && ! TARGET_CM_MEDMID);
8146 output_operand (XEXP (base, 0), 0);
8147 fputs ("+%lo(", file);
8148 output_address (XEXP (base, 1));
8149 fprintf (file, ")+%d", offset);
8153 fputs (reg_names[REGNO (base)], file);
8155 fprintf (file, "%+d", offset);
8156 else if (REG_P (index))
8157 fprintf (file, "+%s", reg_names[REGNO (index)]);
8158 else if (GET_CODE (index) == SYMBOL_REF
8159 || GET_CODE (index) == LABEL_REF
8160 || GET_CODE (index) == CONST)
8161 fputc ('+', file), output_addr_const (file, index);
8162 else gcc_unreachable ();
8165 else if (GET_CODE (addr) == MINUS
8166 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
8168 output_addr_const (file, XEXP (addr, 0));
8169 fputs ("-(", file);
8170 output_addr_const (file, XEXP (addr, 1));
8171 fputs ("-.)", file);
8173 else if (GET_CODE (addr) == LO_SUM)
8175 output_operand (XEXP (addr, 0), 0);
8176 if (TARGET_CM_MEDMID)
8177 fputs ("+%l44(", file);
8179 fputs ("+%lo(", file);
8180 output_address (XEXP (addr, 1));
8184 && GET_CODE (addr) == CONST
8185 && GET_CODE (XEXP (addr, 0)) == MINUS
8186 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
8187 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
8188 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
8190 addr = XEXP (addr, 0);
8191 output_addr_const (file, XEXP (addr, 0));
8192 /* Group the args of the second CONST in parentheses.  */
8194 /* Skip past the second CONST--it does nothing for us. */
8195 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
8196 /* Close the parenthesis. */
8201 output_addr_const (file, addr);
8205 /* Target hook for assembling integer objects. The sparc version has
8206 special handling for aligned DI-mode objects. */
8209 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
8211 /* ??? We only output .xword's for symbols and only then in environments
8212 where the assembler can handle them. */
8213 if (aligned_p && size == 8
8214 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
8218 assemble_integer_with_op ("\t.xword\t", x);
8223 assemble_aligned_integer (4, const0_rtx);
8224 assemble_aligned_integer (4, x);
8228 return default_assemble_integer (x, size, aligned_p);
8231 /* Return the value of a code used in the .proc pseudo-op that says
8232 what kind of result this function returns. For non-C types, we pick
8233 the closest C type. */
8235 #ifndef SHORT_TYPE_SIZE
8236 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
8239 #ifndef INT_TYPE_SIZE
8240 #define INT_TYPE_SIZE BITS_PER_WORD
8243 #ifndef LONG_TYPE_SIZE
8244 #define LONG_TYPE_SIZE BITS_PER_WORD
8247 #ifndef LONG_LONG_TYPE_SIZE
8248 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
8251 #ifndef FLOAT_TYPE_SIZE
8252 #define FLOAT_TYPE_SIZE BITS_PER_WORD
8255 #ifndef DOUBLE_TYPE_SIZE
8256 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8259 #ifndef LONG_DOUBLE_TYPE_SIZE
8260 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8264 sparc_type_code (register tree type)
8266 register unsigned long qualifiers = 0;
8267 register unsigned shift;
8269 /* Only the first 30 bits of the qualifier are valid. We must refrain from
8270 setting more, since some assemblers will give an error for this. Also,
8271 we must be careful to avoid shifts of 32 bits or more to avoid getting
8272 unpredictable results. */
8274 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
8276 switch (TREE_CODE (type))
8282 qualifiers |= (3 << shift);
8287 qualifiers |= (2 << shift);
8291 case REFERENCE_TYPE:
8293 qualifiers |= (1 << shift);
8297 return (qualifiers | 8);
8300 case QUAL_UNION_TYPE:
8301 return (qualifiers | 9);
8304 return (qualifiers | 10);
8307 return (qualifiers | 16);
8310 /* If this is a range type, consider it to be the underlying
8311    type.  */
8312 if (TREE_TYPE (type) != 0)
8313   break;
8315 /* Carefully distinguish all the standard types of C,
8316 without messing up if the language is not C. We do this by
8317 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
8318 look at both the names and the above fields, but that's redundant.
8319 Any type whose size is between two C types will be considered
8320 to be the wider of the two types. Also, we do not have a
8321 special code to use for "long long", so anything wider than
8322 long is treated the same. Note that we can't distinguish
8323 between "int" and "long" in this code if they are the same
8324 size, but that's fine, since neither can the assembler. */
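	  /* For illustration: under these rules a 16-bit 'unsigned
	     short' yields code 13 and a 32-bit 'int' yields code 4,
	     while 'unsigned short *' emits the pointer qualifier
	     (1 << shift) followed by 13 at the next level.  */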
8326 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
8327 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
8329 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
8330 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
8332 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
8333 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
8336 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
8339 /* If this is a range type, consider it to be the underlying
8340    type.  */
8341 if (TREE_TYPE (type) != 0)
8342   break;
8344 /* Carefully distinguish all the standard types of C,
8345 without messing up if the language is not C. */
8347 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
8348 return (qualifiers | 6);
8351 return (qualifiers | 7);
8353 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
8354 /* ??? We need to distinguish between double and float complex types,
8355 but I don't know how yet because I can't reach this code from
8356 existing front-ends. */
8357 return (qualifiers | 7); /* Who knows? */
8360 case BOOLEAN_TYPE: /* Boolean truth value type. */
8366 gcc_unreachable (); /* Not a type! */
8373 /* Nested function support. */
8375 /* Emit RTL insns to initialize the variable parts of a trampoline.
8376 FNADDR is an RTX for the address of the function's pure code.
8377 CXT is an RTX for the static chain value for the function.
8379 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
8380 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
8381 (to store insns). This is a bit excessive. Perhaps a different
8382 mechanism would be better here.
8384 Emit enough FLUSH insns to synchronize the data and instruction caches. */
8387 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8389 /* SPARC 32-bit trampoline:
8391 	sethi	%hi(fn), %g1
8392 	sethi	%hi(static), %g2
8393 	jmp	%g1+%lo(fn)
8394 	or	%g2, %lo(static), %g2
8396 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
8397 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii */
8400 emit_move_insn
8401 (adjust_address (m_tramp, SImode, 0),
8402 expand_binop (SImode, ior_optab,
8403 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
8404 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
8405 NULL_RTX, 1, OPTAB_DIRECT));
8407 emit_move_insn
8408 (adjust_address (m_tramp, SImode, 4),
8409 expand_binop (SImode, ior_optab,
8410 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
8411 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
8412 NULL_RTX, 1, OPTAB_DIRECT));
8414 emit_move_insn
8415 (adjust_address (m_tramp, SImode, 8),
8416 expand_binop (SImode, ior_optab,
8417 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
8418 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
8419 NULL_RTX, 1, OPTAB_DIRECT));
8421 emit_move_insn
8422 (adjust_address (m_tramp, SImode, 12),
8423 expand_binop (SImode, ior_optab,
8424 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
8425 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
8426 NULL_RTX, 1, OPTAB_DIRECT));
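  /* To make the stores above concrete: 0x03000000 is 'sethi %hi(0), %g1'
     and 0x81c06000 is 'jmpl %g1 + 0, %g0'; or-ing in the two halves of
     the split address completes each instruction.  */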
8428 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
8429 aligned on a 16 byte boundary so one flush clears it all. */
8430 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
8431 if (sparc_cpu != PROCESSOR_ULTRASPARC
8432 && sparc_cpu != PROCESSOR_ULTRASPARC3
8433 && sparc_cpu != PROCESSOR_NIAGARA
8434 && sparc_cpu != PROCESSOR_NIAGARA2
8435 && sparc_cpu != PROCESSOR_NIAGARA3
8436 && sparc_cpu != PROCESSOR_NIAGARA4)
8437 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
8439 /* Call __enable_execute_stack after writing onto the stack to make sure
8440 the stack address is accessible. */
8441 #ifdef HAVE_ENABLE_EXECUTE_STACK
8442 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8443 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8448 /* The 64-bit version is simpler because it makes more sense to load the
8449 values as "immediate" data out of the trampoline. It's also easier since
8450 we can read the PC without clobbering a register. */
8453 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8455 /* SPARC 64-bit trampoline:
8457 	rd	%pc, %g1
8458 	ldx	[%g1+24], %g5
8459 	jmp	%g5
8460 	ldx	[%g1+16], %g5
8461 	+16 bytes data
8462  */
8464 emit_move_insn (adjust_address (m_tramp, SImode, 0),
8465 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
8466 emit_move_insn (adjust_address (m_tramp, SImode, 4),
8467 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
8468 emit_move_insn (adjust_address (m_tramp, SImode, 8),
8469 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
8470 emit_move_insn (adjust_address (m_tramp, SImode, 12),
8471 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
8472 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
8473 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
8474 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
8476 if (sparc_cpu != PROCESSOR_ULTRASPARC
8477 && sparc_cpu != PROCESSOR_ULTRASPARC3
8478 && sparc_cpu != PROCESSOR_NIAGARA
8479 && sparc_cpu != PROCESSOR_NIAGARA2
8480 && sparc_cpu != PROCESSOR_NIAGARA3
8481 && sparc_cpu != PROCESSOR_NIAGARA4)
8482 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
8484 /* Call __enable_execute_stack after writing onto the stack to make sure
8485 the stack address is accessible. */
8486 #ifdef HAVE_ENABLE_EXECUTE_STACK
8487 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8488 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8492 /* Worker for TARGET_TRAMPOLINE_INIT. */
8495 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
8497 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
8498 cxt = force_reg (Pmode, cxt);
8500 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
8502 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
8505 /* Adjust the cost of a scheduling dependency. Return the new cost of
8506 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
8509 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8511 enum attr_type insn_type;
8513 if (! recog_memoized (insn))
8516 insn_type = get_attr_type (insn);
8518 if (REG_NOTE_KIND (link) == 0)
8520 /* Data dependency; DEP_INSN writes a register that INSN reads some
8521    cycles later.  */
8523 /* if a load, then the dependence must be on the memory address;
8524 add an extra "cycle". Note that the cost could be two cycles
8525 if the reg was written late in an instruction group; we cannot tell
8526 here.  */
8527 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
8530 /* Get the delay only if the address of the store is the dependence. */
8531 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
8533 rtx pat = PATTERN (insn);
8534 rtx dep_pat = PATTERN (dep_insn);
8536 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8537 return cost; /* This should not happen! */
8539 /* The dependency between the two instructions was on the data that
8540 is being stored. Assume that this implies that the address of the
8541 store is not dependent. */
8542 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8545 return cost + 3; /* An approximation. */
8548 /* A shift instruction cannot receive its data from an instruction
8549 in the same cycle; add a one cycle penalty. */
8550 if (insn_type == TYPE_SHIFT)
8551 return cost + 3; /* Split before cascade into shift. */
8555 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
8556 INSN writes some cycles later. */
8558 /* These are only significant for the fpu unit; writing a fp reg before
8559 the fpu has finished with it stalls the processor. */
8561 /* Reusing an integer register causes no problems. */
8562 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8570 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8572 enum attr_type insn_type, dep_type;
8573 rtx pat = PATTERN (insn);
8574 rtx dep_pat = PATTERN (dep_insn);
8576 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
8579 insn_type = get_attr_type (insn);
8580 dep_type = get_attr_type (dep_insn);
8582 switch (REG_NOTE_KIND (link))
8585 /* Data dependency; DEP_INSN writes a register that INSN reads some
8586    cycles later.  */
8592 /* Get the delay iff the address of the store is the dependence. */
8593 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8596 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8603 /* If a load, then the dependence must be on the memory address. If
8604 the addresses aren't equal, then it might be a false dependency.  */
8605 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
8607 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
8608 || GET_CODE (SET_DEST (dep_pat)) != MEM
8609 || GET_CODE (SET_SRC (pat)) != MEM
8610 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
8611 XEXP (SET_SRC (pat), 0)))
8619 /* Compare to branch latency is 0. There is no benefit from
8620 separating compare and branch. */
8621 if (dep_type == TYPE_COMPARE)
8623 /* Floating point compare to branch latency is less than
8624 compare to conditional move. */
8625 if (dep_type == TYPE_FPCMP)
8634 /* Anti-dependencies only penalize the fpu unit. */
8635 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8647 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
8651 case PROCESSOR_SUPERSPARC:
8652 cost = supersparc_adjust_cost (insn, link, dep, cost);
8654 case PROCESSOR_HYPERSPARC:
8655 case PROCESSOR_SPARCLITE86X:
8656 cost = hypersparc_adjust_cost (insn, link, dep, cost);
8665 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
8666 int sched_verbose ATTRIBUTE_UNUSED,
8667 int max_ready ATTRIBUTE_UNUSED)
8671 sparc_use_sched_lookahead (void)
8673 if (sparc_cpu == PROCESSOR_NIAGARA
8674 || sparc_cpu == PROCESSOR_NIAGARA2
8675 || sparc_cpu == PROCESSOR_NIAGARA3
8676 || sparc_cpu == PROCESSOR_NIAGARA4)
8678 if (sparc_cpu == PROCESSOR_ULTRASPARC
8679 || sparc_cpu == PROCESSOR_ULTRASPARC3)
8681 if ((1 << sparc_cpu) &
8682 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
8683 (1 << PROCESSOR_SPARCLITE86X)))
8689 sparc_issue_rate (void)
8693 case PROCESSOR_NIAGARA:
8694 case PROCESSOR_NIAGARA2:
8695 case PROCESSOR_NIAGARA3:
8696 case PROCESSOR_NIAGARA4:
8700 /* Assume V9 processors are capable of at least dual-issue. */
8702 case PROCESSOR_SUPERSPARC:
8704 case PROCESSOR_HYPERSPARC:
8705 case PROCESSOR_SPARCLITE86X:
8707 case PROCESSOR_ULTRASPARC:
8708 case PROCESSOR_ULTRASPARC3:
8714 set_extends (rtx insn)
8716 register rtx pat = PATTERN (insn);
8718 switch (GET_CODE (SET_SRC (pat)))
8720 /* Load and some shift instructions zero extend. */
8723 /* sethi clears the high bits.  */
8725 /* LO_SUM is used with sethi.  sethi cleared the high
8726    bits and the values used with lo_sum are positive.  */
8728 /* Store flag stores 0 or 1.  */
8738 rtx op0 = XEXP (SET_SRC (pat), 0);
8739 rtx op1 = XEXP (SET_SRC (pat), 1);
8740 if (GET_CODE (op1) == CONST_INT)
8741 return INTVAL (op1) >= 0;
8742 if (GET_CODE (op0) != REG)
8744 if (sparc_check_64 (op0, insn) == 1)
8746 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8751 rtx op0 = XEXP (SET_SRC (pat), 0);
8752 rtx op1 = XEXP (SET_SRC (pat), 1);
8753 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
8755 if (GET_CODE (op1) == CONST_INT)
8756 return INTVAL (op1) >= 0;
8757 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8760 return GET_MODE (SET_SRC (pat)) == SImode;
8761 /* Positive integers leave the high bits zero. */
8763 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
8765 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
8768 return - (GET_MODE (SET_SRC (pat)) == SImode);
8770 return sparc_check_64 (SET_SRC (pat), insn);
8776 /* We _ought_ to have only one kind per function, but... */
8777 static GTY(()) rtx sparc_addr_diff_list;
8778 static GTY(()) rtx sparc_addr_list;
8781 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
8783 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
8785 sparc_addr_diff_list
8786 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
8788 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
8792 sparc_output_addr_vec (rtx vec)
8794 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8795 int idx, vlen = XVECLEN (body, 0);
8797 #ifdef ASM_OUTPUT_ADDR_VEC_START
8798 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8801 #ifdef ASM_OUTPUT_CASE_LABEL
8802 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8805 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8808 for (idx = 0; idx < vlen; idx++)
8810 ASM_OUTPUT_ADDR_VEC_ELT
8811 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
8814 #ifdef ASM_OUTPUT_ADDR_VEC_END
8815 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8820 sparc_output_addr_diff_vec (rtx vec)
8822 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8823 rtx base = XEXP (XEXP (body, 0), 0);
8824 int idx, vlen = XVECLEN (body, 1);
8826 #ifdef ASM_OUTPUT_ADDR_VEC_START
8827 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8830 #ifdef ASM_OUTPUT_CASE_LABEL
8831 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8834 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8837 for (idx = 0; idx < vlen; idx++)
8839 ASM_OUTPUT_ADDR_DIFF_ELT
8842 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
8843 CODE_LABEL_NUMBER (base));
8846 #ifdef ASM_OUTPUT_ADDR_VEC_END
8847 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8852 sparc_output_deferred_case_vectors (void)
8857 if (sparc_addr_list == NULL_RTX
8858 && sparc_addr_diff_list == NULL_RTX)
8861 /* Align to cache line in the function's code section. */
8862 switch_to_section (current_function_section ());
8864 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
8866 ASM_OUTPUT_ALIGN (asm_out_file, align);
8868 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
8869 sparc_output_addr_vec (XEXP (t, 0));
8870 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
8871 sparc_output_addr_diff_vec (XEXP (t, 0));
8873 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
8876 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
8877 unknown.  Return 1 if the high bits are zero, -1 if the register is
8878 sign extended.  */
8879 int
8880 sparc_check_64 (rtx x, rtx insn)
8882 /* If a register is set only once it is safe to ignore insns this
8883 code does not know how to handle. The loop will either recognize
8884 the single set and return the correct value or fail to recognize
8885 it and return 0.  */
8886 int set_once = 0;
8887 rtx y = x;
8889 gcc_assert (GET_CODE (x) == REG);
8891 if (GET_MODE (x) == DImode)
8892 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
8894 if (flag_expensive_optimizations
8895 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
8901 insn = get_last_insn_anywhere ();
8906 while ((insn = PREV_INSN (insn)))
8908 switch (GET_CODE (insn))
8921 rtx pat = PATTERN (insn);
8922 if (GET_CODE (pat) != SET)
8924 if (rtx_equal_p (x, SET_DEST (pat)))
8925 return set_extends (insn);
8926 if (y && rtx_equal_p (y, SET_DEST (pat)))
8927 return set_extends (insn);
8928 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
8936 /* Returns assembly code to perform a DImode shift using
8937 a 64-bit global or out register on SPARC-V8+. */
8938 const char *
8939 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
8941 static char asm_code[60];
8943 /* The scratch register is only required when the destination
8944 register is not a 64-bit global or out register. */
8945 if (which_alternative != 2)
8946 operands[3] = operands[0];
8948 /* We can only shift by constants <= 63. */
8949 if (GET_CODE (operands[2]) == CONST_INT)
8950 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
8952 if (GET_CODE (operands[1]) == CONST_INT)
8954 output_asm_insn ("mov\t%1, %3", operands);
8958 output_asm_insn ("sllx\t%H1, 32, %3", operands);
8959 if (sparc_check_64 (operands[1], insn) <= 0)
8960 output_asm_insn ("srl\t%L1, 0, %L1", operands);
8961 output_asm_insn ("or\t%L1, %3, %3", operands);
8964 strcpy(asm_code, opcode);
8966 if (which_alternative != 2)
8967 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
8969 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
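
/* A hedged example of the returned template: with OPCODE "sllx" and
   alternative 0, the result is

	"sllx\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0"

   i.e. the shift is performed in the 64-bit destination register and
   the high word is then extracted with srlx.  */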
/* Output rtl to increment the profiler label LABELNO
   for profiling a function entry.  */

void
sparc_profile_hook (int labelno)
{
  char buf[32];
  rtx lab, fun;

  fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
  if (NO_PROFILE_COUNTERS)
    {
      emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
    }
  else
    {
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
    }
}
#ifdef TARGET_SOLARIS
/* Solaris implementation of TARGET_ASM_NAMED_SECTION.  */

static void
sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
                                     tree decl ATTRIBUTE_UNUSED)
{
  if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
    {
      solaris_elf_asm_comdat_section (name, flags, decl);
      return;
    }

  fprintf (asm_out_file, "\t.section\t\"%s\"", name);

  if (!(flags & SECTION_DEBUG))
    fputs (",#alloc", asm_out_file);
  if (flags & SECTION_WRITE)
    fputs (",#write", asm_out_file);
  if (flags & SECTION_TLS)
    fputs (",#tls", asm_out_file);
  if (flags & SECTION_CODE)
    fputs (",#execinstr", asm_out_file);

  /* ??? Handle SECTION_BSS.  */

  fputc ('\n', asm_out_file);
}
#endif /* TARGET_SOLARIS */
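
/* Example output (illustrative, derived from the flag tests above): a
   writable TLS section named ".tdata" is emitted as

	.section	".tdata",#alloc,#write,#tls

   using Solaris as attribute syntax instead of the usual ELF "awT"
   flag string.  */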
/* We do not allow indirect calls to be optimized into sibling calls.

   We cannot use sibling calls when delayed branches are disabled
   because they will likely require the call delay slot to be filled.

   Also, on SPARC 32-bit we cannot emit a sibling call when the
   current function returns a structure.  This is because the "unimp
   after call" convention would cause the callee to return to the
   wrong place.  The generic code already disallows cases where the
   function being called returns a structure.

   It may seem strange how this last case could occur.  Usually there
   is code after the call which jumps to epilogue code which dumps the
   return value into the struct return area.  That ought to invalidate
   the sibling call right?  Well, in the C++ case we can end up passing
   the pointer to the struct return area to a constructor (which returns
   void) and then nothing else happens.  Such a sibling call would look
   valid without the added check here.

   VxWorks PIC PLT entries require the global pointer to be initialized
   on entry.  We therefore can't emit sibling calls to them.  */

static bool
sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  return (decl
          && flag_delayed_branch
          && (TARGET_ARCH64 || ! cfun->returns_struct)
          && !(TARGET_VXWORKS_RTP
               && flag_pic
               && !targetm.binds_local_p (decl)));
}
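
/* The C++ case above, as a hedged sketch:

     struct S { S (); int x; };
     S f (void) { return S (); }

   The constructor is invoked directly on the struct return area and
   nothing follows it, so without the returns_struct check the call
   would look like a valid sibcall candidate on 32-bit SPARC.  */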
/* libfunc renaming.  */

static void
sparc_init_libfuncs (void)
{
  if (TARGET_ARCH32)
    {
      /* Use the subroutines that Sun's library provides for integer
         multiply and divide.  The `*' prevents an underscore from
         being prepended by the compiler.  .umul is a little faster
         than .mul.  */
      set_optab_libfunc (smul_optab, SImode, "*.umul");
      set_optab_libfunc (sdiv_optab, SImode, "*.div");
      set_optab_libfunc (udiv_optab, SImode, "*.udiv");
      set_optab_libfunc (smod_optab, SImode, "*.rem");
      set_optab_libfunc (umod_optab, SImode, "*.urem");

      /* TFmode arithmetic.  These names are part of the SPARC 32bit ABI.  */
      set_optab_libfunc (add_optab, TFmode, "_Q_add");
      set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
      set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
      set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
      set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");

      /* We can define the TFmode sqrt optab only if TARGET_FPU.  This
         is because with soft-float, the SFmode and DFmode sqrt
         instructions will be absent, and the compiler will notice and
         try to use the TFmode sqrt instruction for calls to the
         builtin function sqrt, but this fails.  */
      if (TARGET_FPU)
        set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");

      set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
      set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
      set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
      set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
      set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
      set_optab_libfunc (le_optab, TFmode, "_Q_fle");

      set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
      set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
      set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
      set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");

      set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
      set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
      set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
      set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");

      if (DITF_CONVERSION_LIBFUNCS)
        {
          set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
          set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
          set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
          set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
        }

      if (SUN_CONVERSION_LIBFUNCS)
        {
          set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
          set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
          set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
          set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
        }
    }
  if (TARGET_ARCH64)
    {
      /* In the SPARC 64bit ABI, SImode multiply and divide functions
         do not exist in the library.  Make sure the compiler does not
         emit calls to them by accident.  (It should always use the
         hardware instructions.)  */
      set_optab_libfunc (smul_optab, SImode, 0);
      set_optab_libfunc (sdiv_optab, SImode, 0);
      set_optab_libfunc (udiv_optab, SImode, 0);
      set_optab_libfunc (smod_optab, SImode, 0);
      set_optab_libfunc (umod_optab, SImode, 0);

      if (SUN_INTEGER_MULTIPLY_64)
        {
          set_optab_libfunc (smul_optab, DImode, "__mul64");
          set_optab_libfunc (sdiv_optab, DImode, "__div64");
          set_optab_libfunc (udiv_optab, DImode, "__udiv64");
          set_optab_libfunc (smod_optab, DImode, "__rem64");
          set_optab_libfunc (umod_optab, DImode, "__urem64");
        }

      if (SUN_CONVERSION_LIBFUNCS)
        {
          set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
          set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
          set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
          set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
        }
    }
}
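
/* A hedged illustration of the effect: on 32-bit SPARC, "long double
   c = a + b;" is lowered to a libcall

	call	_Q_add

   per the SPARC 32-bit ABI names installed above, instead of the
   compiler's generic soft-float names (e.g. __addtf3).  */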
static tree def_builtin (const char *name, int code, tree type)
{
  return add_builtin_function (name, type, code, BUILT_IN_MD, NULL,
                               NULL_TREE);
}

static tree def_builtin_const (const char *name, int code, tree type)
{
  tree t = def_builtin (name, code, type);

  if (t)
    TREE_READONLY (t) = 1;

  return t;
}

/* Implement the TARGET_INIT_BUILTINS target hook.
   Create builtin functions for special SPARC instructions.  */

static void
sparc_init_builtins (void)
{
  if (TARGET_VIS)
    sparc_vis_init_builtins ();
}
/* Create builtin functions for VIS 1.0 instructions.  */

static void
sparc_vis_init_builtins (void)
{
  tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
  tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
  tree v4hi = build_vector_type (intHI_type_node, 4);
  tree v2hi = build_vector_type (intHI_type_node, 2);
  tree v2si = build_vector_type (intSI_type_node, 2);
  tree v1si = build_vector_type (intSI_type_node, 1);

  tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
  tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
  tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
  tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
  tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
  tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
  tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
  tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
  tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
  tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
  tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
  tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
  tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
  tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
  tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
                                                         v8qi, v8qi,
                                                         intDI_type_node, 0);
  tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
                                                      v8qi, v8qi, 0);
  tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
                                                      v8qi, v8qi, 0);
  tree di_ftype_di_di = build_function_type_list (intDI_type_node,
                                                  intDI_type_node,
                                                  intDI_type_node, 0);
  tree si_ftype_si_si = build_function_type_list (intSI_type_node,
                                                  intSI_type_node,
                                                  intSI_type_node, 0);
  tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
                                                    ptr_type_node,
                                                    intSI_type_node, 0);
  tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
                                                    ptr_type_node,
                                                    intDI_type_node, 0);
  tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
                                                    ptr_type_node,
                                                    ptr_type_node, 0);
  tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
                                                    ptr_type_node,
                                                    ptr_type_node, 0);
  tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
                                                      v4hi, v4hi, 0);
  tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
                                                      v2si, v2si, 0);
  tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
                                                      v4hi, v4hi, 0);
  tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
                                                      v2si, v2si, 0);
  tree void_ftype_di = build_function_type_list (void_type_node,
                                                 intDI_type_node, 0);
  tree di_ftype_void = build_function_type_list (intDI_type_node,
                                                 void_type_node, 0);
  tree void_ftype_si = build_function_type_list (void_type_node,
                                                 intSI_type_node, 0);
  tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
                                                  float_type_node,
                                                  float_type_node, 0);
  tree df_ftype_df_df = build_function_type_list (double_type_node,
                                                  double_type_node,
                                                  double_type_node, 0);

  /* Packing and expanding vectors.  */
  def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
               v4qi_ftype_v4hi);
  def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
               v8qi_ftype_v2si_v8qi);
  def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
               v2hi_ftype_v2si);
  def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
                     v4hi_ftype_v4qi);
  def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
                     v8qi_ftype_v4qi_v4qi);

  /* Multiplications.  */
  def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
                     v4hi_ftype_v4qi_v4hi);
  def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
                     v4hi_ftype_v4qi_v2hi);
  def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
                     v4hi_ftype_v4qi_v2hi);
  def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
                     v4hi_ftype_v8qi_v4hi);
  def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
                     v4hi_ftype_v8qi_v4hi);
  def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
                     v2si_ftype_v4qi_v2hi);
  def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
                     v2si_ftype_v4qi_v2hi);

  /* Data aligning.  */
  def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
               v4hi_ftype_v4hi_v4hi);
  def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
               v8qi_ftype_v8qi_v8qi);
  def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
               v2si_ftype_v2si_v2si);
  def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
               di_ftype_di_di);

  def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
               void_ftype_di);
  def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
               di_ftype_void);

  if (TARGET_ARCH64)
    {
      def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
                   ptr_ftype_ptr_di);
      def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
                   ptr_ftype_ptr_di);
    }
  else
    {
      def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
                   ptr_ftype_ptr_si);
      def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
                   ptr_ftype_ptr_si);
    }

  /* Pixel distance.  */
  def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
                     di_ftype_v8qi_v8qi_di);
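
  /* Hedged worked example of the pdist semantics: with
     a = {1, 2, 3, 0, 0, 0, 0, 0}, b = {4, 0, 3, 0, 0, 0, 0, 0} and an
     accumulator of 10, the result is 10 + |1-4| + |2-0| + |3-3| = 15.  */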
  /* Edge handling.  */
  if (TARGET_ARCH64)
    {
      def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
                         di_ftype_ptr_ptr);
      def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
                         di_ftype_ptr_ptr);
      def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
                         di_ftype_ptr_ptr);
      def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
                         di_ftype_ptr_ptr);
      def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
                         di_ftype_ptr_ptr);
      def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
                         di_ftype_ptr_ptr);
      if (TARGET_VIS2)
        {
          def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
                             di_ftype_ptr_ptr);
          def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
                             di_ftype_ptr_ptr);
          def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
                             di_ftype_ptr_ptr);
          def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
                             di_ftype_ptr_ptr);
          def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
                             di_ftype_ptr_ptr);
          def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
                             di_ftype_ptr_ptr);
        }
    }
  else
    {
      def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
                         si_ftype_ptr_ptr);
      def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
                         si_ftype_ptr_ptr);
      def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
                         si_ftype_ptr_ptr);
      def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
                         si_ftype_ptr_ptr);
      def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
                         si_ftype_ptr_ptr);
      def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
                         si_ftype_ptr_ptr);
      if (TARGET_VIS2)
        {
          def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
                             si_ftype_ptr_ptr);
          def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
                             si_ftype_ptr_ptr);
          def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
                             si_ftype_ptr_ptr);
          def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
                             si_ftype_ptr_ptr);
          def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
                             si_ftype_ptr_ptr);
          def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
                             si_ftype_ptr_ptr);
        }
    }

  /* Pixel compare.  */
  if (TARGET_ARCH64)
    {
      def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
                         di_ftype_v4hi_v4hi);
      def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
                         di_ftype_v2si_v2si);
      def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
                         di_ftype_v4hi_v4hi);
      def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
                         di_ftype_v2si_v2si);
      def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
                         di_ftype_v4hi_v4hi);
      def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
                         di_ftype_v2si_v2si);
      def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
                         di_ftype_v4hi_v4hi);
      def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
                         di_ftype_v2si_v2si);
    }
  else
    {
      def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
                         si_ftype_v4hi_v4hi);
      def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
                         si_ftype_v2si_v2si);
      def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
                         si_ftype_v4hi_v4hi);
      def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
                         si_ftype_v2si_v2si);
      def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
                         si_ftype_v4hi_v4hi);
      def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
                         si_ftype_v2si_v2si);
      def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
                         si_ftype_v4hi_v4hi);
      def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
                         si_ftype_v2si_v2si);
    }
  /* Addition and subtraction.  */
  def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
                     v4hi_ftype_v4hi_v4hi);
  def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
                     v2hi_ftype_v2hi_v2hi);
  def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
                     v2si_ftype_v2si_v2si);
  def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addsi3,
                     v1si_ftype_v1si_v1si);
  def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
                     v4hi_ftype_v4hi_v4hi);
  def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
                     v2hi_ftype_v2hi_v2hi);
  def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
                     v2si_ftype_v2si_v2si);
  def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subsi3,
                     v1si_ftype_v1si_v1si);
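
  /* A minimal usage sketch (illustrative; assumes -mvis and the GCC
     vector extension):

       typedef short v4hi __attribute__ ((vector_size (8)));

       v4hi
       add4 (v4hi a, v4hi b)
       {
         return __builtin_vis_fpadd16 (a, b);
       }

     which is expected to compile to a single fpadd16 instruction.  */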
  /* Three-dimensional array addressing.  */
  if (TARGET_ARCH64)
    {
      def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
                         di_ftype_di_di);
      def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
                         di_ftype_di_di);
      def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
                         di_ftype_di_di);
    }
  else
    {
      def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
                         si_ftype_si_si);
      def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
                         si_ftype_si_si);
      def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
                         si_ftype_si_si);
    }

  if (TARGET_VIS2)
    {
      /* Byte mask and shuffle.  */
      if (TARGET_ARCH64)
        def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
                     di_ftype_di_di);
      else
        def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
                     si_ftype_si_si);
      def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
                   v4hi_ftype_v4hi_v4hi);
      def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
                   v8qi_ftype_v8qi_v8qi);
      def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
                   v2si_ftype_v2si_v2si);
      def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshuffledi_vis,
                   di_ftype_di_di);
    }

  if (TARGET_VIS3)
    {
      if (TARGET_ARCH64)
        {
          def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
                       void_ftype_di);
          def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
                       void_ftype_di);
          def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
                       void_ftype_di);
        }
      else
        {
          def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
                       void_ftype_si);
          def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
                       void_ftype_si);
          def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
                       void_ftype_si);
        }

      def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
                         v4hi_ftype_v4hi_v4hi);

      def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_fsll16_vis,
                         v4hi_ftype_v4hi_v4hi);
      def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_fslas16_vis,
                         v4hi_ftype_v4hi_v4hi);
      def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_fsrl16_vis,
                         v4hi_ftype_v4hi_v4hi);
      def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_fsra16_vis,
                         v4hi_ftype_v4hi_v4hi);
      def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_fsll32_vis,
                         v2si_ftype_v2si_v2si);
      def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_fslas32_vis,
                         v2si_ftype_v2si_v2si);
      def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_fsrl32_vis,
                         v2si_ftype_v2si_v2si);
      def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_fsra32_vis,
                         v2si_ftype_v2si_v2si);

      if (TARGET_ARCH64)
        def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
                           di_ftype_v8qi_v8qi);
      else
        def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
                           si_ftype_v8qi_v8qi);

      def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
                         v4hi_ftype_v4hi_v4hi);
      def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
                         di_ftype_di_di);
      def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
                         di_ftype_di_di);

      def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_fpadds16_vis,
                         v4hi_ftype_v4hi_v4hi);
      def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_fpadds16s_vis,
                         v2hi_ftype_v2hi_v2hi);
      def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_fpsubs16_vis,
                         v4hi_ftype_v4hi_v4hi);
      def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_fpsubs16s_vis,
                         v2hi_ftype_v2hi_v2hi);
      def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_fpadds32_vis,
                         v2si_ftype_v2si_v2si);
      def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_fpadds32s_vis,
                         v1si_ftype_v1si_v1si);
      def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_fpsubs32_vis,
                         v2si_ftype_v2si_v2si);
      def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_fpsubs32s_vis,
                         v1si_ftype_v1si_v1si);

      if (TARGET_ARCH64)
        {
          def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
                             di_ftype_v8qi_v8qi);
          def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
                             di_ftype_v8qi_v8qi);
          def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
                             di_ftype_v8qi_v8qi);
          def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
                             di_ftype_v8qi_v8qi);
        }
      else
        {
          def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
                             si_ftype_v8qi_v8qi);
          def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
                             si_ftype_v8qi_v8qi);
          def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
                             si_ftype_v8qi_v8qi);
          def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
                             si_ftype_v8qi_v8qi);
        }

      def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
                         sf_ftype_sf_sf);
      def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
                         df_ftype_df_df);
      def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
                         sf_ftype_sf_sf);
      def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
                         df_ftype_df_df);
      def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
                         sf_ftype_sf_sf);
      def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
                         df_ftype_df_df);

      def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
                         di_ftype_di_di);
      def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
                         di_ftype_di_di);
      def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
                         di_ftype_di_di);
    }
}
/* Handle TARGET_EXPAND_BUILTIN target hook.
   Expand builtin functions for SPARC intrinsics.  */

static rtx
sparc_expand_builtin (tree exp, rtx target,
                      rtx subtarget ATTRIBUTE_UNUSED,
                      enum machine_mode tmode ATTRIBUTE_UNUSED,
                      int ignore ATTRIBUTE_UNUSED)
{
  tree arg;
  call_expr_arg_iterator iter;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int icode = DECL_FUNCTION_CODE (fndecl);
  rtx pat, op[4];
  int arg_count = 0;
  bool nonvoid;

  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;

  if (nonvoid)
    {
      enum machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
          || GET_MODE (target) != tmode
          || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
        op[0] = gen_reg_rtx (tmode);
      else
        op[0] = target;
    }
  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      const struct insn_operand_data *insn_op;
      int idx;

      if (arg == error_mark_node)
        return NULL_RTX;

      arg_count++;
      idx = arg_count - !nonvoid;
      insn_op = &insn_data[icode].operand[idx];
      op[arg_count] = expand_normal (arg);

      if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
                                                        insn_op->mode))
        op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
    }

  switch (arg_count)
    {
    case 0:
      pat = GEN_FCN (icode) (op[0]);
      break;
    case 1:
      if (nonvoid)
        pat = GEN_FCN (icode) (op[0], op[1]);
      else
        pat = GEN_FCN (icode) (op[1]);
      break;
    case 2:
      pat = GEN_FCN (icode) (op[0], op[1], op[2]);
      break;
    case 3:
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
      break;
    default:
      gcc_unreachable ();
    }

  if (!pat)
    return NULL_RTX;

  emit_insn (pat);

  return nonvoid ? op[0] : const0_rtx;
}
/* Return the upper 16 bits of the 8x16 multiplication.  */

static int
sparc_vis_mul8x16 (int e8, int e16)
{
  return (e8 * e16 + 128) / 256;
}
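
/* Worked example (not from the original sources): sparc_vis_mul8x16 (2, 300)
   is (2 * 300 + 128) / 256 = 2; the +128 bias implements round-to-nearest
   on the /256 scaling of the 8x16 product.  */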
/* Multiply the vector elements in ELTS0 by the elements in ELTS1 as
   specified by FNCODE.  All of the elements in the ELTS0 and ELTS1 lists
   must be integer constants.  A tree list with the results of the
   multiplications is returned, and each element in the list is of
   INNER_TYPE.  */

static tree
sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
{
  tree n_elts = NULL_TREE;
  int scale;

  switch (fncode)
    {
    case CODE_FOR_fmul8x16_vis:
      for (; elts0 && elts1;
           elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
        {
          int val
            = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                                 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
          n_elts = tree_cons (NULL_TREE,
                              build_int_cst (inner_type, val),
                              n_elts);
        }
      break;

    case CODE_FOR_fmul8x16au_vis:
      scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));

      for (; elts0; elts0 = TREE_CHAIN (elts0))
        {
          int val
            = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                                 scale);
          n_elts = tree_cons (NULL_TREE,
                              build_int_cst (inner_type, val),
                              n_elts);
        }
      break;

    case CODE_FOR_fmul8x16al_vis:
      scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));

      for (; elts0; elts0 = TREE_CHAIN (elts0))
        {
          int val
            = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                                 scale);
          n_elts = tree_cons (NULL_TREE,
                              build_int_cst (inner_type, val),
                              n_elts);
        }
      break;

    default:
      gcc_unreachable ();
    }

  return nreverse (n_elts);
}
/* Handle TARGET_FOLD_BUILTIN target hook.
   Fold builtin functions for SPARC intrinsics.  If IGNORE is true the
   result of the function call is ignored.  NULL_TREE is returned if the
   function could not be folded.  */

static tree
sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
                    tree *args, bool ignore)
{
  tree arg0, arg1, arg2;
  tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
  enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);

  if (ignore)
    {
      /* Note that a switch statement instead of the sequence of tests
         would be incorrect as many of the CODE_FOR values could be
         CODE_FOR_nothing and that would yield multiple alternatives
         with identical values.  */
      if (icode == CODE_FOR_alignaddrsi_vis
          || icode == CODE_FOR_alignaddrdi_vis
          || icode == CODE_FOR_wrgsr_vis
          || icode == CODE_FOR_bmasksi_vis
          || icode == CODE_FOR_bmaskdi_vis
          || icode == CODE_FOR_cmask8si_vis
          || icode == CODE_FOR_cmask8di_vis
          || icode == CODE_FOR_cmask16si_vis
          || icode == CODE_FOR_cmask16di_vis
          || icode == CODE_FOR_cmask32si_vis
          || icode == CODE_FOR_cmask32di_vis)
        ;
      else
        return build_zero_cst (rtype);
    }

  switch (icode)
    {
    case CODE_FOR_fexpand_vis:
      arg0 = args[0];
      STRIP_NOPS (arg0);

      if (TREE_CODE (arg0) == VECTOR_CST)
        {
          tree inner_type = TREE_TYPE (rtype);
          tree elts = TREE_VECTOR_CST_ELTS (arg0);
          tree n_elts = NULL_TREE;

          for (; elts; elts = TREE_CHAIN (elts))
            {
              unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
              n_elts = tree_cons (NULL_TREE,
                                  build_int_cst (inner_type, val),
                                  n_elts);
            }
          return build_vector (rtype, nreverse (n_elts));
        }
      break;

    case CODE_FOR_fmul8x16_vis:
    case CODE_FOR_fmul8x16au_vis:
    case CODE_FOR_fmul8x16al_vis:
      arg0 = args[0];
      arg1 = args[1];
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);

      if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
        {
          tree inner_type = TREE_TYPE (rtype);
          tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
          tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
          tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
                                                  elts1);

          return build_vector (rtype, n_elts);
        }
      break;

    case CODE_FOR_fpmerge_vis:
      arg0 = args[0];
      arg1 = args[1];
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);

      if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
        {
          tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
          tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
          tree n_elts = NULL_TREE;

          for (; elts0 && elts1;
               elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
            {
              n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
              n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
            }

          return build_vector (rtype, nreverse (n_elts));
        }
      break;

    case CODE_FOR_pdist_vis:
      arg0 = args[0];
      arg1 = args[1];
      arg2 = args[2];
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);
      STRIP_NOPS (arg2);

      if (TREE_CODE (arg0) == VECTOR_CST
          && TREE_CODE (arg1) == VECTOR_CST
          && TREE_CODE (arg2) == INTEGER_CST)
        {
          int overflow = 0;
          unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
          HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
          tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
          tree elts1 = TREE_VECTOR_CST_ELTS (arg1);

          for (; elts0 && elts1;
               elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
            {
              unsigned HOST_WIDE_INT
                low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
              HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
              HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));

              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              overflow |= neg_double (low1, high1, &l, &h);
              overflow |= add_double (low0, high0, l, h, &l, &h);
              if (h < 0)
                overflow |= neg_double (l, h, &l, &h);

              overflow |= add_double (low, high, l, h, &low, &high);
            }

          gcc_assert (overflow == 0);

          return build_int_cst_wide (rtype, low, high);
        }

    default:
      break;
    }

  return NULL_TREE;
}
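
/* Hedged example of this folding: __builtin_vis_pdist on the constant
   vectors {5, 1, 0, 0, 0, 0, 0, 0} and {2, 9, 0, 0, 0, 0, 0, 0} with a
   zero accumulator folds at compile time to |5-2| + |1-9| = 11, the same
   value the hardware instruction would produce.  */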
/* ??? This duplicates information provided to the compiler by the
   ??? scheduler description.  Some day, teach genautomata to output
   ??? the latencies and then CSE will just use that.  */

static bool
sparc_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                 int *total, bool speed ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
        {
          *total = 0;
          return true;
        }
      /* FALLTHRU */

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode
          && ((CONST_DOUBLE_HIGH (x) == 0
               && CONST_DOUBLE_LOW (x) < 0x1000)
              || (CONST_DOUBLE_HIGH (x) == -1
                  && CONST_DOUBLE_LOW (x) < 0
                  && CONST_DOUBLE_LOW (x) >= -0x1000)))
        *total = 0;
      else
        *total = 8;
      return true;

    case MEM:
      /* If outer-code was a sign or zero extension, a cost
         of COSTS_N_INSNS (1) was already added in.  This is
         why we are subtracting it back out.  */
      if (outer_code == ZERO_EXTEND)
        {
          *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
        }
      else if (outer_code == SIGN_EXTEND)
        {
          *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
        }
      else if (float_mode_p)
        {
          *total = sparc_costs->float_load;
        }
      else
        {
          *total = sparc_costs->int_load;
        }

      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
        *total = sparc_costs->float_plusminus;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case FMA:
      {
        rtx sub;

        gcc_assert (float_mode_p);
        *total = sparc_costs->float_mul;

        sub = XEXP (x, 0);
        if (GET_CODE (sub) == NEG)
          sub = XEXP (sub, 0);
        *total += rtx_cost (sub, FMA, 0, speed);

        sub = XEXP (x, 2);
        if (GET_CODE (sub) == NEG)
          sub = XEXP (sub, 0);
        *total += rtx_cost (sub, FMA, 2, speed);
        return true;
      }

    case MULT:
      if (float_mode_p)
        *total = sparc_costs->float_mul;
      else if (! TARGET_HARD_MUL)
        *total = COSTS_N_INSNS (25);
      else
        {
          int bit_cost;

          bit_cost = 0;
          if (sparc_costs->int_mul_bit_factor)
            {
              int nbits;

              if (GET_CODE (XEXP (x, 1)) == CONST_INT)
                {
                  unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
                  for (nbits = 0; value != 0; value &= value - 1)
                    nbits++;
                }
              else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
                       && GET_MODE (XEXP (x, 1)) == VOIDmode)
                {
                  rtx x1 = XEXP (x, 1);
                  unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
                  unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);

                  for (nbits = 0; value1 != 0; value1 &= value1 - 1)
                    nbits++;
                  for (; value2 != 0; value2 &= value2 - 1)
                    nbits++;
                }
              else
                nbits = 7;

              if (nbits < 3)
                nbits = 3;
              bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
              bit_cost = COSTS_N_INSNS (bit_cost);
            }

          if (mode == DImode)
            *total = sparc_costs->int_mulX + bit_cost;
          else
            *total = sparc_costs->int_mul + bit_cost;
        }
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (float_mode_p)
        {
          if (mode == DFmode)
            *total = sparc_costs->float_div_df;
          else
            *total = sparc_costs->float_div_sf;
        }
      else
        {
          if (mode == DImode)
            *total = sparc_costs->int_divX;
          else
            *total = sparc_costs->int_div;
        }
      return false;

    case NEG:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ABS:
    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
      *total = sparc_costs->float_move;
      return false;

    case SQRT:
      if (mode == DFmode)
        *total = sparc_costs->float_sqrt_df;
      else
        *total = sparc_costs->float_sqrt_sf;
      return false;

    case COMPARE:
      if (float_mode_p)
        *total = sparc_costs->float_cmp;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
        *total = sparc_costs->float_cmove;
      else
        *total = sparc_costs->int_cmove;
      return false;

    case IOR:
      /* Handle the NAND vector patterns.  */
      if (sparc_vector_mode_supported_p (GET_MODE (x))
          && GET_CODE (XEXP (x, 0)) == NOT
          && GET_CODE (XEXP (x, 1)) == NOT)
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      else
        return false;

    default:
      return false;
    }
}
/* Return true if CLASS is either GENERAL_REGS or I64_REGS.  */

static inline bool
general_or_i64_p (reg_class_t rclass)
{
  return (rclass == GENERAL_REGS || rclass == I64_REGS);
}

/* Implement TARGET_REGISTER_MOVE_COST.  */

static int
sparc_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                          reg_class_t from, reg_class_t to)
{
  if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
      || (general_or_i64_p (from) && FP_REG_CLASS_P (to))
      || from == FPCC_REGS
      || to == FPCC_REGS)
    {
      if (sparc_cpu == PROCESSOR_ULTRASPARC
          || sparc_cpu == PROCESSOR_ULTRASPARC3
          || sparc_cpu == PROCESSOR_NIAGARA
          || sparc_cpu == PROCESSOR_NIAGARA2
          || sparc_cpu == PROCESSOR_NIAGARA3
          || sparc_cpu == PROCESSOR_NIAGARA4)
        return 12;

      return 6;
    }

  return 2;
}
/* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
   This is achieved by means of a manual dynamic stack space allocation in
   the current frame.  We make the assumption that SEQ doesn't contain any
   function calls, with the possible exception of calls to the GOT helper.  */

static void
emit_and_preserve (rtx seq, rtx reg, rtx reg2)
{
  /* We must preserve the lowest 16 words for the register save area.  */
  HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
  /* We really need only 2 words of fresh stack space.  */
  HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);

  rtx slot
    = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
                                             SPARC_STACK_BIAS + offset));

  emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
  emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
  if (reg2)
    emit_insn (gen_rtx_SET (VOIDmode,
                            adjust_address (slot, word_mode, UNITS_PER_WORD),
                            reg2));
  emit_insn (seq);
  if (reg2)
    emit_insn (gen_rtx_SET (VOIDmode,
                            reg2,
                            adjust_address (slot, word_mode, UNITS_PER_WORD)));
  emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
  emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
}
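
/* Resulting stack layout, as an illustrative sketch:

	%sp + bias + 0 .. 16 words - 1 : register save area (untouched)
	%sp + bias + 16 words          : saved REG
	%sp + bias + 17 words          : saved REG2 (when REG2 != 0)

   SEQ runs between the stack pointer decrement and increment, so it may
   freely clobber REG and REG2.  */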
/* Output the assembler code for a thunk function.  THUNK_DECL is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at address
   (*THIS + VCALL_OFFSET) should be additionally added to THIS.  */

static void
sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
                       HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                       tree function)
{
  rtx this_rtx, insn, funexp;
  unsigned int int_arg_first;

  reload_completed = 1;
  epilogue_completed = 1;

  emit_note (NOTE_INSN_PROLOGUE_END);

  if (TARGET_FLAT)
    {
      sparc_leaf_function_p = 1;

      int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
    }
  else if (flag_delayed_branch)
    {
      /* We will emit a regular sibcall below, so we need to instruct
         output_sibcall that we are in a leaf function.  */
      sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;

      /* This will cause final.c to invoke leaf_renumber_regs so we
         must behave as if we were in a not-yet-leafified function.  */
      int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
    }
  else
    {
      /* We will emit the sibcall manually below, so we will need to
         manually spill non-leaf registers.  */
      sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;

      /* We really are in a leaf function.  */
      int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
    }

  /* Find the "this" pointer.  Normally in %o0, but in ARCH64 if the function
     returns a structure, the structure return pointer is there instead.  */
  if (TARGET_ARCH64
      && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
  else
    this_rtx = gen_rtx_REG (Pmode, int_arg_first);

  /* Add DELTA.  When possible use a plain add, otherwise load it into
     a register first.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);

      if (! SPARC_SIMM13_P (delta))
        {
          rtx scratch = gen_rtx_REG (Pmode, 1);
          emit_move_insn (scratch, delta_rtx);
          delta_rtx = scratch;
        }

      /* THIS_RTX += DELTA.  */
      emit_insn (gen_add2_insn (this_rtx, delta_rtx));
    }

  /* Add the word at address (*THIS_RTX + VCALL_OFFSET).  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx scratch = gen_rtx_REG (Pmode, 1);

      gcc_assert (vcall_offset < 0);

      /* SCRATCH = *THIS_RTX.  */
      emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));

      /* Prepare for adding VCALL_OFFSET.  The difficulty is that we
         may not have any available scratch register at this point.  */
      if (SPARC_SIMM13_P (vcall_offset))
        ;
      /* This is the case if ARCH64 (unless -ffixed-g5 is passed).  */
      else if (! fixed_regs[5]
               /* The below sequence is made up of at least 2 insns,
                  while the default method may need only one.  */
               && vcall_offset < -8192)
        {
          rtx scratch2 = gen_rtx_REG (Pmode, 5);
          emit_move_insn (scratch2, vcall_offset_rtx);
          vcall_offset_rtx = scratch2;
        }
      else
        {
          rtx increment = GEN_INT (-4096);

          /* VCALL_OFFSET is a negative number whose typical range can be
             estimated as -32768..0 in 32-bit mode.  In almost all cases
             it is therefore cheaper to emit multiple add insns than
             spilling and loading the constant into a register (at least
             3 insns).  */
          while (! SPARC_SIMM13_P (vcall_offset))
            {
              emit_insn (gen_add2_insn (scratch, increment));
              vcall_offset += 4096;
            }
          vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
        }

      /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET).  */
      emit_move_insn (scratch, gen_rtx_MEM (Pmode,
                                            gen_rtx_PLUS (Pmode,
                                                          scratch,
                                                          vcall_offset_rtx)));

      /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET).  */
      emit_insn (gen_add2_insn (this_rtx, scratch));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);

  if (flag_delayed_branch)
    {
      funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
      insn = emit_call_insn (gen_sibcall (funexp));
      SIBLING_CALL_P (insn) = 1;
    }
  else
    {
      /* The hoops we have to jump through in order to generate a sibcall
         without using delay slots...  */
      rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);

      if (flag_pic)
        {
          spill_reg = gen_rtx_REG (word_mode, 15);  /* %o7 */
          start_sequence ();
          load_got_register ();  /* clobbers %o7 */
          scratch = sparc_legitimize_pic_address (funexp, scratch);
          seq = get_insns ();
          end_sequence ();
          emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
        }
      else if (TARGET_ARCH32)
        {
          emit_insn (gen_rtx_SET (VOIDmode,
                                  scratch,
                                  gen_rtx_HIGH (SImode, funexp)));
          emit_insn (gen_rtx_SET (VOIDmode,
                                  scratch,
                                  gen_rtx_LO_SUM (SImode, scratch, funexp)));
        }
      else  /* TARGET_ARCH64 */
        {
          switch (sparc_cmodel)
            {
            case CM_MEDLOW:
            case CM_MEDMID:
              /* The destination can serve as a temporary.  */
              sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
              break;

            case CM_MEDANY:
            case CM_EMBMEDANY:
              /* The destination cannot serve as a temporary.  */
              spill_reg = gen_rtx_REG (DImode, 15);  /* %o7 */
              start_sequence ();
              sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
              seq = get_insns ();
              end_sequence ();
              emit_and_preserve (seq, spill_reg, 0);
              break;

            default:
              gcc_unreachable ();
            }
        }

      emit_jump_insn (gen_indirect_jump (scratch));
    }

  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_alloc ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}
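
/* A hedged example: for DELTA == 8, VCALL_OFFSET == 0 and delayed
   branches enabled, the thunk is conceptually just

	! %o0 holds "this"
	add	%o0, 8, %o0
	<sibcall to the target function>

   with no frame or register window of its own.  */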
/* Return true if sparc_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
                           HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
                           HOST_WIDE_INT vcall_offset,
                           const_tree function ATTRIBUTE_UNUSED)
{
  /* Bound the loop used in the default method above.  */
  return (vcall_offset >= -32768 || ! fixed_regs[5]);
}

/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
sparc_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in local-dynamic base patterns.  */

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
        && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}

static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (x
      && GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void
sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.word\t%r_tls_dtpoff32(", file);
      break;

    case 8:
      fputs ("\t.xword\t%r_tls_dtpoff64(", file);
      break;

    default:
      gcc_unreachable ();
    }

  output_addr_const (file, x);
  fputs (")", file);
}
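
/* Example of the output (illustrative): for SIZE == 4 and a symbol foo,
   this prints

	.word	%r_tls_dtpoff32(foo)

   which the assembler resolves to a DTP-relative relocation.  */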
/* Do whatever processing is required at the end of a file.  */

static void
sparc_file_end (void)
{
  /* If we need to emit the special GOT helper function, do so now.  */
  if (got_helper_rtx)
    {
      const char *name = XSTR (got_helper_rtx, 0);
      const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
#ifdef DWARF2_UNWIND_INFO
      bool do_cfi;
#endif

      if (USE_HIDDEN_LINKONCE)
        {
          tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                                  get_identifier (name),
                                  build_function_type_list (void_type_node,
                                                            NULL_TREE));
          DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
                                           NULL_TREE, void_type_node);
          TREE_STATIC (decl) = 1;
          make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
          DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
          DECL_VISIBILITY_SPECIFIED (decl) = 1;
          resolve_unique_section (decl, 0, flag_function_sections);
          allocate_struct_function (decl, true);
          cfun->is_thunk = 1;
          current_function_decl = decl;
          init_varasm_status ();
          assemble_start_function (decl, name);
        }
      else
        {
          const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
          switch_to_section (text_section);
          if (align > 0)
            ASM_OUTPUT_ALIGN (asm_out_file, align);
          ASM_OUTPUT_LABEL (asm_out_file, name);
        }

#ifdef DWARF2_UNWIND_INFO
      do_cfi = dwarf2out_do_cfi_asm ();
      if (do_cfi)
        fprintf (asm_out_file, "\t.cfi_startproc\n");
#endif
      if (flag_delayed_branch)
        fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
                 reg_name, reg_name);
      else
        fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
                 reg_name, reg_name);
#ifdef DWARF2_UNWIND_INFO
      if (do_cfi)
        fprintf (asm_out_file, "\t.cfi_endproc\n");
#endif
    }

  if (NEED_INDICATE_EXEC_STACK)
    file_end_indicate_exec_stack ();

#ifdef TARGET_SOLARIS
  solaris_file_end ();
#endif
}
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
sparc_mangle_type (const_tree type)
{
  if (!TARGET_64BIT
      && TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
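
/* For instance (hedged): with 128-bit long double on 32-bit SPARC,
   "void f (long double)" mangles as _Z1fg instead of the usual _Z1fe,
   advertising the IEEE quad type to the C++ ABI.  */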
/* Expand code to perform a 8 or 16-bit compare and swap by doing 32-bit
   compare and swap on the word containing the byte or half-word.  */

void
sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
{
  rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
  rtx addr = gen_reg_rtx (Pmode);
  rtx off = gen_reg_rtx (SImode);
  rtx oldv = gen_reg_rtx (SImode);
  rtx newv = gen_reg_rtx (SImode);
  rtx oldvalue = gen_reg_rtx (SImode);
  rtx newvalue = gen_reg_rtx (SImode);
  rtx res = gen_reg_rtx (SImode);
  rtx resv = gen_reg_rtx (SImode);
  rtx memsi, val, mask, end_label, loop_label, cc;

  emit_insn (gen_rtx_SET (VOIDmode, addr,
                          gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));

  if (Pmode != SImode)
    addr1 = gen_lowpart (SImode, addr1);
  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_AND (SImode, addr1, GEN_INT (3))));

  memsi = gen_rtx_MEM (SImode, addr);
  set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
  MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);

  val = force_reg (SImode, memsi);

  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_XOR (SImode, off,
                                       GEN_INT (GET_MODE (mem) == QImode
                                                ? 3 : 2))));

  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));

  if (GET_MODE (mem) == QImode)
    mask = force_reg (SImode, GEN_INT (0xff));
  else
    mask = force_reg (SImode, GEN_INT (0xffff));

  emit_insn (gen_rtx_SET (VOIDmode, mask,
                          gen_rtx_ASHIFT (SImode, mask, off)));

  emit_insn (gen_rtx_SET (VOIDmode, val,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       val)));

  oldval = gen_lowpart (SImode, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_ASHIFT (SImode, oldval, off)));

  newval = gen_lowpart_common (SImode, newval);
  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_ASHIFT (SImode, newval, off)));

  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_AND (SImode, oldv, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_AND (SImode, newv, mask)));

  end_label = gen_label_rtx ();
  loop_label = gen_label_rtx ();
  emit_label (loop_label);

  emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
                          gen_rtx_IOR (SImode, oldv, val)));

  emit_insn (gen_rtx_SET (VOIDmode, newvalue,
                          gen_rtx_IOR (SImode, newv, val)));

  emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));

  emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);

  emit_insn (gen_rtx_SET (VOIDmode, resv,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       res)));

  cc = gen_compare_reg_1 (NE, resv, val);
  emit_insn (gen_rtx_SET (VOIDmode, val, resv));

  /* Use cbranchcc4 to separate the compare and branch!  */
  emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
                                  cc, const0_rtx, loop_label));

  emit_label (end_label);

  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_AND (SImode, res, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_LSHIFTRT (SImode, res, off)));

  emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
}
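
/* Worked example (hedged): for a QImode operand at address 0x1003, the
   containing word is at 0x1000 and off = (3 ^ 3) << 3 = 0, selecting the
   least significant byte; for address 0x1000, off = (0 ^ 3) << 3 = 24,
   the most significant byte, matching big-endian byte order.  */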
/* Implement TARGET_FRAME_POINTER_REQUIRED.  */

static bool
sparc_frame_pointer_required (void)
{
  /* If the stack pointer is dynamically modified in the function, it cannot
     serve as the frame pointer.  */
  if (cfun->calls_alloca)
    return true;

  /* If the function receives nonlocal gotos, it needs to save the frame
     pointer in the nonlocal_goto_save_area object.  */
  if (cfun->has_nonlocal_label)
    return true;

  /* In flat mode, that's it.  */
  if (TARGET_FLAT)
    return false;

  /* Otherwise, the frame pointer is required if the function isn't leaf.  */
  return !(current_function_is_leaf && only_leaf_regs_used ());
}

/* The way this is structured, we can't eliminate SFP in favor of SP
   if the frame pointer is required: we want to use the SFP->HFP elimination
   in that case.  But the test in update_eliminables doesn't know we are
   assuming below that we only do the former elimination.  */

static bool
sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
}

/* Return the hard frame pointer directly to bypass the stack bias.  */

static rtx
sparc_builtin_setjmp_frame_value (void)
{
  return hard_frame_pointer_rtx;
}
/* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
   they won't be allocated.  */

static void
sparc_conditional_register_usage (void)
{
  int regno;

  if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    {
      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g5 */
  /* then honor it.  */
  if (TARGET_ARCH32 && fixed_regs[5])
    fixed_regs[5] = 1;
  else if (TARGET_ARCH64 && fixed_regs[5] == 2)
    fixed_regs[5] = 0;
  if (! TARGET_V9)
    {
      for (regno = SPARC_FIRST_V9_FP_REG;
           regno <= SPARC_LAST_V9_FP_REG;
           regno++)
        fixed_regs[regno] = 1;
      /* %fcc0 is used by v8 and v9.  */
      for (regno = SPARC_FIRST_V9_FCC_REG + 1;
           regno <= SPARC_LAST_V9_FCC_REG;
           regno++)
        fixed_regs[regno] = 1;
    }
  if (! TARGET_FPU)
    {
      for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
        fixed_regs[regno] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g2 */
  /* then honor it.  Likewise with g3 and g4.  */
  if (fixed_regs[2] == 2)
    fixed_regs[2] = ! TARGET_APP_REGS;
  if (fixed_regs[3] == 2)
    fixed_regs[3] = ! TARGET_APP_REGS;
  if (TARGET_ARCH32 && fixed_regs[4] == 2)
    fixed_regs[4] = ! TARGET_APP_REGS;
  else if (TARGET_CM_EMBMEDANY)
    fixed_regs[4] = 1;
  else if (fixed_regs[4] == 2)
    fixed_regs[4] = 0;
  if (TARGET_FLAT)
    {
      /* Disable leaf functions.  */
      memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
        leaf_reg_remap [regno] = regno;
    }
  if (TARGET_VIS)
    global_regs[SPARC_GSR_REG] = 1;
}
/* Implement TARGET_PREFERRED_RELOAD_CLASS:

   - We can't load constants into FP registers.
   - We can't load FP constants into integer registers when soft-float,
     because there is no soft-float pattern with a r/F constraint.
   - We can't load FP constants into integer registers for TFmode unless
     it is 0.0L, because there is no movtf pattern with a r/F constraint.
   - Try and reload integer constants (symbolic or otherwise) back into
     registers directly, rather than having them dumped to memory.  */

static reg_class_t
sparc_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (CONSTANT_P (x))
    {
      if (FP_REG_CLASS_P (rclass)
          || rclass == GENERAL_OR_FP_REGS
          || rclass == GENERAL_OR_EXTRA_FP_REGS
          || (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT && ! TARGET_FPU)
          || (GET_MODE (x) == TFmode && ! const_zero_operand (x, TFmode)))
        return NO_REGS;

      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
        return GENERAL_REGS;
    }

  return rclass;
}
/* Output the assembly for a V8+ 64-bit multiply; NAME is the multiply
   mnemonic to use.  */

const char *
output_v8plus_mult (rtx insn, rtx *operands, const char *name)
{
  char mulstr[32];

  gcc_assert (! TARGET_ARCH64);

  if (sparc_check_64 (operands[1], insn) <= 0)
    output_asm_insn ("srl\t%L1, 0, %L1", operands);
  if (which_alternative == 1)
    output_asm_insn ("sllx\t%H1, 32, %H1", operands);
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      if (which_alternative == 1)
        {
          output_asm_insn ("or\t%L1, %H1, %H1", operands);
          sprintf (mulstr, "%s\t%%H1, %%2, %%L0", name);
          output_asm_insn (mulstr, operands);
          return "srlx\t%L0, 32, %H0";
        }
      else
        {
          output_asm_insn ("sllx\t%H1, 32, %3", operands);
          output_asm_insn ("or\t%L1, %3, %3", operands);
          sprintf (mulstr, "%s\t%%3, %%2, %%3", name);
          output_asm_insn (mulstr, operands);
          output_asm_insn ("srlx\t%3, 32, %H0", operands);
          return "mov\t%3, %L0";
        }
    }
  else if (rtx_equal_p (operands[1], operands[2]))
    {
      if (which_alternative == 1)
        {
          output_asm_insn ("or\t%L1, %H1, %H1", operands);
          sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", name);
          output_asm_insn (mulstr, operands);
          return "srlx\t%L0, 32, %H0";
        }
      else
        {
          output_asm_insn ("sllx\t%H1, 32, %3", operands);
          output_asm_insn ("or\t%L1, %3, %3", operands);
          sprintf (mulstr, "%s\t%%3, %%3, %%3", name);
          output_asm_insn (mulstr, operands);
          output_asm_insn ("srlx\t%3, 32, %H0", operands);
          return "mov\t%3, %L0";
        }
    }
  if (sparc_check_64 (operands[2], insn) <= 0)
    output_asm_insn ("srl\t%L2, 0, %L2", operands);
  if (which_alternative == 1)
    {
      output_asm_insn ("or\t%L1, %H1, %H1", operands);
      output_asm_insn ("sllx\t%H2, 32, %L1", operands);
      output_asm_insn ("or\t%L2, %L1, %L1", operands);
      sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", name);
      output_asm_insn (mulstr, operands);
      return "srlx\t%L0, 32, %H0";
    }
  else
    {
      output_asm_insn ("sllx\t%H1, 32, %3", operands);
      output_asm_insn ("sllx\t%H2, 32, %4", operands);
      output_asm_insn ("or\t%L1, %3, %3", operands);
      output_asm_insn ("or\t%L2, %4, %4", operands);
      sprintf (mulstr, "%s\t%%3, %%4, %%3", name);
      output_asm_insn (mulstr, operands);
      output_asm_insn ("srlx\t%3, 32, %H0", operands);
      return "mov\t%3, %L0";
    }
}
#include "gt-sparc.h"