/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "insn-attr.h"
#include "target-def.h"
#include "cfglayout.h"
#include "langhooks.h"
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};
static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};
static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
const struct processor_costs *sparc_costs = &cypress_costs;
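
/* A note on units: COSTS_N_INSNS (N), from rtl.h, expresses a cost
   equivalent to N simple instructions.  For example, the Cypress entry
   COSTS_N_INSNS (37) for fdivs tells the rtx-cost machinery that a
   single-precision divide is roughly as expensive as 37 ordinary insns,
   so it is worth considerable effort to avoid one.  */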
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
   somebody branches between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) alias_set_type sparc_sr_alias_set;

/* The alias set for the structure return value.  */
static GTY(()) alias_set_type struct_value_alias_set;
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};
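
/* Reading the table: hard registers 24..31 (%i0..%i7) are remapped to
   8..15 (%o0..%o7), so a leaf function's uses of the incoming registers
   are rewritten onto the caller's outgoing registers; %sp (14) maps to
   itself and entries of -1 are never remapped.  */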
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};
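
/* Taken together with leaf_reg_remap: a function qualifies for leaf
   treatment if it touches only the globals, %sp, the incoming registers
   other than %fp, and the FP registers; the locals and most %o registers
   are excluded because no register window is allocated.  */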
struct GTY(()) machine_function
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  bool leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;

static bool sparc_handle_option (size_t, const char *, int);
static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void emit_pic_helper (void);
static void load_pic_register (bool);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#if defined (OBJECT_FORMAT_ELF)
static void sparc_elf_asm_named_section (const char *, unsigned int, tree)
    ATTRIBUTE_UNUSED;
#endif
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, tree, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *, bool);
static bool sparc_promote_prototypes (const_tree);
static rtx sparc_struct_value_rtx (tree, int);
static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
						      int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx legitimize_tls_address (rtx);
static rtx legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
				     enum machine_mode, const_tree, bool);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
				    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif

static void sparc_trampoline_init (rtx, tree, rtx);
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];
struct sparc_cpu_select sparc_select[] =
{
  /* switch	name,		tune	arch */
  { (char *)0,	"default",	1,	1 },
  { (char *)0,	"-mcpu=",	1,	1 },
  { (char *)0,	"-mtune=",	1,	0 },
  { 0, 0, 0, 0 }
};
/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Whether an FPU option was specified.  */
static bool fpu_option_set = false;
/* Initialize the GCC target structure.  */

/* The sparc default is to use .half rather than .short for aligned
   HI objects.  Use .word instead of .long on non-ELF systems.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec
#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif
#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option
#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

struct gcc_target targetm = TARGET_INITIALIZER;
/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}
/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { TARGET_CPU_niagara, "niagara" },
    { TARGET_CPU_niagara2, "niagara2" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
       The Fujitsu MB86934 is the recent sparclite chip, with an fpu.  */
    { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
    /* Although insns using %y are deprecated, it is a clear win on current
       ultrasparcs.  */
						    |MASK_DEPRECATED_V8_INSNS},
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T1 */
    { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T2 */
    { "niagara2", PROCESSOR_NIAGARA2, MASK_ISA, MASK_V9},
    { 0, (enum processor_type) 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;
#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64bit archs to use 128 bit long double */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }
  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }
  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  gcc_assert (def->name);
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string != NULL)
	{
	  for (cpu = &cpu_table[0]; cpu->name; ++cpu)
	    if (! strcmp (sel->string, cpu->name))
	      {
		if (sel->set_tune_p)
		  sparc_cpu = cpu->processor;

		if (sel->set_arch_p)
		  {
		    target_flags &= ~cpu->disable;
		    target_flags |= cpu->enable;
		  }
		break;
	      }

	  if (! cpu->name)
	    error ("bad value (%s) for %s switch", sel->string, sel->name);
	}
    }
  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA
	  || sparc_cpu == PROCESSOR_NIAGARA2))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (! TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;
  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire unique alias sets for our private stuff.  */
  sparc_sr_alias_set = new_alias_set ();
  struct_value_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    };
#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
    set_param_value ("simultaneous-prefetches",
		     ((sparc_cpu == PROCESSOR_ULTRASPARC
		       || sparc_cpu == PROCESSOR_NIAGARA
		       || sparc_cpu == PROCESSOR_NIAGARA2)
		      ? 2
		      : (sparc_cpu == PROCESSOR_ULTRASPARC3
			 ? 8 : 3)));
  if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
    set_param_value ("l1-cache-line-size",
		     ((sparc_cpu == PROCESSOR_ULTRASPARC
		       || sparc_cpu == PROCESSOR_ULTRASPARC3
		       || sparc_cpu == PROCESSOR_NIAGARA
		       || sparc_cpu == PROCESSOR_NIAGARA2)
		      ? 64 : 32));
}
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}
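
/* These are exactly the register-contents conditions the v9 MOVr/BPr
   families provide (movrz, movrnz, movrgz, movrgez, movrlz, movrlez),
   i.e. signed comparisons of a register against zero; unsigned and
   unordered codes have no register-contents form.  */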
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
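
/* To illustrate the split on SFmode bit images: 1.0f is 0x3f800000,
   whose low 10 bits are clear, so fp_sethi_p accepts it (one sethi);
   an image that fits in a signed 13-bit immediate satisfies fp_mov_p
   (one mov); something like 0x3f800001, which needs both high and low
   bits, is left to fp_high_losum_p and the sethi/or pair.  */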
/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && sparc_tls_referenced_p (operands [1]))
    {
      operands[1] = legitimize_tls_address (operands[1]);
      return false;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = legitimize_pic_address (operands[1], NULL_RTX);

      /* VxWorks does not impose a fixed gap between segments; the run-time
	 gap can be different from the object-file gap.  We therefore can't
	 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
	 are absolutely sure that X is in the same segment as the GOT.
	 Unfortunately, the flexibility of linker scripts means that we
	 can't be sure of that in general, so assume that _G_O_T_-relative
	 accesses are never valid on VxWorks.  */
      if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
	{
	  if (mode == SImode)
	    {
	      emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }

	  if (mode == DImode)
	    {
	      gcc_assert (TARGET_ARCH64);
	      emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1] = legitimize_pic_address (operands[1],
						reload_in_progress
						? operands[0] : NULL_RTX);
	  return false;
	}
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
	 not storing directly into memory.  So fix this up to avoid
	 crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && const_zero_operand (operands[1], mode))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && (reload_completed || reload_in_progress))))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

static void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
		  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      GEN_INT (INTVAL (op1)
				       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
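
/* For instance, loading the constant 0x12345678 into %o0 becomes

     sethi %hi(0x12345678), %tmp   ! %tmp = 0x12345400
     or    %tmp, 0x278, %o0        ! 0x12345400 | 0x278 = 0x12345678

   except that the sethi is emitted as a plain move of the masked
   constant so that CSE can see and reuse the intermediate value.  */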
/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp1
	 or	%temp1, %lo(symbol), %reg  */
      if (temp)
	temp1 = temp;  /* op0 is allowed.  */
      else
	temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      if (temp)
	{
	  temp1 = op0;
	  temp2 = op0;
	  temp3 = temp;  /* op0 is allowed.  */
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
			      gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 sllx	%temp3, 32, %temp4
	 or	%temp4, %temp2, %temp5
	 or	%temp5, %lo(symbol), %reg  */
      if (temp)
	{
	  /* It is possible that one of the registers we got for operands[2]
	     might coincide with that of operands[0] (which is why we made
	     it TImode).  Pick the other one to use as our scratch.  */
	  if (rtx_equal_p (temp, op0))
	    {
	      gcc_assert (ti_temp);
	      temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
	    }
	  temp1 = op0;
	  temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	  temp3 = op0;
	  temp4 = op0;
	  temp5 = op0;
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	  temp4 = gen_reg_rtx (DImode);
	  temp5 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
			      gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;
    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			add	%temp1, EMBMEDANY_BASE_REG, %temp2
			or	%temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  if (temp)
	    {
	      temp1 = temp;  /* op0 is allowed.  */
	      temp2 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (temp2, temp1));
	  emit_insn (gen_embmedany_losum (op0, temp2, op1));
	}
      else
	{
	  /* Text segment:	sethi	%uhi(symbol), %temp1
				sethi	%hi(symbol), %temp2
				or	%temp1, %ulo(symbol), %temp3
				sllx	%temp3, 32, %temp4
				or	%temp4, %temp2, %temp5
				or	%temp5, %lo(symbol), %reg  */
	  if (temp)
	    {
	      /* It is possible that one of the registers we got for operands[2]
		 might coincide with that of operands[0] (which is why we made
		 it TImode).  Pick the other one to use as our scratch.  */
	      if (rtx_equal_p (temp, op0))
		{
		  gcc_assert (ti_temp);
		  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
		}
	      temp1 = op0;
	      temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	      temp3 = op0;
	      temp4 = op0;
	      temp5 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	      temp3 = gen_reg_rtx (DImode);
	      temp4 = gen_reg_rtx (DImode);
	      temp5 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_textuhi (temp1, op1));
	  emit_insn (gen_embmedany_texthi  (temp2, op1));
	  emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, temp4,
				  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, temp5,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	  emit_insn (gen_embmedany_textlo  (op0, temp5, op1));
	}
      break;

    default:
      gcc_unreachable ();
    }
}
#if HOST_BITS_PER_WIDE_INT == 32
static void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH; they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits, and match
   a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}
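
/* Example of why gen_safe_HIGH64 masks the constant itself: for
   val = 0x12345678 it generates (set dest (const_int 0x12345400)),
   an ordinary move with fully specified bits that matches the plain
   movdi pattern, instead of a (high:DI ...) whose low bits CSE would
   have to treat as unknown.  */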
/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to do in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-(HOST_WIDE_INT)0x400
						   | (low_bits & 0x3ff)))));
	}
    }
}
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT high_bits,
			       unsigned HOST_WIDE_INT low_immediate,
			       int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
			  gen_rtx_ASHIFT (DImode, temp2,
					  GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
			    gen_safe_OR64 (op0, low_immediate)));
}
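
/* E.g. a call with high_bits = 0xdeadbeef, low_immediate = 0 and
   shift_count = 32 materializes 0xdeadbeef00000000 as

     sethi %hi(0xdeadbeef), %tmp
     or    %tmp, 0x2ef, %reg
     sllx  %reg, 32, %reg  */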
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
					     unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
				unsigned HOST_WIDE_INT high_bits,
				unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				sub_temp,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp3,
				  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp3)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	}
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
	 painful.  However we do still make an attempt to
	 avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low1)));
	  sub_temp = op0;
	  to_shift = 12;
	}
      else
	{
	  to_shift += 12;
	}
      if (low2 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low2)));
	  sub_temp = op0;
	  to_shift = 8;
	}
      else
	{
	  to_shift += 8;
	}
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (to_shift))));
      if (low3 != const0_rtx)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_IOR (DImode, op0, low3)));
    }
}
/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
			unsigned HOST_WIDE_INT low_bits,
			int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
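
/* For example, 0x00000003ffffc000 (high_bits 0x3, low_bits 0xffffc000)
   comes back with lowest_bit_set = 14, highest_bit_set = 33 and
   all_bits_between_are_set = 1: a contiguous 20-bit run that the
   callers below can build with a single sethi plus one shift.  */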
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
		   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
							unsigned HOST_WIDE_INT,
							int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
			  unsigned HOST_WIDE_INT low_bits,
			  int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
/* Here we are sure to be arch64 and this is an integer constant
   being loaded into a register.  Emit the most efficient
   insn sequence possible.  Detection of all the 1-insn cases
   has been done already.  */
void
sparc_emit_set_const64 (rtx op0, rtx op1)
{
  unsigned HOST_WIDE_INT high_bits, low_bits;
  int lowest_bit_set, highest_bit_set;
  int all_bits_between_are_set;
  rtx temp = 0;

  /* Sanity check that we know what we are working with.  */
  gcc_assert (TARGET_ARCH64
	      && (GET_CODE (op0) == SUBREG
		  || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));

  if (reload_in_progress || reload_completed)
    temp = op0;

  if (GET_CODE (op1) != CONST_INT)
    {
      sparc_emit_set_symbolic_const64 (op0, op1, temp);
      return;
    }

  if (! temp)
    temp = gen_reg_rtx (DImode);

  high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
  low_bits = (INTVAL (op1) & 0xffffffff);

  /* low_bits	bits 0  --> 31
     high_bits	bits 32 --> 63  */

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  /* First try for a 2-insn sequence.  */

  /* These situations are preferred because the optimizer can
   * do more things with them:
   * 1) mov	-1, %reg
   *    sllx	%reg, shift, %reg
   * 2) mov	-1, %reg
   *    srlx	%reg, shift, %reg
   * 3) mov	some_small_const, %reg
   *    sllx	%reg, shift, %reg
   */
  if (((highest_bit_set == 63
	|| lowest_bit_set == 0)
       && all_bits_between_are_set != 0)
      || ((highest_bit_set - lowest_bit_set) < 12))
    {
      HOST_WIDE_INT the_const = -1;
      int shift = lowest_bit_set;

      if ((highest_bit_set != 63
	   && lowest_bit_set != 0)
	  || all_bits_between_are_set == 0)
	{
	  the_const =
	    create_simple_focus_bits (high_bits, low_bits,
				      lowest_bit_set, 0);
	}
      else if (lowest_bit_set == 0)
	shift = -(63 - highest_bit_set);

      gcc_assert (SPARC_SIMM13_P (the_const));
      gcc_assert (shift != 0);

      emit_insn (gen_safe_SET64 (temp, the_const));
      if (shift > 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_ASHIFT (DImode,
						temp,
						GEN_INT (shift))));
      else if (shift < 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_LSHIFTRT (DImode,
						  temp,
						  GEN_INT (-shift))));
      return;
    }

  /* Now a range of 22 or less bits set somewhere.
   * 1) sethi	%hi(focus_bits), %reg
   *    sllx	%reg, shift, %reg
   * 2) sethi	%hi(focus_bits), %reg
   *    srlx	%reg, shift, %reg
   */
  if ((highest_bit_set - lowest_bit_set) < 21)
    {
      unsigned HOST_WIDE_INT focus_bits =
	create_simple_focus_bits (high_bits, low_bits,
				  lowest_bit_set, 10);

      gcc_assert (SPARC_SETHI_P (focus_bits));
      gcc_assert (lowest_bit_set != 10);

      emit_insn (gen_safe_HIGH64 (temp, focus_bits));

      /* If lowest_bit_set == 10 then a sethi alone could have done it.  */
      if (lowest_bit_set < 10)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_LSHIFTRT (DImode, temp,
						  GEN_INT (10 - lowest_bit_set))));
      else if (lowest_bit_set > 10)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_ASHIFT (DImode, temp,
						GEN_INT (lowest_bit_set - 10))));
      return;
    }

  /* 1) sethi	%hi(low_bits), %reg
   *    or	%reg, %lo(low_bits), %reg
   * 2) sethi	%hi(~low_bits), %reg
   *	xor	%reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
   */
  if (high_bits == 0
      || high_bits == 0xffffffff)
    {
      sparc_emit_set_const64_quick1 (op0, temp, low_bits,
				     (high_bits == 0xffffffff));
      return;
    }

  /* Now, try 3-insn sequences.  */

  /* 1) sethi	%hi(high_bits), %reg
   *    or	%reg, %lo(high_bits), %reg
   *    sllx	%reg, 32, %reg
   */
  if (low_bits == 0)
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
      return;
    }

  /* We may be able to do something quick
     when the constant is negated, so try that.  */
  if (const64_is_2insns ((~high_bits) & 0xffffffff,
			 (~low_bits) & 0xfffffc00))
    {
      /* NOTE: The trailing bits get XOR'd so we need the
	 non-negated bits, not the negated ones.  */
      unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;

      if ((((~high_bits) & 0xffffffff) == 0
	   && ((~low_bits) & 0x80000000) == 0)
	  || (((~high_bits) & 0xffffffff) == 0xffffffff
	      && ((~low_bits) & 0x80000000) != 0))
	{
	  unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);

	  if ((SPARC_SETHI_P (fast_int)
	       && (~high_bits & 0xffffffff) == 0)
	      || SPARC_SIMM13_P (fast_int))
	    emit_insn (gen_safe_SET64 (temp, fast_int));
	  else
	    sparc_emit_set_const64 (temp, GEN_INT (fast_int));
	}
      else
	{
	  rtx negated_const;
	  negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
				   (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
	  sparc_emit_set_const64 (temp, negated_const);
	}

      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if (trailing_bits == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode,
				  op0,
				  gen_safe_XOR64 (temp,
						  (-0x400 | trailing_bits))));
	}
      return;
    }

  /* 1) sethi	%hi(xxx), %reg
   *    or	%reg, %lo(xxx), %reg
   *	sllx	%reg, yyy, %reg
   *
   * ??? This is just a generalized version of the low_bits==0
   * thing above, FIXME...
   */
  if ((highest_bit_set - lowest_bit_set) < 32)
    {
      unsigned HOST_WIDE_INT focus_bits =
	create_simple_focus_bits (high_bits, low_bits,
				  lowest_bit_set, 0);

      /* We can't get here in this state.  */
      gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);

      /* So what we know is that the set bits straddle the
	 middle of the 64-bit word.  */
      sparc_emit_set_const64_quick2 (op0, temp,
				     focus_bits, 0,
				     lowest_bit_set);
      return;
    }

  /* 1) sethi	%hi(high_bits), %reg
   *    or	%reg, %lo(high_bits), %reg
   *    sllx	%reg, 32, %reg
   *	or	%reg, low_bits, %reg
   */
  if (SPARC_SIMM13_P(low_bits)
      && ((int)low_bits > 0))
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
      return;
    }

  /* The easiest way when all else fails, is full decomposition.  */
#if 0
  printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
	  high_bits, low_bits, ~high_bits, ~low_bits);
#endif
  sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
}
#endif /* HOST_BITS_PER_WIDE_INT == 32 */
/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
   return the mode to be used for the comparison.  For floating-point,
   CCFP[E]mode is used.  CC_NOOVmode should be used when the first operand
   is a PLUS, MINUS, NEG, or ASHIFT.  CCmode should be used when no special
   processing is needed.  */

enum machine_mode
select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      switch (op)
	{
	case EQ:
	case NE:
	case UNORDERED:
	case ORDERED:
	case UNLT:
	case UNLE:
	case UNGT:
	case UNGE:
	case UNEQ:
	case LTGT:
	  return CCFPmode;

	case LT:
	case LE:
	case GT:
	case GE:
	  return CCFPEmode;

	default:
	  gcc_unreachable ();
	}
    }
  else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
	   || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
    {
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
	return CCX_NOOVmode;
      else
	return CC_NOOVmode;
    }
  else
    {
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
	return CCXmode;
      else
	return CCmode;
    }
}
1995 /* Emit the compare insn and return the CC reg for a CODE comparison
1996 with operands X and Y. */
1999 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2001 enum machine_mode mode;
2004 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2007 mode = SELECT_CC_MODE (code, x, y);
2009 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2010 fcc regs (cse can't tell they're really call clobbered regs and will
2011 remove a duplicate comparison even if there is an intervening function
2012 call - it will then try to reload the cc reg via an int reg which is why
2013 we need the movcc patterns). It is possible to provide the movcc
2014 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2015 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2016 to tell cse that CCFPE mode registers (even pseudos) are call
2019 /* ??? This is an experiment. Rather than making changes to cse which may
2020 or may not be easy/clean, we do our own cse. This is possible because
2021 we will generate hard registers. Cse knows they're call clobbered (it
2022 doesn't know the same thing about pseudos). If we guess wrong, no big
2023 deal, but if we win, great! */
2025 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2026 #if 1 /* experiment */
2029 /* We cycle through the registers to ensure they're all exercised. */
2030 static int next_fcc_reg = 0;
2031 /* Previous x,y for each fcc reg. */
2032 static rtx prev_args[4][2];
2034 /* Scan prev_args for x,y. */
2035 for (reg = 0; reg < 4; reg++)
2036 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2041 prev_args[reg][0] = x;
2042 prev_args[reg][1] = y;
2043 next_fcc_reg = (next_fcc_reg + 1) & 3;
2045 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2048 cc_reg = gen_reg_rtx (mode);
2049 #endif /* ! experiment */
2050 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2051 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2053 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2055 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD.  If we do, this
2056 will only result in an unrecognizable insn so no point in asserting. */
2057 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2063 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2066 gen_compare_reg (rtx cmp)
2068 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2071 /* This function is used for v9 only.
2072 DEST is the target of the Scc insn.
2073 CODE is the code for an Scc's comparison.
2074 X and Y are the values we compare.
2076 This function is needed to turn
2079 (gt (reg:CCX 100 %icc)
2083 (gt:DI (reg:CCX 100 %icc)
2086 I.e., the instruction recognizer needs to see the mode of the comparison to
2087 find the right instruction. We could use "gt:DI" right in the
2088 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2091 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2094 && (GET_MODE (x) == DImode
2095 || GET_MODE (dest) == DImode))
2098 /* Try to use the movrCC insns. */
2100 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2102 && v9_regcmp_p (compare_code))
2107 /* Special case for op0 != 0.  This can be done with one instruction if
dest == op0.  */
2110 if (compare_code == NE
2111 && GET_MODE (dest) == DImode
2112 && rtx_equal_p (op0, dest))
2114 emit_insn (gen_rtx_SET (VOIDmode, dest,
2115 gen_rtx_IF_THEN_ELSE (DImode,
2116 gen_rtx_fmt_ee (compare_code, DImode,
2123 if (reg_overlap_mentioned_p (dest, op0))
2125 /* Handle the case where dest == x.
2126 We "early clobber" the result. */
2127 op0 = gen_reg_rtx (GET_MODE (x));
2128 emit_move_insn (op0, x);
2131 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2132 if (GET_MODE (op0) != DImode)
2134 temp = gen_reg_rtx (DImode);
2135 convert_move (temp, op0, 0);
2139 emit_insn (gen_rtx_SET (VOIDmode, dest,
2140 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2141 gen_rtx_fmt_ee (compare_code, DImode,
2149 x = gen_compare_reg_1 (compare_code, x, y);
2152 gcc_assert (GET_MODE (x) != CC_NOOVmode
2153 && GET_MODE (x) != CCX_NOOVmode);
2155 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2156 emit_insn (gen_rtx_SET (VOIDmode, dest,
2157 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2158 gen_rtx_fmt_ee (compare_code,
2159 GET_MODE (x), x, y),
2160 const1_rtx, dest)));
2166 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2167 without jumps using the addx/subx instructions. */
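/* As an illustration (not from the original sources), an unsigned
   "dest = (x < y)" can be computed on v8 without a branch as

	subcc	%o0, %o1, %g0	! sets the carry bit iff x < y (unsigned)
	addx	%g0, 0, %o2	! dest = 0 + 0 + carry

   which is the addx/subx technique mentioned above.  */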
2170 emit_scc_insn (rtx operands[])
2177 /* The quad-word fp compare library routines all return nonzero to indicate
2178 true, which is different from the equivalent libgcc routines, so we must
2179 handle them specially here. */
2180 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2182 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2183 GET_CODE (operands[1]));
2184 operands[2] = XEXP (operands[1], 0);
2185 operands[3] = XEXP (operands[1], 1);
2188 code = GET_CODE (operands[1]);
2192 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2193 more applications). The exception to this is "reg != 0" which can
2194 be done in one instruction on v9 (so we do it). */
2197 if (GET_MODE (x) == SImode)
2199 rtx pat = gen_seqsi_special (operands[0], x, y);
2203 else if (GET_MODE (x) == DImode)
2205 rtx pat = gen_seqdi_special (operands[0], x, y);
2213 if (GET_MODE (x) == SImode)
2215 rtx pat = gen_snesi_special (operands[0], x, y);
2219 else if (GET_MODE (x) == DImode)
2221 rtx pat = gen_snedi_special (operands[0], x, y);
2227 /* For the rest, on v9 we can use conditional moves. */
2231 if (gen_v9_scc (operands[0], code, x, y))
2235 /* We can do LTU and GEU using the addx/subx instructions too. And
2236 for GTU/LEU, if both operands are registers swap them and fall
2237 back to the easy case. */
2238 if (code == GTU || code == LEU)
2240 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2241 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2246 code = swap_condition (code);
2250 if (code == LTU || code == GEU)
2252 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2253 gen_rtx_fmt_ee (code, SImode,
2254 gen_compare_reg_1 (code, x, y),
2259 /* Nope, do branches. */
2263 /* Emit a conditional jump insn for the v9 architecture using comparison code
2264 CODE and jump target LABEL.
2265 This function exists to take advantage of the v9 brxx insns. */
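/* For instance (an illustrative sketch, not from the original sources),
   "if (x != 0) goto L" with x in a DImode register can be emitted on v9
   as a single branch on register contents,

	brnz,pt	%o0, .L
	 nop

   instead of a compare followed by a conditional branch on %icc.  */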
2268 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2270 emit_jump_insn (gen_rtx_SET (VOIDmode,
2272 gen_rtx_IF_THEN_ELSE (VOIDmode,
2273 gen_rtx_fmt_ee (code, GET_MODE (op0),
2275 gen_rtx_LABEL_REF (VOIDmode, label),
2280 emit_conditional_branch_insn (rtx operands[])
2282 /* The quad-word fp compare library routines all return nonzero to indicate
2283 true, which is different from the equivalent libgcc routines, so we must
2284 handle them specially here. */
2285 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2287 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2288 GET_CODE (operands[0]));
2289 operands[1] = XEXP (operands[0], 0);
2290 operands[2] = XEXP (operands[0], 1);
2293 if (TARGET_ARCH64 && operands[2] == const0_rtx
2294 && GET_CODE (operands[1]) == REG
2295 && GET_MODE (operands[1]) == DImode)
2297 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2301 operands[1] = gen_compare_reg (operands[0]);
2302 operands[2] = const0_rtx;
2303 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2304 operands[1], operands[2]);
2305 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2310 /* Generate a DFmode part of a hard TFmode register.
2311 REG is the TFmode hard register, LOW is 1 for the
2312 low 64 bits of the register and 0 otherwise.  */
2315 gen_df_reg (rtx reg, int low)
2317 int regno = REGNO (reg);
2319 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2320 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2321 return gen_rtx_REG (DFmode, regno);
2324 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2325 Unlike normal calls, TFmode operands are passed by reference. It is
2326 assumed that no more than 3 operands are required. */
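/* As an illustration (not from the original sources), with the 64-bit
   quad ABI a TFmode addition r = x + y is lowered to a call resembling

     _Qp_add (&r, &x, &y);

   i.e. the result slot and both operands are passed by reference, which
   is the convention implemented below.  */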
2329 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2331 rtx ret_slot = NULL, arg[3], func_sym;
2334 /* We only expect to be called for conversions, unary, and binary ops. */
2335 gcc_assert (nargs == 2 || nargs == 3);
2337 for (i = 0; i < nargs; ++i)
2339 rtx this_arg = operands[i];
2342 /* TFmode arguments and return values are passed by reference. */
2343 if (GET_MODE (this_arg) == TFmode)
2345 int force_stack_temp;
2347 force_stack_temp = 0;
2348 if (TARGET_BUGGY_QP_LIB && i == 0)
2349 force_stack_temp = 1;
2351 if (GET_CODE (this_arg) == MEM
2352 && ! force_stack_temp)
2353 this_arg = XEXP (this_arg, 0);
2354 else if (CONSTANT_P (this_arg)
2355 && ! force_stack_temp)
2357 this_slot = force_const_mem (TFmode, this_arg);
2358 this_arg = XEXP (this_slot, 0);
2362 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2364 /* Operand 0 is the return value. We'll copy it out later. */
2366 emit_move_insn (this_slot, this_arg);
2368 ret_slot = this_slot;
2370 this_arg = XEXP (this_slot, 0);
2377 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2379 if (GET_MODE (operands[0]) == TFmode)
2382 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2383 arg[0], GET_MODE (arg[0]),
2384 arg[1], GET_MODE (arg[1]));
2386 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2387 arg[0], GET_MODE (arg[0]),
2388 arg[1], GET_MODE (arg[1]),
2389 arg[2], GET_MODE (arg[2]));
2392 emit_move_insn (operands[0], ret_slot);
2398 gcc_assert (nargs == 2);
2400 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2401 GET_MODE (operands[0]), 1,
2402 arg[1], GET_MODE (arg[1]));
2404 if (ret != operands[0])
2405 emit_move_insn (operands[0], ret);
2409 /* Expand soft-float TFmode calls to SPARC ABI routines.  */
2412 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2434 emit_soft_tfmode_libcall (func, 3, operands);
2438 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2442 gcc_assert (code == SQRT);
2445 emit_soft_tfmode_libcall (func, 2, operands);
2449 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2456 switch (GET_MODE (operands[1]))
2469 case FLOAT_TRUNCATE:
2470 switch (GET_MODE (operands[0]))
2484 switch (GET_MODE (operands[1]))
2489 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2499 case UNSIGNED_FLOAT:
2500 switch (GET_MODE (operands[1]))
2505 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2516 switch (GET_MODE (operands[0]))
2530 switch (GET_MODE (operands[0]))
2547 emit_soft_tfmode_libcall (func, 2, operands);
2550 /* Expand a hard-float TFmode operation.  All arguments must be in
registers.  */
2554 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2558 if (GET_RTX_CLASS (code) == RTX_UNARY)
2560 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2561 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2565 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2566 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2567 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2568 operands[1], operands[2]);
2571 if (register_operand (operands[0], VOIDmode))
2574 dest = gen_reg_rtx (GET_MODE (operands[0]));
2576 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2578 if (dest != operands[0])
2579 emit_move_insn (operands[0], dest);
2583 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2585 if (TARGET_HARD_QUAD)
2586 emit_hard_tfmode_operation (code, operands);
2588 emit_soft_tfmode_binop (code, operands);
2592 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2594 if (TARGET_HARD_QUAD)
2595 emit_hard_tfmode_operation (code, operands);
2597 emit_soft_tfmode_unop (code, operands);
2601 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2603 if (TARGET_HARD_QUAD)
2604 emit_hard_tfmode_operation (code, operands);
2606 emit_soft_tfmode_cvt (code, operands);
2609 /* Return nonzero if a branch/jump/call instruction will be emitting
2610 a nop into its delay slot.  */
2613 empty_delay_slot (rtx insn)
2617 /* If no previous instruction (should not happen), return true. */
2618 if (PREV_INSN (insn) == NULL)
2621 seq = NEXT_INSN (PREV_INSN (insn));
2622 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2628 /* Return nonzero if TRIAL can go into the call delay slot. */
2631 tls_call_delay (rtx trial)
/* Binutils allows
2636 call __tls_get_addr, %tgd_call (foo)
2637 add %l7, %o0, %o0, %tgd_add (foo)
2638 while Sun as/ld does not. */
2639 if (TARGET_GNU_TLS || !TARGET_TLS)
2642 pat = PATTERN (trial);
2644 /* We must reject tgd_add{32|64}, i.e.
2645 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2646 and tldm_add{32|64}, i.e.
2647 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2649 if (GET_CODE (pat) == SET
2650 && GET_CODE (SET_SRC (pat)) == PLUS)
2652 rtx unspec = XEXP (SET_SRC (pat), 1);
2654 if (GET_CODE (unspec) == UNSPEC
2655 && (XINT (unspec, 1) == UNSPEC_TLSGD
2656 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2663 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2664 instruction. RETURN_P is true if the v9 variant 'return' is to be
2665 considered in the test too.
2667 TRIAL must be a SET whose destination is a REG appropriate for the
2668 'restore' instruction or, if RETURN_P is true, for the 'return'
instruction.  */
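/* For example (illustrative, not from the original sources), a final
   "set %i0 to 1" insn in the delay slot can be folded with the window
   restore into the single instruction

	restore	%g0, 1, %o0

   where the destination names the return-value register as seen in the
   caller's window after the restore.  */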
2672 eligible_for_restore_insn (rtx trial, bool return_p)
2674 rtx pat = PATTERN (trial);
2675 rtx src = SET_SRC (pat);
2677 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2678 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2679 && arith_operand (src, GET_MODE (src)))
2682 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2684 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2687 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2688 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2689 && arith_double_operand (src, GET_MODE (src)))
2690 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2692 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2693 else if (! TARGET_FPU && register_operand (src, SFmode))
2696 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2697 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2700 /* If we have the 'return' instruction, anything that does not use
2701 local or output registers and can go into a delay slot wins. */
2702 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2703 && (get_attr_in_uncond_branch_delay (trial)
2704 == IN_UNCOND_BRANCH_DELAY_TRUE))
2707 /* The 'restore src1,src2,dest' pattern for SImode. */
2708 else if (GET_CODE (src) == PLUS
2709 && register_operand (XEXP (src, 0), SImode)
2710 && arith_operand (XEXP (src, 1), SImode))
2713 /* The 'restore src1,src2,dest' pattern for DImode. */
2714 else if (GET_CODE (src) == PLUS
2715 && register_operand (XEXP (src, 0), DImode)
2716 && arith_double_operand (XEXP (src, 1), DImode))
2719 /* The 'restore src1,%lo(src2),dest' pattern. */
2720 else if (GET_CODE (src) == LO_SUM
2721 && ! TARGET_CM_MEDMID
2722 && ((register_operand (XEXP (src, 0), SImode)
2723 && immediate_operand (XEXP (src, 1), SImode))
2725 && register_operand (XEXP (src, 0), DImode)
2726 && immediate_operand (XEXP (src, 1), DImode))))
2729 /* The 'restore src,src,dest' pattern. */
2730 else if (GET_CODE (src) == ASHIFT
2731 && (register_operand (XEXP (src, 0), SImode)
2732 || register_operand (XEXP (src, 0), DImode))
2733 && XEXP (src, 1) == const1_rtx)
2739 /* Return nonzero if TRIAL can go into the function return's
2743 eligible_for_return_delay (rtx trial)
2747 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2750 if (get_attr_length (trial) != 1)
2753 /* If there are any call-saved registers, we should scan TRIAL to see
2754 whether it references any of them.  For now just make it easy.  */
2758 /* If the function uses __builtin_eh_return, the eh_return machinery
2759 occupies the delay slot. */
2760 if (crtl->calls_eh_return)
2763 /* In the case of a true leaf function, anything can go into the slot. */
2764 if (sparc_leaf_function_p)
2765 return get_attr_in_uncond_branch_delay (trial)
2766 == IN_UNCOND_BRANCH_DELAY_TRUE;
2768 pat = PATTERN (trial);
2770 /* Otherwise, only operations which can be done in tandem with
2771 a `restore' or `return' insn can go into the delay slot. */
2772 if (GET_CODE (SET_DEST (pat)) != REG
2773 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2776 /* If this instruction sets up a floating-point register and we have a return
2777 instruction, it can probably go in.  But restore will not work
with FP_REGS.  */
2779 if (REGNO (SET_DEST (pat)) >= 32)
2781 && ! epilogue_renumber (&pat, 1)
2782 && (get_attr_in_uncond_branch_delay (trial)
2783 == IN_UNCOND_BRANCH_DELAY_TRUE));
2785 return eligible_for_restore_insn (trial, true);
2788 /* Return nonzero if TRIAL can go into the sibling call's
2792 eligible_for_sibcall_delay (rtx trial)
2796 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2799 if (get_attr_length (trial) != 1)
2802 pat = PATTERN (trial);
2804 if (sparc_leaf_function_p)
2806 /* If the tail call is done using the call instruction,
2807 we have to restore %o7 in the delay slot. */
2808 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2811 /* %g1 is used to build the function address.  */
2812 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2818 /* Otherwise, only operations which can be done in tandem with
2819 a `restore' insn can go into the delay slot. */
2820 if (GET_CODE (SET_DEST (pat)) != REG
2821 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2822 || REGNO (SET_DEST (pat)) >= 32)
2825 /* If it mentions %o7, it can't go in, because sibcall will clobber it
before the instruction in the delay slot gets executed.  */
2827 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2830 return eligible_for_restore_insn (trial, false);
2834 short_branch (int uid1, int uid2)
2836 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2838 /* Leave a few words of "slop". */
2839 if (delta >= -1023 && delta <= 1022)
2845 /* Return nonzero if REG is not used after INSN.
2846 We assume REG is a reload reg, and therefore does
2847 not live past labels or calls or jumps. */
2849 reg_unused_after (rtx reg, rtx insn)
2851 enum rtx_code code, prev_code = UNKNOWN;
2853 while ((insn = NEXT_INSN (insn)))
2855 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2858 code = GET_CODE (insn);
2859 if (GET_CODE (insn) == CODE_LABEL)
2864 rtx set = single_set (insn);
2865 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2868 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2870 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2878 /* Determine if it's legal to put X into the constant pool. This
2879 is not possible if X contains the address of a symbol that is
2880 not constant (TLS) or not known at final link time (PIC). */
2883 sparc_cannot_force_const_mem (rtx x)
2885 switch (GET_CODE (x))
2890 /* Accept all non-symbolic constants. */
2894 /* Labels are OK iff we are non-PIC. */
2895 return flag_pic != 0;
2898 /* 'Naked' TLS symbol references are never OK,
2899 non-TLS symbols are OK iff we are non-PIC. */
2900 if (SYMBOL_REF_TLS_MODEL (x))
2903 return flag_pic != 0;
2906 return sparc_cannot_force_const_mem (XEXP (x, 0));
2909 return sparc_cannot_force_const_mem (XEXP (x, 0))
2910 || sparc_cannot_force_const_mem (XEXP (x, 1));
2919 static GTY(()) char pic_helper_symbol_name[256];
2920 static GTY(()) rtx pic_helper_symbol;
2921 static GTY(()) bool pic_helper_emitted_p = false;
2922 static GTY(()) rtx global_offset_table;
2924 /* Ensure that we are not using patterns that are not OK with PIC. */
2932 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2933 && (GET_CODE (recog_data.operand[i]) != CONST
2934 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2935 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2936 == global_offset_table)
2937 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2945 /* Return true if X is an address which needs a temporary register when
2946 reloaded while generating PIC code. */
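/* E.g. (an illustrative case, not from the original text), the address
   "&sym + 0x12345" needs a scratch register under PIC: the 0x12345
   offset does not fit the signed 13-bit immediate field, so it must be
   built separately and added to the GOT-relative address.  */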
2949 pic_address_needs_scratch (rtx x)
2951 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
2952 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2953 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2954 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2955 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2961 /* Determine if a given RTX is a valid constant. We already know this
2962 satisfies CONSTANT_P. */
2965 legitimate_constant_p (rtx x)
2967 switch (GET_CODE (x))
2971 if (sparc_tls_referenced_p (x))
2976 if (GET_MODE (x) == VOIDmode)
2979 /* Floating point constants are generally not ok.
2980 The only exception is 0.0 in VIS. */
2982 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
2983 && const_zero_operand (x, GET_MODE (x)))
2989 /* Vector constants are generally not ok.
2990 The only exception is 0 in VIS. */
2992 && const_zero_operand (x, GET_MODE (x)))
3004 /* Determine if a given RTX is a valid constant address. */
3007 constant_address_p (rtx x)
3009 switch (GET_CODE (x))
3017 if (flag_pic && pic_address_needs_scratch (x))
3019 return legitimate_constant_p (x);
3022 return !flag_pic && legitimate_constant_p (x);
3029 /* Nonzero if the constant value X is a legitimate general operand
3030 when generating PIC code. It is given that flag_pic is on and
3031 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3034 legitimate_pic_operand_p (rtx x)
3036 if (pic_address_needs_scratch (x))
3038 if (sparc_tls_referenced_p (x))
3043 /* Return nonzero if ADDR is a valid memory address.
3044 STRICT specifies whether strict register checking applies. */
3047 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3049 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3051 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3053 else if (GET_CODE (addr) == PLUS)
3055 rs1 = XEXP (addr, 0);
3056 rs2 = XEXP (addr, 1);
3058 /* Canonicalize.  REG comes first; if there are no regs,
3059 LO_SUM comes first. */
3061 && GET_CODE (rs1) != SUBREG
3063 || GET_CODE (rs2) == SUBREG
3064 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3066 rs1 = XEXP (addr, 1);
3067 rs2 = XEXP (addr, 0);
3071 && rs1 == pic_offset_table_rtx
3073 && GET_CODE (rs2) != SUBREG
3074 && GET_CODE (rs2) != LO_SUM
3075 && GET_CODE (rs2) != MEM
3076 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
3077 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3078 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3080 || GET_CODE (rs1) == SUBREG)
3081 && RTX_OK_FOR_OFFSET_P (rs2)))
3086 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3087 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3089 /* We prohibit REG + REG for TFmode when there are no quad move insns
3090 and we consequently need to split. We do this because REG+REG
3091 is not an offsettable address. If we get the situation in reload
3092 where source and destination of a movtf pattern are both MEMs with
3093 REG+REG address, then only one of them gets converted to an
3094 offsettable address. */
3096 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3099 /* We prohibit REG + REG on ARCH32 if not optimizing for
3100 DFmode/DImode because then mem_min_alignment is likely to be zero
3101 after reload and the forced split would lack a matching splitter
pattern.  */
3103 if (TARGET_ARCH32 && !optimize
3104 && (mode == DFmode || mode == DImode))
3107 else if (USE_AS_OFFSETABLE_LO10
3108 && GET_CODE (rs1) == LO_SUM
3110 && ! TARGET_CM_MEDMID
3111 && RTX_OK_FOR_OLO10_P (rs2))
3114 imm1 = XEXP (rs1, 1);
3115 rs1 = XEXP (rs1, 0);
3116 if (!CONSTANT_P (imm1)
3117 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3121 else if (GET_CODE (addr) == LO_SUM)
3123 rs1 = XEXP (addr, 0);
3124 imm1 = XEXP (addr, 1);
3126 if (!CONSTANT_P (imm1)
3127 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3130 /* We can't allow TFmode in 32-bit mode, because an offset greater
3131 than the alignment (8) may cause the LO_SUM to overflow. */
3132 if (mode == TFmode && TARGET_ARCH32)
3135 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3140 if (GET_CODE (rs1) == SUBREG)
3141 rs1 = SUBREG_REG (rs1);
3147 if (GET_CODE (rs2) == SUBREG)
3148 rs2 = SUBREG_REG (rs2);
3155 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3156 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3161 if ((REGNO (rs1) >= 32
3162 && REGNO (rs1) != FRAME_POINTER_REGNUM
3163 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3165 && (REGNO (rs2) >= 32
3166 && REGNO (rs2) != FRAME_POINTER_REGNUM
3167 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3173 /* Construct the SYMBOL_REF for the __tls_get_addr function.  */
3175 static GTY(()) rtx sparc_tls_symbol;
3178 sparc_tls_get_addr (void)
3180 if (!sparc_tls_symbol)
3181 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3183 return sparc_tls_symbol;
3187 sparc_tls_got (void)
3192 crtl->uses_pic_offset_table = 1;
3193 return pic_offset_table_rtx;
3196 if (!global_offset_table)
3197 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3198 temp = gen_reg_rtx (Pmode);
3199 emit_move_insn (temp, global_offset_table);
3203 /* Return true if X contains a thread-local symbol. */
3206 sparc_tls_referenced_p (rtx x)
3208 if (!TARGET_HAVE_TLS)
3211 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3212 x = XEXP (XEXP (x, 0), 0);
3214 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3217 /* That's all we handle in legitimize_tls_address for now. */
3221 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3222 this (thread-local) address. */
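/* For reference (an illustrative sketch of the 32-bit global-dynamic
   case, not from the original sources; register choices vary), the
   gen_tgd_* patterns used below expand to a sequence shaped like

	sethi	%tgd_hi22(sym), %g1
	add	%g1, %tgd_lo10(sym), %g1
	add	%l7, %g1, %o0, %tgd_add(sym)
	call	__tls_get_addr, %tgd_call(sym)
	 nop

   after which %o0 holds the address of the thread-local variable.  */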
3225 legitimize_tls_address (rtx addr)
3227 rtx temp1, temp2, temp3, ret, o0, got, insn;
3229 gcc_assert (can_create_pseudo_p ());
3231 if (GET_CODE (addr) == SYMBOL_REF)
3232 switch (SYMBOL_REF_TLS_MODEL (addr))
3234 case TLS_MODEL_GLOBAL_DYNAMIC:
3236 temp1 = gen_reg_rtx (SImode);
3237 temp2 = gen_reg_rtx (SImode);
3238 ret = gen_reg_rtx (Pmode);
3239 o0 = gen_rtx_REG (Pmode, 8);
3240 got = sparc_tls_got ();
3241 emit_insn (gen_tgd_hi22 (temp1, addr));
3242 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3245 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3246 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3251 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3252 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3255 CALL_INSN_FUNCTION_USAGE (insn)
3256 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3257 CALL_INSN_FUNCTION_USAGE (insn));
3258 insn = get_insns ();
3260 emit_libcall_block (insn, ret, o0, addr);
3263 case TLS_MODEL_LOCAL_DYNAMIC:
3265 temp1 = gen_reg_rtx (SImode);
3266 temp2 = gen_reg_rtx (SImode);
3267 temp3 = gen_reg_rtx (Pmode);
3268 ret = gen_reg_rtx (Pmode);
3269 o0 = gen_rtx_REG (Pmode, 8);
3270 got = sparc_tls_got ();
3271 emit_insn (gen_tldm_hi22 (temp1));
3272 emit_insn (gen_tldm_lo10 (temp2, temp1));
3275 emit_insn (gen_tldm_add32 (o0, got, temp2));
3276 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3281 emit_insn (gen_tldm_add64 (o0, got, temp2));
3282 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3285 CALL_INSN_FUNCTION_USAGE (insn)
3286 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3287 CALL_INSN_FUNCTION_USAGE (insn));
3288 insn = get_insns ();
3290 emit_libcall_block (insn, temp3, o0,
3291 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3292 UNSPEC_TLSLD_BASE));
3293 temp1 = gen_reg_rtx (SImode);
3294 temp2 = gen_reg_rtx (SImode);
3295 emit_insn (gen_tldo_hix22 (temp1, addr));
3296 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3298 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3300 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3303 case TLS_MODEL_INITIAL_EXEC:
3304 temp1 = gen_reg_rtx (SImode);
3305 temp2 = gen_reg_rtx (SImode);
3306 temp3 = gen_reg_rtx (Pmode);
3307 got = sparc_tls_got ();
3308 emit_insn (gen_tie_hi22 (temp1, addr));
3309 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3311 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3313 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3316 ret = gen_reg_rtx (Pmode);
3318 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3321 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3325 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3328 case TLS_MODEL_LOCAL_EXEC:
3329 temp1 = gen_reg_rtx (Pmode);
3330 temp2 = gen_reg_rtx (Pmode);
3333 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3334 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3338 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3339 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3341 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3348 else if (GET_CODE (addr) == CONST)
3352 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
3354 base = legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
3355 offset = XEXP (XEXP (addr, 0), 1);
3357 base = force_operand (base, NULL_RTX);
3358 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
3359 offset = force_reg (Pmode, offset);
3360 ret = gen_rtx_PLUS (Pmode, base, offset);
3364 gcc_unreachable (); /* for now ... */
3369 /* Legitimize PIC addresses. If the address is already position-independent,
3370 we return ORIG. Newly generated position-independent addresses go into a
3371 reg.  This is REG if nonzero, otherwise we allocate register(s) as
necessary.  */
3375 legitimize_pic_address (rtx orig, rtx reg)
3377 if (GET_CODE (orig) == SYMBOL_REF
3378 /* See the comment in sparc_expand_move. */
3379 || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
3381 rtx pic_ref, address;
3386 gcc_assert (! reload_in_progress && ! reload_completed);
3387 reg = gen_reg_rtx (Pmode);
3392 /* If not during reload, allocate another temp reg here for loading
3393 in the address, so that these instructions can be optimized
3395 rtx temp_reg = ((reload_in_progress || reload_completed)
3396 ? reg : gen_reg_rtx (Pmode));
3398 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3399 won't get confused into thinking that these two instructions
3400 are loading in the true address of the symbol. If in the
3401 future a PIC rtx exists, that should be used instead. */
3404 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3405 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3409 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3410 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3417 pic_ref = gen_const_mem (Pmode,
3418 gen_rtx_PLUS (Pmode,
3419 pic_offset_table_rtx, address));
3420 crtl->uses_pic_offset_table = 1;
3421 insn = emit_move_insn (reg, pic_ref);
3422 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3424 set_unique_reg_note (insn, REG_EQUAL, orig);
3427 else if (GET_CODE (orig) == CONST)
3431 if (GET_CODE (XEXP (orig, 0)) == PLUS
3432 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3437 gcc_assert (! reload_in_progress && ! reload_completed);
3438 reg = gen_reg_rtx (Pmode);
3441 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3442 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
3443 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
3444 base == reg ? NULL_RTX : reg);
3446 if (GET_CODE (offset) == CONST_INT)
3448 if (SMALL_INT (offset))
3449 return plus_constant (base, INTVAL (offset));
3450 else if (! reload_in_progress && ! reload_completed)
3451 offset = force_reg (Pmode, offset);
3453 /* If we reach here, then something is seriously wrong. */
3456 return gen_rtx_PLUS (Pmode, base, offset);
3458 else if (GET_CODE (orig) == LABEL_REF)
3459 /* ??? Why do we do this? */
3460 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3461 the register is live instead, in case it is eliminated. */
3462 crtl->uses_pic_offset_table = 1;
3467 /* Try machine-dependent ways of modifying an illegitimate address X
3468 to be legitimate. If we find one, return the new, valid address.
3470 OLDX is the address as it was before break_out_memory_refs was called.
3471 In some cases it is useful to look at this to decide what needs to be done.
3473 MODE is the mode of the operand pointed to by X.
3475 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
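/* For example (illustrative, not from the original text), the address
   "%o1 + 0x12345" is not legitimate because the offset exceeds the
   signed 13-bit immediate range; it is rewritten below as REG + REG by
   first loading 0x12345 into a fresh register.  */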
3478 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3479 enum machine_mode mode)
3483 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3484 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3485 force_operand (XEXP (x, 0), NULL_RTX));
3486 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3487 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3488 force_operand (XEXP (x, 1), NULL_RTX));
3489 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3490 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3492 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3493 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3494 force_operand (XEXP (x, 1), NULL_RTX));
3496 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3499 if (sparc_tls_referenced_p (x))
3500 x = legitimize_tls_address (x);
3502 x = legitimize_pic_address (x, NULL_RTX);
3503 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3504 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3505 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3506 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3507 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3508 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3509 else if (GET_CODE (x) == SYMBOL_REF
3510 || GET_CODE (x) == CONST
3511 || GET_CODE (x) == LABEL_REF)
3512 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3517 /* Emit the special PIC helper function. */
3520 emit_pic_helper (void)
3522 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3525 switch_to_section (text_section);
3527 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3529 ASM_OUTPUT_ALIGN (asm_out_file, align);
3530 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3531 if (flag_delayed_branch)
3532 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3533 pic_name, pic_name);
3535 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3536 pic_name, pic_name);
3538 pic_helper_emitted_p = true;
3541 /* Emit code to load the PIC register. */
3544 load_pic_register (bool delay_pic_helper)
3546 int orig_flag_pic = flag_pic;
3548 if (TARGET_VXWORKS_RTP)
3550 emit_insn (gen_vxworks_load_got ());
3551 emit_use (pic_offset_table_rtx);
3555 /* If we haven't initialized the special PIC symbols, do so now. */
3556 if (!pic_helper_symbol_name[0])
3558 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3559 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3560 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3563 /* If we haven't emitted the special PIC helper function, do so now unless
3564 we are requested to delay it. */
3565 if (!delay_pic_helper && !pic_helper_emitted_p)
3570 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3571 pic_helper_symbol));
3573 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3574 pic_helper_symbol));
3575 flag_pic = orig_flag_pic;
3577 /* Need to emit this whether or not we obey regdecls,
3578 since setjmp/longjmp can cause life info to screw up.
3579 ??? In the case where we don't obey regdecls, this is not sufficient
3580 since we may not fall out the bottom. */
3581 emit_use (pic_offset_table_rtx);
3584 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3585 address of the call target. */
3588 sparc_emit_call_insn (rtx pat, rtx addr)
3592 insn = emit_call_insn (pat);
3594 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3595 if (TARGET_VXWORKS_RTP
3597 && GET_CODE (addr) == SYMBOL_REF
3598 && (SYMBOL_REF_DECL (addr)
3599 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3600 : !SYMBOL_REF_LOCAL_P (addr)))
3602 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3603 crtl->uses_pic_offset_table = 1;
3607 /* Return 1 if RTX is a MEM which is known to be aligned to at
3608 least a DESIRED byte boundary. */
3611 mem_min_alignment (rtx mem, int desired)
3613 rtx addr, base, offset;
3615 /* If it's not a MEM we can't accept it. */
3616 if (GET_CODE (mem) != MEM)
3620 if (!TARGET_UNALIGNED_DOUBLES
3621 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned) desired)
3624 /* ??? The rest of the function predates MEM_ALIGN so
3625 there is probably a bit of redundancy. */
3626 addr = XEXP (mem, 0);
3627 base = offset = NULL_RTX;
3628 if (GET_CODE (addr) == PLUS)
3630 if (GET_CODE (XEXP (addr, 0)) == REG)
3632 base = XEXP (addr, 0);
3634 /* What we are saying here is that if the base
3635 REG is aligned properly, the compiler will make
3636 sure any REG based index upon it will be so
as well.  */
3638 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3639 offset = XEXP (addr, 1);
3641 offset = const0_rtx;
3644 else if (GET_CODE (addr) == REG)
3647 offset = const0_rtx;
3650 if (base != NULL_RTX)
3652 int regno = REGNO (base);
3654 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3656 /* Check if the compiler has recorded some information
3657 about the alignment of the base REG. If reload has
3658 completed, we already matched with proper alignments.
3659 If not running global_alloc, reload might give us
3660 unaligned pointer to local stack though. */
3662 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3663 || (optimize && reload_completed))
3664 && (INTVAL (offset) & (desired - 1)) == 0)
3669 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3673 else if (! TARGET_UNALIGNED_DOUBLES
3674 || CONSTANT_P (addr)
3675 || GET_CODE (addr) == LO_SUM)
3677 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3678 is true, in which case we can only assume that an access is aligned if
3679 it is to a constant address, or the address involves a LO_SUM. */
3683 /* An obviously unaligned address. */
3688 /* Vectors to keep interesting information about registers where it can easily
3689 be got. We used to use the actual mode value as the bit number, but there
3690 are more than 32 modes now. Instead we use two tables: one indexed by
3691 hard register number, and one indexed by mode. */
3693 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3694 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3695 mapped into one sparc_mode_class mode. */
3697 enum sparc_mode_class {
3698 S_MODE, D_MODE, T_MODE, O_MODE,
3699 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3703 /* Modes for single-word and smaller quantities. */
3704 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3706 /* Modes for double-word and smaller quantities. */
3707 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3709 /* Modes for quad-word and smaller quantities. */
3710 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3712 /* Modes for 8-word and smaller quantities. */
3713 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3715 /* Modes for single-float quantities. We must allow any single word or
3716 smaller quantity. This is because the fix/float conversion instructions
3717 take integer inputs/outputs from the float registers. */
3718 #define SF_MODES (S_MODES)
3720 /* Modes for double-float and smaller quantities. */
3721 #define DF_MODES (D_MODES)
3723 /* Modes for quad-float and smaller quantities. */
3724 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
3726 /* Modes for quad-float pairs and smaller quantities. */
3727 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
3729 /* Modes for double-float only quantities. */
3730 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3732 /* Modes for quad-float and double-float only quantities. */
3733 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
3735 /* Modes for quad-float pairs and double-float only quantities. */
3736 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
3738 /* Modes for condition codes. */
3739 #define CC_MODES (1 << (int) CC_MODE)
3740 #define CCFP_MODES (1 << (int) CCFP_MODE)
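/* Illustrative note (not from the original text): the *_NO_S variants
   above omit S_MODE/SF_MODE on purpose; they are used below for the
   upper FP registers %f32-%f62, which have no single-precision names
   and therefore must never match a single-word mode.  */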
3742 /* Value is 1 if register/mode pair is acceptable on sparc.
3743 The funny mixture of D and T modes is because integer operations
3744 do not specially operate on tetra quantities, so non-quad-aligned
3745 registers can hold quadword quantities (except %o4 and %i4 because
3746 they cross fixed registers). */
3748 /* This points to either the 32-bit or the 64-bit version.  */
3749 const int *hard_regno_mode_classes;
3751 static const int hard_32bit_mode_classes[] = {
3752 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3753 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3754 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3755 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3757 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3758 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3759 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3760 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3762 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3763 and none can hold SFmode/SImode values. */
3764 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3765 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3766 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3767 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3770 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3776 static const int hard_64bit_mode_classes[] = {
3777 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3778 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3779 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3780 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3782 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3783 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3784 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3785 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3787 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3788 and none can hold SFmode/SImode values. */
3789 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3790 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3791 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3792 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3795 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3801 int sparc_mode_class [NUM_MACHINE_MODES];
3803 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3806 sparc_init_modes (void)
3810 for (i = 0; i < NUM_MACHINE_MODES; i++)
3812 switch (GET_MODE_CLASS (i))
3815 case MODE_PARTIAL_INT:
3816 case MODE_COMPLEX_INT:
3817 if (GET_MODE_SIZE (i) <= 4)
3818 sparc_mode_class[i] = 1 << (int) S_MODE;
3819 else if (GET_MODE_SIZE (i) == 8)
3820 sparc_mode_class[i] = 1 << (int) D_MODE;
3821 else if (GET_MODE_SIZE (i) == 16)
3822 sparc_mode_class[i] = 1 << (int) T_MODE;
3823 else if (GET_MODE_SIZE (i) == 32)
3824 sparc_mode_class[i] = 1 << (int) O_MODE;
3826 sparc_mode_class[i] = 0;
3828 case MODE_VECTOR_INT:
3829 if (GET_MODE_SIZE (i) <= 4)
3830 sparc_mode_class[i] = 1 << (int)SF_MODE;
3831 else if (GET_MODE_SIZE (i) == 8)
3832 sparc_mode_class[i] = 1 << (int)DF_MODE;
3835 case MODE_COMPLEX_FLOAT:
3836 if (GET_MODE_SIZE (i) <= 4)
3837 sparc_mode_class[i] = 1 << (int) SF_MODE;
3838 else if (GET_MODE_SIZE (i) == 8)
3839 sparc_mode_class[i] = 1 << (int) DF_MODE;
3840 else if (GET_MODE_SIZE (i) == 16)
3841 sparc_mode_class[i] = 1 << (int) TF_MODE;
3842 else if (GET_MODE_SIZE (i) == 32)
3843 sparc_mode_class[i] = 1 << (int) OF_MODE;
3845 sparc_mode_class[i] = 0;
3848 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3849 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3851 sparc_mode_class[i] = 1 << (int) CC_MODE;
3854 sparc_mode_class[i] = 0;
3860 hard_regno_mode_classes = hard_64bit_mode_classes;
3862 hard_regno_mode_classes = hard_32bit_mode_classes;
3864 /* Initialize the array used by REGNO_REG_CLASS. */
3865 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3867 if (i < 16 && TARGET_V8PLUS)
3868 sparc_regno_reg_class[i] = I64_REGS;
3869 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3870 sparc_regno_reg_class[i] = GENERAL_REGS;
3872 sparc_regno_reg_class[i] = FP_REGS;
3874 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3876 sparc_regno_reg_class[i] = FPCC_REGS;
3878 sparc_regno_reg_class[i] = NO_REGS;
3882 /* Compute the frame size required by the function. This function is called
3883 during the reload pass and also by sparc_expand_prologue. */
3886 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3888 int outgoing_args_size = (crtl->outgoing_args_size
3889 + REG_PARM_STACK_SPACE (current_function_decl));
3890 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3895 for (i = 0; i < 8; i++)
3896 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3901 for (i = 0; i < 8; i += 2)
3902 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3903 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3907 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3908 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3909 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3912 /* Set up values for use in prologue and epilogue. */
3913 num_gfregs = n_regs;
3918 && crtl->outgoing_args_size == 0)
3919 actual_fsize = apparent_fsize = 0;
3922 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
3923 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3924 apparent_fsize += n_regs * 4;
3925 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3928 /* Make sure nothing can clobber our register windows.
3929 If a SAVE must be done, or there is a stack-local variable,
3930 the register window area must be allocated. */
3931 if (! leaf_function_p || size > 0)
3932 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3934 return SPARC_STACK_ALIGN (actual_fsize);
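/* Worked example (illustrative, not from the original sources) of the
   double-word rounding used above: a 13-byte local area rounds up as
   (13 + 7) & -8 == 16, preserving the 8-byte alignment required for
   the register save areas.  */
#if 0
static long
round_up_to_double_word (long bytes)
{
  return (bytes + 7) & -8;   /* same arithmetic as apparent_fsize  */
}
#endif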
3937 /* Output any necessary .register pseudo-ops. */
3940 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3942 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3948 /* Check if %g[2367] were used without
3949 .register being printed for them already. */
3950 for (i = 2; i < 8; i++)
3952 if (df_regs_ever_live_p (i)
3953 && ! sparc_hard_reg_printed [i])
3955 sparc_hard_reg_printed [i] = 1;
3956 /* %g7 is used as TLS base register, use #ignore
3957 for it instead of #scratch. */
3958 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3959 i == 7 ? "ignore" : "scratch");
3966 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3967 as needed. LOW should be double-word aligned for 32-bit registers.
3968 Return the new OFFSET. */
#define SORR_SAVE    0
3971 #define SORR_RESTORE 1
3974 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3979 if (TARGET_ARCH64 && high <= 32)
3981 for (i = low; i < high; i++)
3983 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3985 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
3986 set_mem_alias_set (mem, sparc_sr_alias_set);
3987 if (action == SORR_SAVE)
3989 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
3990 RTX_FRAME_RELATED_P (insn) = 1;
3992 else /* action == SORR_RESTORE */
3993 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4000 for (i = low; i < high; i += 2)
4002 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
4003 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
4004 enum machine_mode mode;
4009 mode = i < 32 ? DImode : DFmode;
4014 mode = i < 32 ? SImode : SFmode;
4019 mode = i < 32 ? SImode : SFmode;
4026 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
4027 set_mem_alias_set (mem, sparc_sr_alias_set);
4028 if (action == SORR_SAVE)
4030 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4031 RTX_FRAME_RELATED_P (insn) = 1;
4033 else /* action == SORR_RESTORE */
4034 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4036 /* Always preserve double-word alignment. */
4037 offset = (offset + 7) & -8;
4044 /* Emit code to save call-saved registers. */
4047 emit_save_or_restore_regs (int action)
4049 HOST_WIDE_INT offset;
4052 offset = frame_base_offset - apparent_fsize;
4054 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
4056 /* ??? This might be optimized a little as %g1 might already have a
4057 value close enough that a single add insn will do. */
4058 /* ??? Although, all of this is probably only a temporary fix
4059 because if %g1 can hold a function result, then
4060 sparc_expand_epilogue will lose (the result will be
clobbered).  */
4062 base = gen_rtx_REG (Pmode, 1);
4063 emit_move_insn (base, GEN_INT (offset));
4064 emit_insn (gen_rtx_SET (VOIDmode,
4066 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4070 base = frame_base_reg;
4072 offset = save_or_restore_regs (0, 8, base, offset, action);
4073 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
4076 /* Generate a save_register_window insn. */
4079 gen_save_register_window (rtx increment)
4082 return gen_save_register_windowdi (increment);
4084 return gen_save_register_windowsi (increment);
4087 /* Generate an increment for the stack pointer. */
4090 gen_stack_pointer_inc (rtx increment)
4092 return gen_rtx_SET (VOIDmode,
4094 gen_rtx_PLUS (Pmode,
4099 /* Generate a decrement for the stack pointer. */
4102 gen_stack_pointer_dec (rtx decrement)
4104 return gen_rtx_SET (VOIDmode,
4106 gen_rtx_MINUS (Pmode,
4111 /* Expand the function prologue. The prologue is responsible for reserving
4112 storage for the frame, saving the call-saved registers and loading the
4113 PIC register if needed. */
4116 sparc_expand_prologue (void)
4121 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4122 on the final value of the flag means deferring the prologue/epilogue
4123 expansion until just before the second scheduling pass, which is too
4124 late to emit multiple epilogues or return insns.
4126 Of course we are making the assumption that the value of the flag
4127 will not change between now and its final value. Of the three parts
4128 of the formula, only the last one can reasonably vary. Let's take a
4129 closer look, after assuming that the first two are set to true
4130 (otherwise the last value is effectively silenced).
4132 If only_leaf_regs_used returns false, the global predicate will also
4133 be false so the actual frame size calculated below will be positive.
4134 As a consequence, the save_register_window insn will be emitted in
4135 the instruction stream; now this insn explicitly references %fp
4136 which is not a leaf register so only_leaf_regs_used will always
4137 return false subsequently.
4139 If only_leaf_regs_used returns true, we hope that the subsequent
4140 optimization passes won't cause non-leaf registers to pop up. For
4141 example, the regrename pass has special provisions to not rename to
4142 non-leaf registers in a leaf function. */
4143 sparc_leaf_function_p
4144 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4146 /* Need to use actual_fsize, since we are also allocating
4147 space for our callee (and our own register save area). */
4149 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4151 /* Advertise that the data calculated just above are now valid. */
4152 sparc_prologue_data_valid_p = true;
4154 if (sparc_leaf_function_p)
4156 frame_base_reg = stack_pointer_rtx;
4157 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4161 frame_base_reg = hard_frame_pointer_rtx;
4162 frame_base_offset = SPARC_STACK_BIAS;
4165 if (actual_fsize == 0)
4167 else if (sparc_leaf_function_p)
4169 if (actual_fsize <= 4096)
4170 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4171 else if (actual_fsize <= 8192)
4173 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4174 /* %sp is still the CFA register. */
4175 RTX_FRAME_RELATED_P (insn) = 1;
4177 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4181 rtx reg = gen_rtx_REG (Pmode, 1);
4182 emit_move_insn (reg, GEN_INT (-actual_fsize));
4183 insn = emit_insn (gen_stack_pointer_inc (reg));
4184 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4185 gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4188 RTX_FRAME_RELATED_P (insn) = 1;
4192 if (actual_fsize <= 4096)
4193 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4194 else if (actual_fsize <= 8192)
4196 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4197 /* %sp is not the CFA register anymore. */
4198 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4202 rtx reg = gen_rtx_REG (Pmode, 1);
4203 emit_move_insn (reg, GEN_INT (-actual_fsize));
4204 insn = emit_insn (gen_save_register_window (reg));
4207 RTX_FRAME_RELATED_P (insn) = 1;
4208 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
4209 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4213 emit_save_or_restore_regs (SORR_SAVE);
4215 /* Load the PIC register if needed. */
4216 if (flag_pic && crtl->uses_pic_offset_table)
4217 load_pic_register (false);
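/* Illustrative sketch (not from the original sources): for a small
   non-leaf 32-bit frame the code above boils down to the single
   instruction

	save	%sp, -96, %sp

   while leaf functions get a plain stack-pointer adjustment and frames
   larger than 8192 bytes first build the offset in %g1.  */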
4220 /* This function generates the assembly code for function entry, which boils
4221 down to emitting the necessary .register directives. */
4224 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4226 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4227 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4229 sparc_output_scratch_registers (file);
4232 /* Expand the function epilogue, either normal or part of a sibcall.
4233 We emit all the instructions except the return or the call. */
4236 sparc_expand_epilogue (void)
4239 emit_save_or_restore_regs (SORR_RESTORE);
4241 if (actual_fsize == 0)
4243 else if (sparc_leaf_function_p)
4245 if (actual_fsize <= 4096)
4246 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4247 else if (actual_fsize <= 8192)
4249 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4250 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4254 rtx reg = gen_rtx_REG (Pmode, 1);
4255 emit_move_insn (reg, GEN_INT (-actual_fsize));
4256 emit_insn (gen_stack_pointer_dec (reg));
4261 /* Return true if it is appropriate to emit `return' instructions in the
4262 body of a function. */
4265 sparc_can_use_return_insn_p (void)
4267 return sparc_prologue_data_valid_p
4268 && (actual_fsize == 0 || !sparc_leaf_function_p);
4271 /* This function generates the assembly code for function exit. */
4274 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4276 /* If code does not drop into the epilogue, we still have to output
4277 a dummy nop for the sake of sane backtraces. Otherwise, if the
4278 last two instructions of a function were "call foo; dslot;" this
4279 can make the return PC of foo (i.e. address of call instruction
4280 plus 8) point to the first instruction in the next function. */
4282 rtx insn, last_real_insn;
4284 insn = get_last_insn ();
4286 last_real_insn = prev_real_insn (insn);
4288 && GET_CODE (last_real_insn) == INSN
4289 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4290 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4292 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4293 fputs ("\tnop\n", file);
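  /* Illustrative example (not from the original sources): without the
     nop, a function ending in

	call	bar
	 mov	%o1, %o0

     would have its return address %o7 + 8 point at the first
     instruction of the next function, confusing backtraces.  */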
4295 sparc_output_deferred_case_vectors ();
4298 /* Output a 'restore' instruction. */
4301 output_restore (rtx pat)
4307 fputs ("\t restore\n", asm_out_file);
4311 gcc_assert (GET_CODE (pat) == SET);
4313 operands[0] = SET_DEST (pat);
4314 pat = SET_SRC (pat);
4316 switch (GET_CODE (pat))
4319 operands[1] = XEXP (pat, 0);
4320 operands[2] = XEXP (pat, 1);
4321 output_asm_insn (" restore %r1, %2, %Y0", operands);
4324 operands[1] = XEXP (pat, 0);
4325 operands[2] = XEXP (pat, 1);
4326 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4329 operands[1] = XEXP (pat, 0);
4330 gcc_assert (XEXP (pat, 1) == const1_rtx);
4331 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4335 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4340 /* Output a return. */
4343 output_return (rtx insn)
4345 if (sparc_leaf_function_p)
4347 /* This is a leaf function so we don't have to bother restoring the
4348 register window, which frees us from dealing with the convoluted
4349 semantics of restore/return. We simply output the jump to the
4350 return address and the insn in the delay slot (if any). */
4352 gcc_assert (! crtl->calls_eh_return);
4354 return "jmp\t%%o7+%)%#";
4358 /* This is a regular function so we have to restore the register window.
4359 We may have a pending insn for the delay slot, which will be either
4360 combined with the 'restore' instruction or put in the delay slot of
4361 the 'return' instruction. */
4363 if (crtl->calls_eh_return)
4365 /* If the function uses __builtin_eh_return, the eh_return
4366 machinery occupies the delay slot. */
4367 gcc_assert (! final_sequence);
4369 if (! flag_delayed_branch)
4370 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4373 fputs ("\treturn\t%i7+8\n", asm_out_file);
4375 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4377 if (flag_delayed_branch)
4378 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4380 fputs ("\t nop\n", asm_out_file);
4382 else if (final_sequence)
4386 delay = NEXT_INSN (insn);
4389 pat = PATTERN (delay);
4391 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4393 epilogue_renumber (&pat, 0);
4394 return "return\t%%i7+%)%#";
4398 output_asm_insn ("jmp\t%%i7+%)", NULL);
4399 output_restore (pat);
4400 PATTERN (delay) = gen_blockage ();
4401 INSN_CODE (delay) = -1;
4406 /* The delay slot is empty. */
4408 return "return\t%%i7+%)\n\t nop";
4409 else if (flag_delayed_branch)
4410 return "jmp\t%%i7+%)\n\t restore";
4412 return "restore\n\tjmp\t%%o7+%)\n\t nop";
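/* As an illustration (not from the original sources), the common
   epilogue shapes produced above are

	jmp	%i7+8
	 restore

   when delayed branches are enabled, and on v9 with an empty delay slot

	return	%i7+8
	 nop

   where the 8 skips the caller's call instruction and its delay slot.  */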
/* Output a sibling call.  */

const char *
output_sibcall (rtx insn, rtx call_operand)
{
  rtx operands[1];

  gcc_assert (flag_delayed_branch);

  operands[0] = call_operand;

  if (sparc_leaf_function_p)
    {
      /* This is a leaf function so we don't have to bother restoring the
	 register window.  We simply output the jump to the function and
	 the insn in the delay slot (if any).  */

      gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));

      if (final_sequence)
	output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
			 operands);
      else
	/* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
	   it into a branch if possible.  */
	output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
			 operands);
    }
  else
    {
      /* This is a regular function so we have to restore the register window.
	 We may have a pending insn for the delay slot, which will be combined
	 with the 'restore' instruction.  */

      output_asm_insn ("call\t%a0, 0", operands);

      if (final_sequence)
	{
	  rtx delay = NEXT_INSN (insn);
	  gcc_assert (delay);

	  output_restore (PATTERN (delay));

	  PATTERN (delay) = gen_blockage ();
	  INSN_CODE (delay) = -1;
	}
      else
	output_restore (NULL_RTX);
    }

  return "";
}
/* Functions for handling argument passing.

   For 32-bit, the first 6 args are normally in registers and the rest are
   pushed.  Any arg that starts within the first 6 words is at least
   partially passed in a register unless its data type forbids.

   For 64-bit, the argument registers are laid out as an array of 16 elements
   and arguments are added sequentially.  The first 6 int args and up to the
   first 16 fp args (depending on size) are passed in regs.

   Slot    Stack   Integral   Float   Float in structure   Double   Long Double
   ----    -----   --------   -----   ------------------   ------   -----------
    15   [SP+248]              %f31       %f30,%f31         %d30
    14   [SP+240]              %f29       %f28,%f29         %d28       %q28
    13   [SP+232]              %f27       %f26,%f27         %d26
    12   [SP+224]              %f25       %f24,%f25         %d24       %q24
    11   [SP+216]              %f23       %f22,%f23         %d22
    10   [SP+208]              %f21       %f20,%f21         %d20       %q20
     9   [SP+200]              %f19       %f18,%f19         %d18
     8   [SP+192]              %f17       %f16,%f17         %d16       %q16
     7   [SP+184]              %f15       %f14,%f15         %d14
     6   [SP+176]              %f13       %f12,%f13         %d12       %q12
     5   [SP+168]     %o5      %f11       %f10,%f11         %d10
     4   [SP+160]     %o4       %f9        %f8,%f9           %d8        %q8
     3   [SP+152]     %o3       %f7        %f6,%f7           %d6
     2   [SP+144]     %o2       %f5        %f4,%f5           %d4        %q4
     1   [SP+136]     %o1       %f3        %f2,%f3           %d2
     0   [SP+128]     %o0       %f1        %f0,%f1           %d0        %q0

   Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.

   Integral arguments are always passed as 64-bit quantities appropriately
   extended.

   Passing of floating point values is handled as follows.
   If a prototype is in scope:
     If the value is in a named argument (i.e. not a stdarg function or a
     value not part of the `...') then the value is passed in the appropriate
     fp reg.
     If the value is part of the `...' and is passed in one of the first 6
     slots then the value is passed in the appropriate int reg.
     If the value is part of the `...' and is not passed in one of the first 6
     slots then the value is passed in memory.
   If a prototype is not in scope:
     If the value is one of the first 6 arguments the value is passed in the
     appropriate integer reg and the appropriate fp reg.
     If the value is not one of the first 6 arguments the value is passed in
     the appropriate fp reg and in memory.


   Summary of the calling conventions implemented by GCC on the SPARC:

   32-bit ABI:
                                size      argument     return value

      small integer              <4       int. reg.      int. reg.
      word                        4       int. reg.      int. reg.
      double word                 8       int. reg.      int. reg.

      _Complex small integer     <8       int. reg.      int. reg.
      _Complex word               8       int. reg.      int. reg.
      _Complex double word       16        memory        int. reg.

      vector integer            <=8       int. reg.       FP reg.
      vector integer             >8        memory         memory

      float                       4       int. reg.       FP reg.
      double                      8       int. reg.       FP reg.
      long double                16        memory         memory

      _Complex float              8        memory         FP reg.
      _Complex double            16        memory         FP reg.
      _Complex long double       32        memory         FP reg.

      vector float              any        memory         memory

      aggregate                 any        memory         memory


   64-bit ABI:
                                size      argument     return value

      small integer              <8       int. reg.      int. reg.
      word                        8       int. reg.      int. reg.
      double word                16       int. reg.      int. reg.

      _Complex small integer    <16       int. reg.      int. reg.
      _Complex word              16       int. reg.      int. reg.
      _Complex double word       32        memory        int. reg.

      vector integer           <=16        FP reg.        FP reg.
      vector integer       16<s<=32        memory         FP reg.
      vector integer            >32        memory         memory

      float                       4        FP reg.        FP reg.
      double                      8        FP reg.        FP reg.
      long double                16        FP reg.        FP reg.

      _Complex float              8        FP reg.        FP reg.
      _Complex double            16        FP reg.        FP reg.
      _Complex long double       32        memory         FP reg.

      vector float             <=16        FP reg.        FP reg.
      vector float         16<s<=32        memory         FP reg.
      vector float              >32        memory         memory

      aggregate                <=16         reg.           reg.
      aggregate            16<s<=32        memory          reg.
      aggregate                 >32        memory         memory


   Note #1: complex floating-point types follow the extended SPARC ABIs as
   implemented by the Sun compiler.

   Note #2: integral vector types follow the scalar floating-point types
   conventions to match what is implemented by the Sun VIS SDK.

   Note #3: floating-point vector types follow the aggregate types
   conventions.  */
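
/* A worked example of the 64-bit tables above (illustrative, assuming a
   prototype is in scope):

       struct pt { float x, y; };
       void f (int a, double b, struct pt s);

   A takes slot 0 and is passed in %o0, B takes slot 1 and is passed in
   %d2, and S takes slot 2 and is passed in the pair %f4,%f5.  */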
/* Maximum number of int regs for args.  */
#define SPARC_INT_ARG_MAX 6
/* Maximum number of fp regs for args.  */
#define SPARC_FP_ARG_MAX 16

#define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
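
/* E.g. with UNITS_PER_WORD == 8, ROUND_ADVANCE (8) == 1 and
   ROUND_ADVANCE (9) == 2: a size in bytes is converted into the number
   of argument slots (words) it occupies, rounded up.  */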
/* Handle the INIT_CUMULATIVE_ARGS macro.
   Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0.  */

void
init_cumulative_args (struct sparc_args *cum, tree fntype,
		      rtx libname ATTRIBUTE_UNUSED,
		      tree fndecl ATTRIBUTE_UNUSED)
{
  cum->words = 0;
  cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
  cum->libcall_p = fntype == 0;
}
/* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
   When a prototype says `char' or `short', really pass an `int'.  */

static bool
sparc_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
{
  return TARGET_ARCH32 ? true : false;
}
/* Handle promotion of pointer and integer arguments.  */

static enum machine_mode
sparc_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
			     enum machine_mode mode,
			     int *punsignedp ATTRIBUTE_UNUSED,
			     const_tree fntype ATTRIBUTE_UNUSED,
			     int for_return ATTRIBUTE_UNUSED)
{
  if (POINTER_TYPE_P (type))
    {
      *punsignedp = POINTERS_EXTEND_UNSIGNED;
      return Pmode;
    }

  /* For TARGET_ARCH64 we need this, as we don't have instructions
     for arithmetic operations which do zero/sign extension at the same
     time, so without this we end up with a srl/sra after every assignment
     to a user variable, which means very poor code.  */
  if (TARGET_ARCH64
      && GET_MODE_CLASS (mode) == MODE_INT
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
    return word_mode;

  return mode;
}
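
/* E.g. (illustrative): a "short" parameter would otherwise live in
   HImode and need an explicit widening shift after each operation;
   promoting it to word_mode up front lets plain 64-bit arithmetic be
   used directly.  */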
/* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook.  */

static bool
sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
{
  return TARGET_ARCH64 ? true : false;
}
/* Scan the record type TYPE and return the following predicates:
    - INTREGS_P: the record contains at least one field or sub-field
      that is eligible for promotion in integer registers.
    - FP_REGS_P: the record contains at least one field or sub-field
      that is eligible for promotion in floating-point registers.
    - PACKED_P: the record contains at least one field that is packed.

   Sub-fields are not taken into account for the PACKED_P predicate.  */

static void
scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
{
  tree field;

  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
    {
      if (TREE_CODE (field) == FIELD_DECL)
	{
	  if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
	    scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
	  else if ((FLOAT_TYPE_P (TREE_TYPE (field))
		    || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
		   && TARGET_FPU)
	    *fpregs_p = 1;
	  else
	    *intregs_p = 1;

	  if (packed_p && DECL_PACKED (field))
	    *packed_p = 1;
	}
    }
}
/* Compute the slot number to pass an argument in.
   Return the slot number or -1 if passing on the stack.

   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).
   INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
   *PREGNO records the register number to use if scalar type.
   *PPADDING records the amount of padding needed in words.  */

static int
function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
		     tree type, int named, int incoming_p,
		     int *pregno, int *ppadding)
{
  int regbase = (incoming_p
		 ? SPARC_INCOMING_INT_ARG_FIRST
		 : SPARC_OUTGOING_INT_ARG_FIRST);
  int slotno = cum->words;
  enum mode_class mclass;
  int regno;

  *ppadding = 0;

  if (type && TREE_ADDRESSABLE (type))
    return -1;

  if (TARGET_ARCH32
      && mode == BLKmode
      && type
      && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
    return -1;

  /* For SPARC64, objects requiring 16-byte alignment get it.  */
  if (TARGET_ARCH64
      && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
      && (slotno & 1) != 0)
    slotno++, *ppadding = 1;

  mclass = GET_MODE_CLASS (mode);
  if (type && TREE_CODE (type) == VECTOR_TYPE)
    {
      /* Vector types deserve special treatment because they are
	 polymorphic wrt their mode, depending upon whether VIS
	 instructions are enabled.  */
      if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
	{
	  /* The SPARC port defines no floating-point vector modes.  */
	  gcc_assert (mode == BLKmode);
	}
      else
	{
	  /* Integral vector types should either have a vector
	     mode or an integral mode, because we are guaranteed
	     by pass_by_reference that their size is not greater
	     than 16 bytes and TImode is 16-byte wide.  */
	  gcc_assert (mode != BLKmode);

	  /* Vector integers are handled like floats according to
	     the Sun VIS SDK.  */
	  mclass = MODE_FLOAT;
	}
    }

  switch (mclass)
    {
    case MODE_FLOAT:
    case MODE_COMPLEX_FLOAT:
    case MODE_VECTOR_INT:
      if (TARGET_ARCH64 && TARGET_FPU && named)
	{
	  if (slotno >= SPARC_FP_ARG_MAX)
	    return -1;
	  regno = SPARC_FP_ARG_FIRST + slotno * 2;
	  /* Arguments filling only one single FP register are
	     right-justified in the outer double FP register.  */
	  if (GET_MODE_SIZE (mode) <= 4)
	    regno++;
	  break;
	}
      /* fallthrough */

    case MODE_INT:
    case MODE_COMPLEX_INT:
      if (slotno >= SPARC_INT_ARG_MAX)
	return -1;
      regno = regbase + slotno;
      break;

    case MODE_RANDOM:
      if (mode == VOIDmode)
	/* MODE is VOIDmode when generating the actual call.  */
	return -1;

      gcc_assert (mode == BLKmode);

      if (TARGET_ARCH32
	  || !type
	  || (TREE_CODE (type) != VECTOR_TYPE
	      && TREE_CODE (type) != RECORD_TYPE))
	{
	  if (slotno >= SPARC_INT_ARG_MAX)
	    return -1;
	  regno = regbase + slotno;
	}
      else  /* TARGET_ARCH64 && type */
	{
	  int intregs_p = 0, fpregs_p = 0, packed_p = 0;

	  /* First see what kinds of registers we would need.  */
	  if (TREE_CODE (type) == VECTOR_TYPE)
	    fpregs_p = 1;
	  else
	    scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);

	  /* The ABI obviously doesn't specify how packed structures
	     are passed.  These are defined to be passed in int regs
	     if possible, otherwise memory.  */
	  if (packed_p || !named)
	    fpregs_p = 0, intregs_p = 1;

	  /* If all arg slots are filled, then must pass on stack.  */
	  if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
	    return -1;

	  /* If there are only int args and all int arg slots are filled,
	     then must pass on stack.  */
	  if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
	    return -1;

	  /* Note that even if all int arg slots are filled, fp members may
	     still be passed in regs if such regs are available.
	     *PREGNO isn't set because there may be more than one, it's up
	     to the caller to compute them.  */
	  return slotno;
	}
      break;

    default:
      gcc_unreachable ();
    }

  *pregno = regno;
  return slotno;
}
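
/* A worked example (illustrative): on TARGET_ARCH64 with TARGET_FPU, a
   named double passed as the third argument (cum->words == 2) gets
   slotno 2 and *PREGNO = SPARC_FP_ARG_FIRST + 4, i.e. %f4 (the %d4
   pair), matching the layout table further up.  */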
/* Handle recursive register counting for structure field layout.  */

struct function_arg_record_value_parms
{
  rtx ret;		/* return expression being built.  */
  int slotno;		/* slot number of the argument.  */
  int named;		/* whether the argument is named.  */
  int regbase;		/* regno of the base register.  */
  int stack;		/* 1 if part of the argument is on the stack.  */
  int intoffset;	/* offset of the first pending integer field.  */
  unsigned int nregs;	/* number of words passed in registers.  */
};

static void function_arg_record_value_3
 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
static void function_arg_record_value_2
 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
static void function_arg_record_value_1
 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
static rtx function_arg_record_value
 (const_tree, enum machine_mode, int, int, int);
static rtx function_arg_union_value (int, enum machine_mode, int, int);
/* A subroutine of function_arg_record_value.  Traverse the structure
   recursively and determine how many registers will be required.  */

static void
function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
			     struct function_arg_record_value_parms *parms,
			     bool packed_p)
{
  tree field;

  /* We need to compute how many registers are needed so we can
     allocate the PARALLEL but before we can do that we need to know
     whether there are any packed fields.  The ABI obviously doesn't
     specify how structures are passed in this case, so they are
     defined to be passed in int regs if possible, otherwise memory,
     regardless of whether there are fp values present.  */

  if (! packed_p)
    for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
      {
	if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
	  {
	    packed_p = true;
	    break;
	  }
      }

  /* Compute how many registers we need.  */
  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
    {
      if (TREE_CODE (field) == FIELD_DECL)
	{
	  HOST_WIDE_INT bitpos = startbitpos;

	  if (DECL_SIZE (field) != 0)
	    {
	      if (integer_zerop (DECL_SIZE (field)))
		continue;

	      if (host_integerp (bit_position (field), 1))
		bitpos += int_bit_position (field);
	    }

	  /* ??? FIXME: else assume zero offset.  */

	  if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
	    function_arg_record_value_1 (TREE_TYPE (field),
					 bitpos, parms, packed_p);
	  else if ((FLOAT_TYPE_P (TREE_TYPE (field))
		    || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
		   && TARGET_FPU
		   && parms->named
		   && ! packed_p)
	    {
	      if (parms->intoffset != -1)
		{
		  unsigned int startbit, endbit;
		  int intslots, this_slotno;

		  startbit = parms->intoffset & -BITS_PER_WORD;
		  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;

		  intslots = (endbit - startbit) / BITS_PER_WORD;
		  this_slotno = parms->slotno + parms->intoffset
		    / BITS_PER_WORD;

		  if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
		    {
		      intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
		      /* We need to pass this field on the stack.  */
		      parms->stack = 1;
		    }

		  parms->nregs += intslots;
		  parms->intoffset = -1;
		}

	      /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
		 If it wasn't true we wouldn't be here.  */
	      if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
		  && DECL_MODE (field) == BLKmode)
		parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
	      else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
		parms->nregs += 2;
	      else
		parms->nregs += 1;
	    }
	  else
	    {
	      if (parms->intoffset == -1)
		parms->intoffset = bitpos;
	    }
	}
    }
}
/* A subroutine of function_arg_record_value.  Assign the bits of the
   structure between parms->intoffset and bitpos to integer registers.  */

static void
function_arg_record_value_3 (HOST_WIDE_INT bitpos,
			     struct function_arg_record_value_parms *parms)
{
  enum machine_mode mode;
  unsigned int regno;
  unsigned int startbit, endbit;
  int this_slotno, intslots, intoffset;
  rtx reg;

  if (parms->intoffset == -1)
    return;

  intoffset = parms->intoffset;
  parms->intoffset = -1;

  startbit = intoffset & -BITS_PER_WORD;
  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
  intslots = (endbit - startbit) / BITS_PER_WORD;
  this_slotno = parms->slotno + intoffset / BITS_PER_WORD;

  intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
  if (intslots <= 0)
    return;

  /* If this is the trailing part of a word, only load that much into
     the register.  Otherwise load the whole register.  Note that in
     the latter case we may pick up unwanted bits.  It's not a problem
     at the moment but we may wish to revisit this.  */
  if (intoffset % BITS_PER_WORD != 0)
    mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
				   MODE_INT);
  else
    mode = word_mode;

  intoffset /= BITS_PER_UNIT;
  do
    {
      regno = parms->regbase + this_slotno;
      reg = gen_rtx_REG (mode, regno);
      XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
	= gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));

      this_slotno += 1;
      intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
      mode = word_mode;
      parms->nregs += 1;
      intslots -= 1;
    }
  while (intslots > 0);
}
/* A subroutine of function_arg_record_value.  Traverse the structure
   recursively and assign bits to floating point registers.  Track which
   bits in between need integer registers; invoke function_arg_record_value_3
   to make that happen.  */

static void
function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
			     struct function_arg_record_value_parms *parms,
			     bool packed_p)
{
  tree field;

  if (! packed_p)
    for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
      {
	if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
	  {
	    packed_p = true;
	    break;
	  }
      }

  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
    {
      if (TREE_CODE (field) == FIELD_DECL)
	{
	  HOST_WIDE_INT bitpos = startbitpos;

	  if (DECL_SIZE (field) != 0)
	    {
	      if (integer_zerop (DECL_SIZE (field)))
		continue;

	      if (host_integerp (bit_position (field), 1))
		bitpos += int_bit_position (field);
	    }

	  /* ??? FIXME: else assume zero offset.  */

	  if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
	    function_arg_record_value_2 (TREE_TYPE (field),
					 bitpos, parms, packed_p);
	  else if ((FLOAT_TYPE_P (TREE_TYPE (field))
		    || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
		   && TARGET_FPU
		   && parms->named
		   && ! packed_p)
	    {
	      int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
	      int regno, nregs, pos;
	      enum machine_mode mode = DECL_MODE (field);
	      rtx reg;

	      function_arg_record_value_3 (bitpos, parms);

	      if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
		  && mode == BLKmode)
		{
		  mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
		  nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
		}
	      else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
		{
		  mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
		  nregs = 2;
		}
	      else
		nregs = 1;

	      regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
	      if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
		regno++;
	      reg = gen_rtx_REG (mode, regno);
	      pos = bitpos / BITS_PER_UNIT;
	      XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
		= gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
	      parms->nregs += 1;
	      while (--nregs > 0)
		{
		  regno += GET_MODE_SIZE (mode) / 4;
		  reg = gen_rtx_REG (mode, regno);
		  pos += GET_MODE_SIZE (mode);
		  XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
		    = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
		  parms->nregs += 1;
		}
	    }
	  else
	    {
	      if (parms->intoffset == -1)
		parms->intoffset = bitpos;
	    }
	}
    }
}
/* Used by function_arg and function_value to implement the complex
   conventions of the 64-bit ABI for passing and returning structures.
   Return an expression valid as a return value for the two macros
   FUNCTION_ARG and FUNCTION_VALUE.

   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   MODE is the argument's machine mode.
   SLOTNO is the index number of the argument's slot in the parameter array.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).
   REGBASE is the regno of the base register for the parameter array.  */

static rtx
function_arg_record_value (const_tree type, enum machine_mode mode,
			   int slotno, int named, int regbase)
{
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
  struct function_arg_record_value_parms parms;
  unsigned int nregs;

  parms.ret = NULL_RTX;
  parms.slotno = slotno;
  parms.named = named;
  parms.regbase = regbase;
  parms.stack = 0;

  /* Compute how many registers we need.  */
  parms.nregs = 0;
  parms.intoffset = 0;
  function_arg_record_value_1 (type, 0, &parms, false);

  /* Take into account pending integer fields.  */
  if (parms.intoffset != -1)
    {
      unsigned int startbit, endbit;
      int intslots, this_slotno;

      startbit = parms.intoffset & -BITS_PER_WORD;
      endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
      intslots = (endbit - startbit) / BITS_PER_WORD;
      this_slotno = slotno + parms.intoffset / BITS_PER_WORD;

      if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
	{
	  intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
	  /* We need to pass this field on the stack.  */
	  parms.stack = 1;
	}

      parms.nregs += intslots;
    }
  nregs = parms.nregs;

  /* Allocate the vector and handle some annoying special cases.  */
  if (nregs == 0)
    {
      /* ??? Empty structure has no value?  Duh?  */
      if (typesize <= 0)
	{
	  /* Though there's nothing really to store, return a word register
	     anyway so the rest of gcc doesn't go nuts.  Returning a PARALLEL
	     leads to breakage due to the fact that there are zero bytes to
	     store.  */
	  return gen_rtx_REG (mode, regbase);
	}
      else
	{
	  /* ??? C++ has structures with no fields, and yet a size.  Give up
	     for now and pass everything back in integer registers.  */
	  nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
	  if (nregs + slotno > SPARC_INT_ARG_MAX)
	    nregs = SPARC_INT_ARG_MAX - slotno;
	}
      gcc_assert (nregs != 0);
    }

  parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));

  /* If at least one field must be passed on the stack, generate
     (parallel [(expr_list (nil) ...) ...]) so that all fields will
     also be passed on the stack.  We can't do much better because the
     semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
     of structures for which the fields passed exclusively in registers
     are not at the beginning of the structure.  */
  if (parms.stack)
    XVECEXP (parms.ret, 0, 0)
      = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

  /* Fill in the entries.  */
  parms.nregs = 0;
  parms.intoffset = 0;
  function_arg_record_value_2 (type, 0, &parms, false);
  function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);

  gcc_assert (parms.nregs == nregs);

  return parms.ret;
}
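
/* E.g. (illustrative) for an outgoing struct { double d; int i; } at
   slot 0, the PARALLEL built here is roughly

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
		(expr_list (reg:DI %o1) (const_int 8))])

   the double going to a float register and the int to the second
   integer argument slot.  */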
/* Used by function_arg and function_value to implement the conventions
   of the 64-bit ABI for passing and returning unions.
   Return an expression valid as a return value for the two macros
   FUNCTION_ARG and FUNCTION_VALUE.

   SIZE is the size in bytes of the union.
   MODE is the argument's machine mode.
   REGNO is the hard register the union will be passed in.  */

static rtx
function_arg_union_value (int size, enum machine_mode mode, int slotno,
			  int regno)
{
  int nwords = ROUND_ADVANCE (size), i;
  rtx regs;

  /* See comment in previous function for empty structures.  */
  if (nwords == 0)
    return gen_rtx_REG (mode, regno);

  if (slotno == SPARC_INT_ARG_MAX - 1)
    nwords = SPARC_INT_ARG_MAX - slotno;

  regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));

  for (i = 0; i < nwords; i++)
    {
      /* Unions are passed left-justified.  */
      XVECEXP (regs, 0, i)
	= gen_rtx_EXPR_LIST (VOIDmode,
			     gen_rtx_REG (word_mode, regno),
			     GEN_INT (UNITS_PER_WORD * i));
      regno++;
    }

  return regs;
}
/* Used by function_arg and function_value to implement the conventions
   for passing and returning large (BLKmode) vectors.
   Return an expression valid as a return value for the two macros
   FUNCTION_ARG and FUNCTION_VALUE.

   SIZE is the size in bytes of the vector (at least 8 bytes).
   REGNO is the FP hard register the vector will be passed in.  */

static rtx
function_arg_vector_value (int size, int regno)
{
  int i, nregs = size / 8;
  rtx regs;

  regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));

  for (i = 0; i < nregs; i++)
    {
      XVECEXP (regs, 0, i)
	= gen_rtx_EXPR_LIST (VOIDmode,
			     gen_rtx_REG (DImode, regno + 2*i),
			     GEN_INT (i*8));
    }

  return regs;
}
/* Handle the FUNCTION_ARG macro.
   Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).
   INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.  */

rtx
function_arg (const struct sparc_args *cum, enum machine_mode mode,
	      tree type, int named, int incoming_p)
{
  int regbase = (incoming_p
		 ? SPARC_INCOMING_INT_ARG_FIRST
		 : SPARC_OUTGOING_INT_ARG_FIRST);
  int slotno, regno, padding;
  enum mode_class mclass = GET_MODE_CLASS (mode);

  slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
				&regno, &padding);
  if (slotno == -1)
    return 0;

  /* Vector types deserve special treatment because they are polymorphic wrt
     their mode, depending upon whether VIS instructions are enabled.  */
  if (type && TREE_CODE (type) == VECTOR_TYPE)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      gcc_assert ((TARGET_ARCH32 && size <= 8)
		  || (TARGET_ARCH64 && size <= 16));

      if (mode == BLKmode)
	return function_arg_vector_value (size,
					  SPARC_FP_ARG_FIRST + 2*slotno);
      else
	mclass = MODE_FLOAT;
    }

  if (TARGET_ARCH32)
    return gen_rtx_REG (mode, regno);

  /* Structures up to 16 bytes in size are passed in arg slots on the stack
     and are promoted to registers if possible.  */
  if (type && TREE_CODE (type) == RECORD_TYPE)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      gcc_assert (size <= 16);

      return function_arg_record_value (type, mode, slotno, named, regbase);
    }

  /* Unions up to 16 bytes in size are passed in integer registers.  */
  else if (type && TREE_CODE (type) == UNION_TYPE)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      gcc_assert (size <= 16);

      return function_arg_union_value (size, mode, slotno, regno);
    }

  /* v9 fp args in reg slots beyond the int reg slots get passed in regs
     but also have the slot allocated for them.
     If no prototype is in scope fp values in register slots get passed
     in two places, either fp regs and int regs or fp regs and memory.  */
  else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
	   && SPARC_FP_REG_P (regno))
    {
      rtx reg = gen_rtx_REG (mode, regno);
      if (cum->prototype_p || cum->libcall_p)
	{
	  /* "* 2" because fp reg numbers are recorded in 4 byte
	     quantities.  */
#if 0
	  /* ??? This will cause the value to be passed in the fp reg and
	     in the stack.  When a prototype exists we want to pass the
	     value in the reg but reserve space on the stack.  That's an
	     optimization, and is deferred [for a bit].  */
	  if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
	    return gen_rtx_PARALLEL (mode,
			    gen_rtvec (2,
				       gen_rtx_EXPR_LIST (VOIDmode,
						NULL_RTX, const0_rtx),
				       gen_rtx_EXPR_LIST (VOIDmode,
						reg, const0_rtx)));
	  else
#else
	  /* ??? It seems that passing back a register even when past
	     the area declared by REG_PARM_STACK_SPACE will allocate
	     space appropriately, and will not copy the data onto the
	     stack, exactly as we desire.

	     This is due to locate_and_pad_parm being called in
	     expand_call whenever reg_parm_stack_space > 0, which
	     while beneficial to our example here, would seem to be
	     in error from what had been intended.  Ho hum...  -- r~ */
#endif
	    return reg;
	}
      else
	{
	  rtx v0, v1;

	  if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
	    {
	      int intreg;

	      /* On incoming, we don't need to know that the value
		 is passed in %f0 and %i0, and it confuses other parts
		 causing needless spillage even on the simplest cases.  */
	      if (incoming_p)
		return reg;

	      intreg = (SPARC_OUTGOING_INT_ARG_FIRST
			+ (regno - SPARC_FP_ARG_FIRST) / 2);

	      v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
	      v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
				      const0_rtx);
	      return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
	    }
	  else
	    {
	      v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
	      v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
	      return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
	    }
	}
    }

  /* All other aggregate types are passed in an integer register in a mode
     corresponding to the size of the type.  */
  else if (type && AGGREGATE_TYPE_P (type))
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      gcc_assert (size <= 16);

      mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
    }

  return gen_rtx_REG (mode, regno);
}
/* For an arg passed partly in registers and partly in memory,
   this is the number of bytes of registers used.
   For args passed entirely in registers or entirely in memory, zero.

   Any arg that starts in the first 6 regs but won't entirely fit in them
   needs partial registers on v8.  On v9, structures with integer
   values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
   values that begin in the last fp reg [where "last fp reg" varies with the
   mode] will be split between that reg and memory.  */

static int
sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			 tree type, bool named)
{
  int slotno, regno, padding;

  /* We pass 0 for incoming_p here, it doesn't matter.  */
  slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);

  if (slotno == -1)
    return 0;

  if (TARGET_ARCH32)
    {
      if ((slotno + (mode == BLKmode
		     ? ROUND_ADVANCE (int_size_in_bytes (type))
		     : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
	  > SPARC_INT_ARG_MAX)
	return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
    }
  else
    {
      /* We are guaranteed by pass_by_reference that the size of the
	 argument is not greater than 16 bytes, so we only need to return
	 one word if the argument is partially passed in registers.  */

      if (type && AGGREGATE_TYPE_P (type))
	{
	  int size = int_size_in_bytes (type);

	  if (size > UNITS_PER_WORD
	      && slotno == SPARC_INT_ARG_MAX - 1)
	    return UNITS_PER_WORD;
	}
      else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
	       || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
		   && ! (TARGET_FPU && named)))
	{
	  /* The complex types are passed as packed types.  */
	  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
	      && slotno == SPARC_INT_ARG_MAX - 1)
	    return UNITS_PER_WORD;
	}
      else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
	{
	  if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
	      > SPARC_FP_ARG_MAX)
	    return UNITS_PER_WORD;
	}
    }

  return 0;
}
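
/* E.g. (illustrative) on TARGET_ARCH32: a 16-byte argument starting in
   slot 4 needs 4 words, and 4 + 4 > SPARC_INT_ARG_MAX, so
   (6 - 4) * UNITS_PER_WORD = 8 bytes travel in registers and the rest
   goes on the stack.  */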
/* Handle the TARGET_PASS_BY_REFERENCE target hook.
   Specify whether to pass the argument by reference.  */

static bool
sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
			 enum machine_mode mode, const_tree type,
			 bool named ATTRIBUTE_UNUSED)
{
  if (TARGET_ARCH32)
    /* Original SPARC 32-bit ABI says that structures and unions,
       and quad-precision floats are passed by reference.  For Pascal,
       also pass arrays by reference.  All other base types are passed
       in registers.

       Extended ABI (as implemented by the Sun compiler) says that all
       complex floats are passed by reference.  Pass complex integers
       in registers up to 8 bytes.  More generally, enforce the 2-word
       cap for passing arguments in registers.

       Vector ABI (as implemented by the Sun VIS SDK) says that vector
       integers are passed like floats of the same size, that is in
       registers up to 8 bytes.  Pass all vector floats by reference
       like structure and unions.  */
    return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
	    /* Catch CDImode, TFmode, DCmode and TCmode.  */
	    || GET_MODE_SIZE (mode) > 8
	    || (type
		&& TREE_CODE (type) == VECTOR_TYPE
		&& (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
  else
    /* Original SPARC 64-bit ABI says that structures and unions
       smaller than 16 bytes are passed in registers, as well as
       all other base types.

       Extended ABI (as implemented by the Sun compiler) says that
       complex floats are passed in registers up to 16 bytes.  Pass
       all complex integers in registers up to 16 bytes.  More generally,
       enforce the 2-word cap for passing arguments in registers.

       Vector ABI (as implemented by the Sun VIS SDK) says that vector
       integers are passed like floats of the same size, that is in
       registers (up to 16 bytes).  Pass all vector floats like structure
       and unions.  */
    return ((type
	     && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
	     && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
	    /* Catch CTImode and TCmode.  */
	    || GET_MODE_SIZE (mode) > 16);
}
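
/* E.g. (illustrative): under TARGET_ARCH32 every aggregate and any
   TFmode long double is passed by reference, while under TARGET_ARCH64
   the same TFmode value stays in registers and only aggregates larger
   than 16 bytes take the by-reference path.  */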
/* Handle the FUNCTION_ARG_ADVANCE macro.
   Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   TYPE is null for libcalls where that information may not be available.  */

void
function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
		      tree type, int named)
{
  int slotno, regno, padding;

  /* We pass 0 for incoming_p here, it doesn't matter.  */
  slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);

  /* If the register required leading padding, add it.  */
  if (slotno != -1)
    cum->words += padding;

  if (TARGET_ARCH32)
    {
      cum->words += (mode != BLKmode
		     ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
		     : ROUND_ADVANCE (int_size_in_bytes (type)));
    }
  else
    {
      if (type && AGGREGATE_TYPE_P (type))
	{
	  int size = int_size_in_bytes (type);

	  if (size <= 8)
	    ++cum->words;
	  else if (size <= 16)
	    cum->words += 2;
	  else /* passed by reference */
	    ++cum->words;
	}
      else
	{
	  cum->words += (mode != BLKmode
			 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
			 : ROUND_ADVANCE (int_size_in_bytes (type)));
	}
    }
}
/* Handle the FUNCTION_ARG_PADDING macro.
   For the 64-bit ABI structs are always stored left shifted in their
   argument slot.  */

enum direction
function_arg_padding (enum machine_mode mode, const_tree type)
{
  if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
    return upward;

  /* Fall back to the default.  */
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
/* Handle the TARGET_RETURN_IN_MEMORY target hook.
   Specify whether to return the return value in memory.  */

static bool
sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  if (TARGET_ARCH32)
    /* Original SPARC 32-bit ABI says that structures and unions,
       and quad-precision floats are returned in memory.  All other
       base types are returned in registers.

       Extended ABI (as implemented by the Sun compiler) says that
       all complex floats are returned in registers (8 FP registers
       at most for '_Complex long double').  Return all complex integers
       in registers (4 at most for '_Complex long long').

       Vector ABI (as implemented by the Sun VIS SDK) says that vector
       integers are returned like floats of the same size, that is in
       registers up to 8 bytes and in memory otherwise.  Return all
       vector floats in memory like structure and unions; note that
       they always have BLKmode like the latter.  */
    return (TYPE_MODE (type) == BLKmode
	    || TYPE_MODE (type) == TFmode
	    || (TREE_CODE (type) == VECTOR_TYPE
		&& (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
  else
    /* Original SPARC 64-bit ABI says that structures and unions
       smaller than 32 bytes are returned in registers, as well as
       all other base types.

       Extended ABI (as implemented by the Sun compiler) says that all
       complex floats are returned in registers (8 FP registers at most
       for '_Complex long double').  Return all complex integers in
       registers (4 at most for '_Complex TItype').

       Vector ABI (as implemented by the Sun VIS SDK) says that vector
       integers are returned like floats of the same size, that is in
       registers.  Return all vector floats like structure and unions;
       note that they always have BLKmode like the latter.  */
    return ((TYPE_MODE (type) == BLKmode
	     && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
}
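
/* E.g. (illustrative): under TARGET_ARCH32 any struct return goes
   through memory (its mode is BLKmode), whereas under TARGET_ARCH64 a
   24-byte struct is still returned in registers and only aggregates
   above 32 bytes are forced into memory.  */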
/* Handle the TARGET_STRUCT_VALUE target hook.
   Return where to find the structure return value address.  */

static rtx
sparc_struct_value_rtx (tree fndecl, int incoming)
{
  if (TARGET_ARCH64)
    return 0;
  else
    {
      rtx mem;

      if (incoming)
	mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
						 STRUCT_VALUE_OFFSET));
      else
	mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
						 STRUCT_VALUE_OFFSET));

      /* Only follow the SPARC ABI for fixed-size structure returns.
	 Variable size structure returns are handled per the normal
	 procedures in GCC.  This is enabled by -mstd-struct-return.  */
      if (incoming == 2
	  && sparc_std_struct_return
	  && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
	  && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
	{
	  /* We must check and adjust the return address, as it is
	     optional as to whether the return object is really
	     provided.  */
	  rtx ret_rtx = gen_rtx_REG (Pmode, 31);
	  rtx scratch = gen_reg_rtx (SImode);
	  rtx endlab = gen_label_rtx ();

	  /* Calculate the return object size.  */
	  tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
	  rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
	  /* Construct a temporary return value.  */
	  rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);

	  /* Implement SPARC 32-bit psABI callee return struct checking
	     requirements:

	     Fetch the instruction where we will return to and see if
	     it's an unimp instruction (the most significant 10 bits
	     will be zero).  */
	  emit_move_insn (scratch, gen_rtx_MEM (SImode,
						plus_constant (ret_rtx, 8)));
	  /* Assume the size is valid and pre-adjust.  */
	  emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
	  emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
				   0, endlab);
	  emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
	  /* Assign stack temp:
	     Write the address of the memory pointed to by temp_val into
	     the memory pointed to by mem.  */
	  emit_move_insn (mem, XEXP (temp_val, 0));
	  emit_label (endlab);
	}

      set_mem_alias_set (mem, struct_value_alias_set);
      return mem;
    }
}
/* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
   For v9, function return values are subject to the same rules as arguments,
   except that up to 32 bytes may be returned in registers.  */

rtx
function_value (const_tree type, enum machine_mode mode, int incoming_p)
{
  /* Beware that the two values are swapped here wrt function_arg.  */
  int regbase = (incoming_p
		 ? SPARC_OUTGOING_INT_ARG_FIRST
		 : SPARC_INCOMING_INT_ARG_FIRST);
  enum mode_class mclass = GET_MODE_CLASS (mode);
  int regno;

  /* Vector types deserve special treatment because they are polymorphic wrt
     their mode, depending upon whether VIS instructions are enabled.  */
  if (type && TREE_CODE (type) == VECTOR_TYPE)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      gcc_assert ((TARGET_ARCH32 && size <= 8)
		  || (TARGET_ARCH64 && size <= 32));

      if (mode == BLKmode)
	return function_arg_vector_value (size,
					  SPARC_FP_ARG_FIRST);
      else
	mclass = MODE_FLOAT;
    }

  if (TARGET_ARCH64 && type)
    {
      /* Structures up to 32 bytes in size are returned in registers.  */
      if (TREE_CODE (type) == RECORD_TYPE)
	{
	  HOST_WIDE_INT size = int_size_in_bytes (type);
	  gcc_assert (size <= 32);

	  return function_arg_record_value (type, mode, 0, 1, regbase);
	}

      /* Unions up to 32 bytes in size are returned in integer registers.  */
      else if (TREE_CODE (type) == UNION_TYPE)
	{
	  HOST_WIDE_INT size = int_size_in_bytes (type);
	  gcc_assert (size <= 32);

	  return function_arg_union_value (size, mode, 0, regbase);
	}

      /* Objects that require it are returned in FP registers.  */
      else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
	;

      /* All other aggregate types are returned in an integer register in a
	 mode corresponding to the size of the type.  */
      else if (AGGREGATE_TYPE_P (type))
	{
	  HOST_WIDE_INT size = int_size_in_bytes (type);
	  gcc_assert (size <= 32);

	  mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);

	  /* ??? We probably should have made the same ABI change in
	     3.4.0 as the one we made for unions.  The latter was
	     required by the SCD though, while the former is not
	     specified, so we favored compatibility and efficiency.

	     Now we're stuck for aggregates larger than 16 bytes,
	     because OImode vanished in the meantime.  Let's not
	     try to be unduly clever, and simply follow the ABI
	     for unions in that case.  */
	  if (mode == BLKmode)
	    return function_arg_union_value (size, mode, 0, regbase);
	  else
	    mclass = MODE_INT;
	}

      /* This must match sparc_promote_function_mode.
	 ??? Maybe 32-bit pointers should actually remain in Pmode?  */
      else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
	mode = word_mode;
    }

  if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
    regno = SPARC_FP_ARG_FIRST;
  else
    regno = regbase;

  return gen_rtx_REG (mode, regno);
}
/* Do what is necessary for `va_start'.  We look at the current function
   to determine if stdarg or varargs is used and return the address of
   the first unnamed parameter.  */

static rtx
sparc_builtin_saveregs (void)
{
  int first_reg = crtl->args.info.words;
  rtx address;
  int regno;

  for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
    emit_move_insn (gen_rtx_MEM (word_mode,
				 gen_rtx_PLUS (Pmode,
					       frame_pointer_rtx,
					       GEN_INT (FIRST_PARM_OFFSET (0)
							+ (UNITS_PER_WORD
							   * regno)))),
		    gen_rtx_REG (word_mode,
				 SPARC_INCOMING_INT_ARG_FIRST + regno));

  address = gen_rtx_PLUS (Pmode,
			  frame_pointer_rtx,
			  GEN_INT (FIRST_PARM_OFFSET (0)
				   + UNITS_PER_WORD * first_reg));

  return address;
}
/* Implement `va_start' for stdarg.  */

static void
sparc_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}
/* Implement `va_arg' for stdarg.  */

static tree
sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
		       gimple_seq *post_p)
{
  HOST_WIDE_INT size, rsize, align;
  tree addr, incr;
  bool indirect;
  tree ptrtype = build_pointer_type (type);

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      indirect = true;
      size = rsize = UNITS_PER_WORD;
      align = 0;
    }
  else
    {
      indirect = false;
      size = int_size_in_bytes (type);
      rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
      align = 0;

      if (TARGET_ARCH64)
	{
	  /* For SPARC64, objects requiring 16-byte alignment get it.  */
	  if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
	    align = 2 * UNITS_PER_WORD;

	  /* SPARC-V9 ABI states that structures up to 16 bytes in size
	     are left-justified in their slots.  */
	  if (AGGREGATE_TYPE_P (type))
	    {
	      if (size == 0)
		size = rsize = UNITS_PER_WORD;
	      else
		size = rsize;
	    }
	}
    }

  incr = valist;
  if (align)
    {
      incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
			  size_int (align - 1));
      incr = fold_convert (sizetype, incr);
      incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
			  size_int (-align));
      incr = fold_convert (ptr_type_node, incr);
    }

  gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
  addr = incr;

  if (BYTES_BIG_ENDIAN && size < rsize)
    addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
			size_int (rsize - size));

  if (indirect)
    {
      addr = fold_convert (build_pointer_type (ptrtype), addr);
      addr = build_va_arg_indirect_ref (addr);
    }

  /* If the address isn't aligned properly for the type, we need a temporary.
     FIXME: This is inefficient, usually we can do this in registers.  */
  else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
    {
      tree tmp = create_tmp_var (type, "va_arg_tmp");
      tree dest_addr = build_fold_addr_expr (tmp);
      tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
				   3, dest_addr, addr, size_int (rsize));
      TREE_ADDRESSABLE (tmp) = 1;
      gimplify_and_add (copy, pre_p);
      addr = dest_addr;
    }

  else
    addr = fold_convert (ptrtype, addr);

  incr
    = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
  gimplify_assign (valist, incr, post_p);

  return build_va_arg_indirect_ref (addr);
}
/* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
   Specify whether the vector mode is supported by the hardware.  */

static bool
sparc_vector_mode_supported_p (enum machine_mode mode)
{
  return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
}
/* Return the string to output an unconditional branch to LABEL, which is
   the operand number of the label.

   DEST is the destination insn (i.e. the label), INSN is the source.  */

const char *
output_ubranch (rtx dest, int label, rtx insn)
{
  static char string[64];
  bool v9_form = false;
  char *p;

  if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
    {
      int delta = (INSN_ADDRESSES (INSN_UID (dest))
		   - INSN_ADDRESSES (INSN_UID (insn)));
      /* Leave some instructions for "slop".  */
      if (delta >= -260000 && delta < 260000)
	v9_form = true;
    }

  if (v9_form)
    strcpy (string, "ba%*,pt\t%%xcc, ");
  else
    strcpy (string, "b%*\t");

  p = strchr (string, '\0');
  *p++ = '%';
  *p++ = 'l';
  *p++ = '0' + label;
  *p++ = '%';
  *p++ = '(';
  *p = '\0';

  return string;
}
/* Return the string to output a conditional branch to LABEL, which is
   the operand number of the label.  OP is the conditional expression.
   XEXP (OP, 0) is assumed to be a condition code register (integer or
   floating point) and its mode specifies what kind of comparison we made.

   DEST is the destination insn (i.e. the label), INSN is the source.

   REVERSED is nonzero if we should reverse the sense of the comparison.

   ANNUL is nonzero if we should generate an annulling branch.  */

const char *
output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
		rtx insn)
{
  static char string[64];
  enum rtx_code code = GET_CODE (op);
  rtx cc_reg = XEXP (op, 0);
  enum machine_mode mode = GET_MODE (cc_reg);
  const char *labelno, *branch;
  int spaces = 8, far;
  char *p;
  rtx note;

  /* v9 branches are limited to +-1MB.  If it is too far away, change e.g.

     fbne,a,pn %fcc2, .LC29

     to

     fbe,pt %fcc2, .+16
      nop
     ba .LC29  */

  far = TARGET_V9 && (get_attr_length (insn) >= 3);
  if (reversed ^ far)
    {
      /* Reversal of FP compares takes care -- an ordered compare
	 becomes an unordered compare and vice versa.  */
      if (mode == CCFPmode || mode == CCFPEmode)
	code = reverse_condition_maybe_unordered (code);
      else
	code = reverse_condition (code);
    }

  /* Start by writing the branch condition.  */
  if (mode == CCFPmode || mode == CCFPEmode)
    {
      switch (code)
	{
	case NE: branch = "fbne"; break;
	case EQ: branch = "fbe"; break;
	case GE: branch = "fbge"; break;
	case GT: branch = "fbg"; break;
	case LE: branch = "fble"; break;
	case LT: branch = "fbl"; break;
	case UNORDERED: branch = "fbu"; break;
	case ORDERED: branch = "fbo"; break;
	case UNGT: branch = "fbug"; break;
	case UNLT: branch = "fbul"; break;
	case UNEQ: branch = "fbue"; break;
	case UNGE: branch = "fbuge"; break;
	case UNLE: branch = "fbule"; break;
	case LTGT: branch = "fblg"; break;
	default: gcc_unreachable ();
	}

      /* ??? !v9: FP branches cannot be preceded by another floating point
	 insn.  Because there is currently no concept of pre-delay slots,
	 we can fix this only by always emitting a nop before a floating
	 point branch.  */

      string[0] = '\0';
      if (! TARGET_V9)
	strcpy (string, "nop\n\t");
      strcat (string, branch);
    }
  else
    {
      switch (code)
	{
	case NE: branch = "bne"; break;
	case EQ: branch = "be"; break;
	case GE:
	  if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
	    branch = "bpos";
	  else
	    branch = "bge";
	  break;
	case GT: branch = "bg"; break;
	case LE: branch = "ble"; break;
	case LT:
	  if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
	    branch = "bneg";
	  else
	    branch = "bl";
	  break;
	case GEU: branch = "bgeu"; break;
	case GTU: branch = "bgu"; break;
	case LEU: branch = "bleu"; break;
	case LTU: branch = "blu"; break;
	default: gcc_unreachable ();
	}
      strcpy (string, branch);
    }
  spaces -= strlen (branch);
  p = strchr (string, '\0');

  /* Now add the annulling, the label, and a possible noop.  */
  if (annul && ! far)
    {
      strcpy (p, ",a");
      p += 2;
      spaces -= 2;
    }

  if (TARGET_V9)
    {
      int v8 = 0;

      if (! far && insn && INSN_ADDRESSES_SET_P ())
	{
	  int delta = (INSN_ADDRESSES (INSN_UID (dest))
		       - INSN_ADDRESSES (INSN_UID (insn)));
	  /* Leave some instructions for "slop".  */
	  if (delta < -260000 || delta >= 260000)
	    v8 = 1;
	}

      if (mode == CCFPmode || mode == CCFPEmode)
	{
	  static char v9_fcc_labelno[] = "%%fccX, ";
	  /* Set the char indicating the number of the fcc reg to use.  */
	  v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
	  labelno = v9_fcc_labelno;
	  if (v8)
	    {
	      gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
	      labelno = "";
	    }
	}
      else if (mode == CCXmode || mode == CCX_NOOVmode)
	{
	  labelno = "%%xcc, ";
	  gcc_assert (! v8);
	}
      else
	{
	  labelno = "%%icc, ";
	  if (v8)
	    labelno = "";
	}

      if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
	{
	  strcpy (p,
		  ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
		  ? ",pt" : ",pn");
	  p += 3;
	  spaces -= 3;
	}
    }
  else
    labelno = "";

  if (spaces > 0)
    *p++ = '\t';
  else
    *p++ = ' ';
  strcpy (p, labelno);
  p = strchr (p, '\0');
  if (far)
    {
      strcpy (p, ".+12\n\t nop\n\tb\t");
      /* Skip the next insn if requested or
	 if we know that it will be a nop.  */
      if (annul || ! final_sequence)
	p[3] = '6';
      p += 14;
    }
  *p++ = '%';
  *p++ = 'l';
  *p++ = label + '0';
  *p++ = '%';
  *p++ = '#';
  *p = '\0';

  return string;
}
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the operator to compare with (EQ, NE, GT, etc).
   Return the new operator to be used in the comparison sequence.

   TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
   values as arguments instead of the TFmode registers themselves,
   that's why we cannot call emit_float_lib_cmp.  */

rtx
sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
{
  const char *qpfunc;
  rtx slot0, slot1, result, tem, tem2, libfunc;
  enum machine_mode mode;
  enum rtx_code new_comparison;

  switch (comparison)
    {
    case EQ: qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq"); break;
    case NE: qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne"); break;
    case GT: qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt"); break;
    case GE: qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge"); break;
    case LT: qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt"); break;
    case LE: qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle"); break;
    case ORDERED:
    case UNORDERED:
    case UNGT:
    case UNLT:
    case UNEQ:
    case UNGE:
    case UNLE:
    case LTGT:
      qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
      break;
    default:
      gcc_unreachable ();
    }

  if (TARGET_ARCH64)
    {
      if (MEM_P (x))
	slot0 = x;
      else
	{
	  slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
	  emit_move_insn (slot0, x);
	}

      if (MEM_P (y))
	slot1 = y;
      else
	{
	  slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
	  emit_move_insn (slot1, y);
	}

      libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
      emit_library_call (libfunc, LCT_NORMAL,
			 DImode, 2,
			 XEXP (slot0, 0), Pmode,
			 XEXP (slot1, 0), Pmode);
      mode = DImode;
    }
  else
    {
      libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
      emit_library_call (libfunc, LCT_NORMAL,
			 SImode, 2,
			 x, TFmode, y, TFmode);
      mode = SImode;
    }

  /* Immediately move the result of the libcall into a pseudo
     register so reload doesn't clobber the value if it needs
     the return register for a spill reg.  */
  result = gen_reg_rtx (mode);
  emit_move_insn (result, hard_libcall_value (mode, libfunc));

  switch (comparison)
    {
    default:
      return gen_rtx_NE (VOIDmode, result, const0_rtx);
    case ORDERED:
    case UNORDERED:
      new_comparison = (comparison == UNORDERED ? EQ : NE);
      return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
    case UNGT:
    case UNGE:
      new_comparison = (comparison == UNGT ? GT : NE);
      return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
    case UNLE:
      return gen_rtx_NE (VOIDmode, result, const2_rtx);
    case UNLT:
      tem = gen_reg_rtx (mode);
      if (TARGET_ARCH32)
	emit_insn (gen_andsi3 (tem, result, const1_rtx));
      else
	emit_insn (gen_anddi3 (tem, result, const1_rtx));
      return gen_rtx_NE (VOIDmode, tem, const0_rtx);
    case UNEQ:
    case LTGT:
      tem = gen_reg_rtx (mode);
      if (TARGET_ARCH32)
	emit_insn (gen_addsi3 (tem, result, const1_rtx));
      else
	emit_insn (gen_adddi3 (tem, result, const1_rtx));
      tem2 = gen_reg_rtx (mode);
      if (TARGET_ARCH32)
	emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
      else
	emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
      new_comparison = (comparison == UNEQ ? EQ : NE);
      return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
    }
}
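
/* E.g. (illustrative): "a < b" on TFmode values becomes a call to
   _Q_flt (or _Qp_flt with pointer arguments on TARGET_ARCH64) followed
   by a test of the result against zero; the unordered comparisons
   instead go through _Q_cmp/_Qp_cmp and decode its small result code,
   as done in the second switch above.  */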
/* Generate an unsigned DImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.  */

void
sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
{
  rtx neglab, donelab, i0, i1, f0, in, out;

  out = operands[0];
  in = force_reg (DImode, operands[1]);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
  emit_insn (gen_iordi3 (i0, i0, i1));
  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
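
/* The negative path above computes (x >> 1) | (x & 1) and doubles the
   converted result: halving brings the value into signed DImode range,
   while or-ing the low bit back in preserves correct rounding for odd
   values.  */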
/* Generate an FP to unsigned DImode conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.  */

void
sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
{
  rtx neglab, donelab, i0, i1, f0, in, out, limit;

  out = operands[0];
  in = force_reg (mode, operands[1]);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  limit = gen_reg_rtx (mode);
  f0 = gen_reg_rtx (mode);

  emit_move_insn (limit,
		  CONST_DOUBLE_FROM_REAL_VALUE (
		    REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
  emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode,
			  out,
			  gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
  emit_insn (gen_rtx_SET (VOIDmode,
			  i0,
			  gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
  emit_insn (gen_movdi (i1, const1_rtx));
  emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
  emit_insn (gen_xordi3 (out, i0, i1));

  emit_label (donelab);
}
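
/* Conversely, for inputs >= 2^63 the code above subtracts 2^63 (the
   "limit"), converts the remainder with a signed fix, and finally sets
   bit 63 again with the xor, reconstituting the unsigned result.  */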
/* Return the string to output a conditional branch to LABEL, testing
   register REG.  LABEL is the operand number of the label; REG is the
   operand number of the reg.  OP is the conditional expression.  The mode
   of REG says what kind of comparison we made.

   DEST is the destination insn (i.e. the label), INSN is the source.

   REVERSED is nonzero if we should reverse the sense of the comparison.

   ANNUL is nonzero if we should generate an annulling branch.  */

const char *
output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
		 int annul, rtx insn)
{
  static char string[64];
  enum rtx_code code = GET_CODE (op);
  enum machine_mode mode = GET_MODE (XEXP (op, 0));
  rtx note;
  int far;
  char *p;

  /* Branches on register are limited to +-128KB.  If it is too far away,
     change e.g.

     brgez,a,pn %o1, .LC29

     to

     brlz,pt %o1, .+16
      nop
     ba,pt %xcc, .LC29  */

  far = get_attr_length (insn) >= 3;

  /* If not floating-point or if EQ or NE, we can just reverse the code.  */
  if (reversed ^ far)
    code = reverse_condition (code);

  /* Only 64 bit versions of these instructions exist.  */
  gcc_assert (mode == DImode);

  /* Start by writing the branch condition.  */

  switch (code)
    {
    case NE: strcpy (string, "brnz"); break;
    case EQ: strcpy (string, "brz"); break;
    case GE: strcpy (string, "brgez"); break;
    case LT: strcpy (string, "brlz"); break;
    case LE: strcpy (string, "brlez"); break;
    case GT: strcpy (string, "brgz"); break;
    default: gcc_unreachable ();
    }

  p = strchr (string, '\0');

  /* Now add the annulling, reg, label, and nop.  */
  if (annul && ! far)
    {
      strcpy (p, ",a");
      p += 2;
    }

  if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
    {
      strcpy (p,
	      ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
	      ? ",pt" : ",pn");
      p += 3;
    }

  *p = p < string + 8 ? '\t' : ' ';
  p++;
  *p++ = '%';
  *p++ = '0' + reg;
  *p++ = ',';
  *p++ = ' ';
  if (far)
    {
      int veryfar = 1, delta;

      if (INSN_ADDRESSES_SET_P ())
	{
	  delta = (INSN_ADDRESSES (INSN_UID (dest))
		   - INSN_ADDRESSES (INSN_UID (insn)));
	  /* Leave some instructions for "slop".  */
	  if (delta >= -260000 && delta < 260000)
	    veryfar = 0;
	}

      strcpy (p, ".+12\n\t nop\n\t");
      /* Skip the next insn if requested or
	 if we know that it will be a nop.  */
      if (annul || ! final_sequence)
	p[3] = '6';
      p += 12;
      if (veryfar)
	{
	  strcpy (p, "b\t");
	  p += 2;
	}
      else
	{
	  strcpy (p, "ba,pt\t%%xcc, ");
	  p += 13;
	}
    }
  *p++ = '%';
  *p++ = 'l';
  *p++ = '0' + label;
  *p++ = '%';
  *p++ = '#';
  *p = '\0';

  return string;
}
/* Return 1, if any of the registers of the instruction are %l[0-7] or %o[0-7].
   Such instructions cannot be used in the delay slot of a return insn on v9.
   If TEST is 0, also rename all %i[0-7] registers to their %o[0-7]
   counterparts.  */

static int
epilogue_renumber (register rtx *where, int test)
{
  register const char *fmt;
  register int i;
  register enum rtx_code code;

  if (*where == 0)
    return 0;

  code = GET_CODE (*where);

  switch (code)
    {
    case REG:
      if (REGNO (*where) >= 8 && REGNO (*where) < 24)	/* oX or lX */
	return 1;
      if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
	*where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));

    case SCRATCH:
    case CC0:
    case PC:
    case CONST_INT:
    case CONST_DOUBLE:
      return 0;

      /* Do not replace the frame pointer with the stack pointer because
	 it can cause the delayed instruction to load below the stack.
	 This occurs when instructions like:

	 (set (reg/i:SI 24 %i0)
	     (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
		       (const_int -20 [0xffffffec])) 0))

	 are in the return delayed slot.  */
    case PLUS:
      if (GET_CODE (XEXP (*where, 0)) == REG
	  && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
	  && (GET_CODE (XEXP (*where, 1)) != CONST_INT
	      || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
	return 1;
      break;

    case MEM:
      if (SPARC_STACK_BIAS
	  && GET_CODE (XEXP (*where, 0)) == REG
	  && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
	return 1;
      break;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
	    if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
	      return 1;
	}
      else if (fmt[i] == 'e'
	       && epilogue_renumber (&(XEXP (*where, i)), test))
	return 1;
    }
  return 0;
}
/* Leaf functions and non-leaf functions have different needs.  */

static const int
reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;

static const int
reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;

static const int *const reg_alloc_orders[] = {
  reg_leaf_alloc_order,
  reg_nonleaf_alloc_order};

void
order_regs_for_local_alloc (void)
{
  static int last_order_nonleaf = 1;

  if (df_regs_ever_live_p (15) != last_order_nonleaf)
    {
      last_order_nonleaf = !last_order_nonleaf;
      memcpy ((char *) reg_alloc_order,
	      (const char *) reg_alloc_orders[last_order_nonleaf],
	      FIRST_PSEUDO_REGISTER * sizeof (int));
    }
}
/* Return 1 if REG and MEM are legitimate enough to allow the various
   mem<-->reg splits to be run.  */

int
sparc_splitdi_legitimate (rtx reg, rtx mem)
{
  /* Punt if we are here by mistake.  */
  gcc_assert (reload_completed);

  /* We must have an offsettable memory reference.  */
  if (! offsettable_memref_p (mem))
    return 0;

  /* If we have legitimate args for ldd/std, we do not want
     the split to happen.  */
  if ((REGNO (reg) % 2) == 0
      && mem_min_alignment (mem, 8))
    return 0;

  /* Success.  */
  return 1;
}
/* Return 1 if x and y are some kind of REG and they refer to
   different hard registers.  This test is guaranteed to be
   run after reload.  */

int
sparc_absnegfloat_split_legitimate (rtx x, rtx y)
{
  if (GET_CODE (x) != REG)
    return 0;
  if (GET_CODE (y) != REG)
    return 0;
  if (REGNO (x) == REGNO (y))
    return 0;
  return 1;
}
6732 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6733 This makes them candidates for using ldd and std insns.
6735 Note reg1 and reg2 *must* be hard registers. */
6738 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6740 /* We might have been passed a SUBREG. */
6741 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6744 if (REGNO (reg1) % 2 != 0)
6747 /* Integer ldd is deprecated in SPARC V9 */
6748 if (TARGET_V9 && REGNO (reg1) < 32)
6751 return (REGNO (reg1) == REGNO (reg2) - 1);
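/* Illustrative examples: %o0/%o1 (regnos 8 and 9) pass the checks above,
   since 8 is even and 9 == 8 + 1, while %o1/%o2 and %o0/%o2 both fail.
   On V9 only float pairs such as %f0/%f1 (regnos 32 and 33) remain
   eligible, because integer ldd is deprecated there.  */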
6754 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in an ldd or std insn.
6757 This can only happen when addr1 and addr2, the addresses in mem1
6758 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6759 addr1 must also be aligned on a 64-bit boundary.
6761 Also iff dependent_reg_rtx is not null it should not be used to
6762 compute the address for mem1, i.e. we cannot optimize a sequence
6774 But, note that the transformation from:
6779 is perfectly fine. Thus, the peephole2 patterns always pass us
6780 the destination register of the first load, never the second one.
6782 For stores we don't have a similar problem, so dependent_reg_rtx is
6786 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6790 HOST_WIDE_INT offset1;
6792 /* The mems cannot be volatile. */
6793 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6796 /* MEM1 should be aligned on a 64-bit boundary. */
6797 if (MEM_ALIGN (mem1) < 64)
6800 addr1 = XEXP (mem1, 0);
6801 addr2 = XEXP (mem2, 0);
6803 /* Extract a register number and offset (if used) from the first addr. */
6804 if (GET_CODE (addr1) == PLUS)
6806 /* If not a REG, return zero. */
6807 if (GET_CODE (XEXP (addr1, 0)) != REG)
6811 reg1 = REGNO (XEXP (addr1, 0));
6812 /* The offset must be constant! */
6813 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6815 offset1 = INTVAL (XEXP (addr1, 1));
6818 else if (GET_CODE (addr1) != REG)
6822 reg1 = REGNO (addr1);
6823 /* This was a simple (mem (reg)) expression. Offset is 0. */
6827 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
6828 if (GET_CODE (addr2) != PLUS)
6831 if (GET_CODE (XEXP (addr2, 0)) != REG
6832 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6835 if (reg1 != REGNO (XEXP (addr2, 0)))
6838 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6841 /* The first offset must be evenly divisible by 8 to ensure the
6842 address is 64-bit aligned. */
6843 if (offset1 % 8 != 0)
6846 /* The offset for the second addr must be 4 more than the first addr. */
6847 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6850 /* All the tests passed. addr1 and addr2 are valid for ldd and std insns.  */
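/* A worked example (illustrative): mem1 = [%o0 + 8] and mem2 = [%o0 + 12]
   with MEM_ALIGN (mem1) >= 64 satisfy every check above: same base
   register, offset1 % 8 == 0 and offset2 == offset1 + 4.  Using
   [%o0 + 4] for mem1 or [%o0 + 16] for mem2 makes the test fail.  */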
6855 /* Return 1 if reg is a pseudo, or is the first register in
6856 a hard register pair. This makes it suitable for use in
6857 ldd and std insns. */
6860 register_ok_for_ldd (rtx reg)
6862 /* We might have been passed a SUBREG. */
6866 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6867 return (REGNO (reg) % 2 == 0);
6872 /* Return 1 if OP is a memory whose address is known to be
6873 aligned to 8-byte boundary, or a pseudo during reload.
6874 This makes it suitable for use in ldd and std insns. */
6877 memory_ok_for_ldd (rtx op)
6881 /* In 64-bit mode, we assume that the address is word-aligned. */
6882 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
6885 if ((reload_in_progress || reload_completed)
6886 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
6889 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
6891 if (!(reload_in_progress && reg_renumber[REGNO (op)] < 0))
6900 /* Print operand X (an rtx) in assembler syntax to file FILE.
6901 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6902 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6905 print_operand (FILE *file, rtx x, int code)
6910 /* Output an insn in a delay slot. */
6912 sparc_indent_opcode = 1;
6914 fputs ("\n\t nop", file);
6917 /* Output an annul flag if there's nothing for the delay slot and we
6918 are optimizing. This is always used with '(' below.
6919 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6920 this is a dbx bug. So, we only do this when optimizing.
6921 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6922 Always emit a nop in case the next instruction is a branch. */
6923 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6927 /* Output a 'nop' if there's nothing for the delay slot and we are
6928 not optimizing. This is always used with '*' above. */
6929 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6930 fputs ("\n\t nop", file);
6931 else if (final_sequence)
6932 sparc_indent_opcode = 1;
6935 /* Output the right displacement from the saved PC on function return.
6936 The caller may have placed an "unimp" insn immediately after the call
6937 so we have to account for it. This insn is used in the 32-bit ABI
6938 when calling a function that returns a non-zero-sized structure. The
6939 64-bit ABI doesn't have it. Be careful to have this test be the same
6940 as that for the call. The exception is when sparc_std_struct_return
6941 is enabled, the psABI is followed exactly and the adjustment is made
6942 by the code in sparc_struct_value_rtx. The call emitted is the same
6943 when sparc_std_struct_return is enabled. */
6945 && cfun->returns_struct
6946 && !sparc_std_struct_return
6947 && DECL_SIZE (DECL_RESULT (current_function_decl))
6948 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6950 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6956 /* Output the Embedded Medium/Anywhere code model base register. */
6957 fputs (EMBMEDANY_BASE_REG, file);
6960 /* Print some local dynamic TLS name. */
6961 assemble_name (file, get_some_local_dynamic_name ());
6965 /* Adjust the operand to take into account a RESTORE operation. */
6966 if (GET_CODE (x) == CONST_INT)
6968 else if (GET_CODE (x) != REG)
6969 output_operand_lossage ("invalid %%Y operand");
6970 else if (REGNO (x) < 8)
6971 fputs (reg_names[REGNO (x)], file);
6972 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6973 fputs (reg_names[REGNO (x)-16], file);
6975 output_operand_lossage ("invalid %%Y operand");
6978 /* Print out the low order register name of a register pair. */
6979 if (WORDS_BIG_ENDIAN)
6980 fputs (reg_names[REGNO (x)+1], file);
6982 fputs (reg_names[REGNO (x)], file);
6985 /* Print out the high order register name of a register pair. */
6986 if (WORDS_BIG_ENDIAN)
6987 fputs (reg_names[REGNO (x)], file);
6989 fputs (reg_names[REGNO (x)+1], file);
6992 /* Print out the second register name of a register pair or quad.
6993 I.e., R (%o0) => %o1. */
6994 fputs (reg_names[REGNO (x)+1], file);
6997 /* Print out the third register name of a register quad.
6998 I.e., S (%o0) => %o2. */
6999 fputs (reg_names[REGNO (x)+2], file);
7002 /* Print out the fourth register name of a register quad.
7003 I.e., T (%o0) => %o3. */
7004 fputs (reg_names[REGNO (x)+3], file);
7007 /* Print a condition code register. */
7008 if (REGNO (x) == SPARC_ICC_REG)
7010 /* We don't handle CC[X]_NOOVmode because they're not supposed to occur here.  */
7012 if (GET_MODE (x) == CCmode)
7013 fputs ("%icc", file);
7014 else if (GET_MODE (x) == CCXmode)
7015 fputs ("%xcc", file);
7020 /* %fccN register */
7021 fputs (reg_names[REGNO (x)], file);
7024 /* Print the operand's address only. */
7025 output_address (XEXP (x, 0));
7028 /* In this case we need a register. Use %g0 if the
7029 operand is const0_rtx. */
7031 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7033 fputs ("%g0", file);
7040 switch (GET_CODE (x))
7042 case IOR: fputs ("or", file); break;
7043 case AND: fputs ("and", file); break;
7044 case XOR: fputs ("xor", file); break;
7045 default: output_operand_lossage ("invalid %%A operand");
7050 switch (GET_CODE (x))
7052 case IOR: fputs ("orn", file); break;
7053 case AND: fputs ("andn", file); break;
7054 case XOR: fputs ("xnor", file); break;
7055 default: output_operand_lossage ("invalid %%B operand");
7059 /* These are used by the conditional move instructions. */
7063 enum rtx_code rc = GET_CODE (x);
7067 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7068 if (mode == CCFPmode || mode == CCFPEmode)
7069 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7071 rc = reverse_condition (GET_CODE (x));
7075 case NE: fputs ("ne", file); break;
7076 case EQ: fputs ("e", file); break;
7077 case GE: fputs ("ge", file); break;
7078 case GT: fputs ("g", file); break;
7079 case LE: fputs ("le", file); break;
7080 case LT: fputs ("l", file); break;
7081 case GEU: fputs ("geu", file); break;
7082 case GTU: fputs ("gu", file); break;
7083 case LEU: fputs ("leu", file); break;
7084 case LTU: fputs ("lu", file); break;
7085 case LTGT: fputs ("lg", file); break;
7086 case UNORDERED: fputs ("u", file); break;
7087 case ORDERED: fputs ("o", file); break;
7088 case UNLT: fputs ("ul", file); break;
7089 case UNLE: fputs ("ule", file); break;
7090 case UNGT: fputs ("ug", file); break;
7091 case UNGE: fputs ("uge", file); break;
7092 case UNEQ: fputs ("ue", file); break;
7093 default: output_operand_lossage (code == 'c'
7094 ? "invalid %%c operand"
7095 : "invalid %%C operand");
7100 /* These are used by the movr instruction pattern. */
7104 enum rtx_code rc = (code == 'd'
7105 ? reverse_condition (GET_CODE (x))
7109 case NE: fputs ("ne", file); break;
7110 case EQ: fputs ("e", file); break;
7111 case GE: fputs ("gez", file); break;
7112 case LT: fputs ("lz", file); break;
7113 case LE: fputs ("lez", file); break;
7114 case GT: fputs ("gz", file); break;
7115 default: output_operand_lossage (code == 'd'
7116 ? "invalid %%d operand"
7117 : "invalid %%D operand");
7124 /* Print a sign-extended character. */
7125 int i = trunc_int_for_mode (INTVAL (x), QImode);
7126 fprintf (file, "%d", i);
7131 /* Operand must be a MEM; write its address. */
7132 if (GET_CODE (x) != MEM)
7133 output_operand_lossage ("invalid %%f operand");
7134 output_address (XEXP (x, 0));
7139 /* Print a sign-extended 32-bit value. */
7141 if (GET_CODE (x) == CONST_INT)
7143 else if (GET_CODE (x) == CONST_DOUBLE)
7144 i = CONST_DOUBLE_LOW (x);
7147 output_operand_lossage ("invalid %%s operand");
7150 i = trunc_int_for_mode (i, SImode);
7151 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7156 /* Do nothing special. */
7160 /* Undocumented flag. */
7161 output_operand_lossage ("invalid operand output code");
7164 if (GET_CODE (x) == REG)
7165 fputs (reg_names[REGNO (x)], file);
7166 else if (GET_CODE (x) == MEM)
7169 /* Poor Sun assembler doesn't understand absolute addressing. */
7170 if (CONSTANT_P (XEXP (x, 0)))
7171 fputs ("%g0+", file);
7172 output_address (XEXP (x, 0));
7175 else if (GET_CODE (x) == HIGH)
7177 fputs ("%hi(", file);
7178 output_addr_const (file, XEXP (x, 0));
7181 else if (GET_CODE (x) == LO_SUM)
7183 print_operand (file, XEXP (x, 0), 0);
7184 if (TARGET_CM_MEDMID)
7185 fputs ("+%l44(", file);
7187 fputs ("+%lo(", file);
7188 output_addr_const (file, XEXP (x, 1));
7191 else if (GET_CODE (x) == CONST_DOUBLE
7192 && (GET_MODE (x) == VOIDmode
7193 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7195 if (CONST_DOUBLE_HIGH (x) == 0)
7196 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7197 else if (CONST_DOUBLE_HIGH (x) == -1
7198 && CONST_DOUBLE_LOW (x) < 0)
7199 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7201 output_operand_lossage ("long long constant not a valid immediate operand");
7203 else if (GET_CODE (x) == CONST_DOUBLE)
7204 output_operand_lossage ("floating point constant not a valid immediate operand");
7205 else { output_addr_const (file, x); }
7208 /* Target hook for assembling integer objects. The sparc version has
7209 special handling for aligned DI-mode objects. */
7212 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7214 /* ??? We only output .xword's for symbols and only then in environments
7215 where the assembler can handle them. */
7216 if (aligned_p && size == 8
7217 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7221 assemble_integer_with_op ("\t.xword\t", x);
7226 assemble_aligned_integer (4, const0_rtx);
7227 assemble_aligned_integer (4, x);
7231 return default_assemble_integer (x, size, aligned_p);
7234 /* Return the value of a code used in the .proc pseudo-op that says
7235 what kind of result this function returns. For non-C types, we pick
7236 the closest C type. */
7238 #ifndef SHORT_TYPE_SIZE
7239 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7242 #ifndef INT_TYPE_SIZE
7243 #define INT_TYPE_SIZE BITS_PER_WORD
7246 #ifndef LONG_TYPE_SIZE
7247 #define LONG_TYPE_SIZE BITS_PER_WORD
7250 #ifndef LONG_LONG_TYPE_SIZE
7251 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7254 #ifndef FLOAT_TYPE_SIZE
7255 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7258 #ifndef DOUBLE_TYPE_SIZE
7259 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7262 #ifndef LONG_DOUBLE_TYPE_SIZE
7263 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7267 sparc_type_code (register tree type)
7269 register unsigned long qualifiers = 0;
7270 register unsigned shift;
7272 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7273 setting more, since some assemblers will give an error for this. Also,
7274 we must be careful to avoid shifts of 32 bits or more to avoid getting
7275 unpredictable results. */
7277 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7279 switch (TREE_CODE (type))
7285 qualifiers |= (3 << shift);
7290 qualifiers |= (2 << shift);
7294 case REFERENCE_TYPE:
7296 qualifiers |= (1 << shift);
7300 return (qualifiers | 8);
7303 case QUAL_UNION_TYPE:
7304 return (qualifiers | 9);
7307 return (qualifiers | 10);
7310 return (qualifiers | 16);
7313 /* If this is a range type, consider it to be the underlying type.  */
7315 if (TREE_TYPE (type) != 0)
7318 /* Carefully distinguish all the standard types of C,
7319 without messing up if the language is not C. We do this by
7320 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7321 look at both the names and the above fields, but that's redundant.
7322 Any type whose size is between two C types will be considered
7323 to be the wider of the two types. Also, we do not have a
7324 special code to use for "long long", so anything wider than
7325 long is treated the same. Note that we can't distinguish
7326 between "int" and "long" in this code if they are the same
7327 size, but that's fine, since neither can the assembler. */
7329 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7330 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7332 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7333 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7335 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7336 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7339 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7342 /* If this is a range type, consider it to be the underlying type.  */
7344 if (TREE_TYPE (type) != 0)
7347 /* Carefully distinguish all the standard types of C,
7348 without messing up if the language is not C. */
7350 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7351 return (qualifiers | 6);
7354 return (qualifiers | 7);
7356 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7357 /* ??? We need to distinguish between double and float complex types,
7358 but I don't know how yet because I can't reach this code from
7359 existing front-ends. */
7360 return (qualifiers | 7); /* Who knows? */
7363 case BOOLEAN_TYPE: /* Boolean truth value type. */
7364 case LANG_TYPE: /* ? */
7368 gcc_unreachable (); /* Not a type! */
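/* A worked example (illustrative): for the C type "unsigned int *", the
   first iteration of the loop above sees a POINTER_TYPE and sets
   qualifiers |= 1 << 6; the next iteration reaches an INTEGER_TYPE whose
   precision equals INT_TYPE_SIZE with TYPE_UNSIGNED set, so the function
   returns 0x40 | 14 == 0x4e.  */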
7375 /* Nested function support. */
7377 /* Emit RTL insns to initialize the variable parts of a trampoline.
7378 FNADDR is an RTX for the address of the function's pure code.
7379 CXT is an RTX for the static chain value for the function.
7381 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7382 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7383 (to store insns). This is a bit excessive. Perhaps a different
7384 mechanism would be better here.
7386 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7389 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
7391 /* SPARC 32-bit trampoline:
7394 sethi %hi(static), %g2
7396 or %g2, %lo(static), %g2
7398 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7399 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
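/* A worked example of these encodings (illustrative): for FNADDR ==
   0x12345678, (FNADDR >> 10) | 0x03000000 == 0x03048d15, which is exactly
   "sethi %hi(0x12345678), %g1"; the low 10 bits are then supplied by
   (FNADDR & 0x3ff) | 0x81c06000, i.e. "jmpl %g1+%lo(0x12345678), %g0".  */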
7403 (adjust_address (m_tramp, SImode, 0),
7404 expand_binop (SImode, ior_optab,
7405 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7406 size_int (10), 0, 1),
7407 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7408 NULL_RTX, 1, OPTAB_DIRECT));
7411 (adjust_address (m_tramp, SImode, 4),
7412 expand_binop (SImode, ior_optab,
7413 expand_shift (RSHIFT_EXPR, SImode, cxt,
7414 size_int (10), 0, 1),
7415 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7416 NULL_RTX, 1, OPTAB_DIRECT));
7419 (adjust_address (m_tramp, SImode, 8),
7420 expand_binop (SImode, ior_optab,
7421 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7422 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7423 NULL_RTX, 1, OPTAB_DIRECT));
7426 (adjust_address (m_tramp, SImode, 12),
7427 expand_binop (SImode, ior_optab,
7428 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7429 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7430 NULL_RTX, 1, OPTAB_DIRECT));
7432 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7433 aligned on a 16 byte boundary so one flush clears it all. */
7434 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
7435 if (sparc_cpu != PROCESSOR_ULTRASPARC
7436 && sparc_cpu != PROCESSOR_ULTRASPARC3
7437 && sparc_cpu != PROCESSOR_NIAGARA
7438 && sparc_cpu != PROCESSOR_NIAGARA2)
7439 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
7441 /* Call __enable_execute_stack after writing onto the stack to make sure
7442 the stack address is accessible. */
7443 #ifdef ENABLE_EXECUTE_STACK
7444 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7445 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
7450 /* The 64-bit version is simpler because it makes more sense to load the
7451 values as "immediate" data out of the trampoline. It's also easier since
7452 we can read the PC without clobbering a register. */
7455 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
7457 /* SPARC 64-bit trampoline:
7466 emit_move_insn (adjust_address (m_tramp, SImode, 0),
7467 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7468 emit_move_insn (adjust_address (m_tramp, SImode, 4),
7469 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7470 emit_move_insn (adjust_address (m_tramp, SImode, 8),
7471 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7472 emit_move_insn (adjust_address (m_tramp, SImode, 12),
7473 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7474 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
7475 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
7476 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
7478 if (sparc_cpu != PROCESSOR_ULTRASPARC
7479 && sparc_cpu != PROCESSOR_ULTRASPARC3
7480 && sparc_cpu != PROCESSOR_NIAGARA
7481 && sparc_cpu != PROCESSOR_NIAGARA2)
7482 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
7484 /* Call __enable_execute_stack after writing onto the stack to make sure
7485 the stack address is accessible. */
7486 #ifdef ENABLE_EXECUTE_STACK
7487 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7488 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
7492 /* Worker for TARGET_TRAMPOLINE_INIT. */
7495 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
7497 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
7498 cxt = force_reg (Pmode, cxt);
7500 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
7502 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
7505 /* Adjust the cost of a scheduling dependency. Return the new cost of
7506 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7509 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7511 enum attr_type insn_type;
7513 if (! recog_memoized (insn))
7516 insn_type = get_attr_type (insn);
7518 if (REG_NOTE_KIND (link) == 0)
7520 /* Data dependency; DEP_INSN writes a register that INSN reads some cycles later.  */
7523 /* If a load, then the dependence must be on the memory address;
7524 add an extra "cycle". Note that the cost could be two cycles
7525 if the reg was written late in an instruction group; we cannot tell here.  */
7527 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7530 /* Get the delay only if the address of the store is the dependence. */
7531 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7533 rtx pat = PATTERN (insn);
7534 rtx dep_pat = PATTERN (dep_insn);
7536 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7537 return cost; /* This should not happen! */
7539 /* The dependency between the two instructions was on the data that
7540 is being stored. Assume that this implies that the address of the
7541 store is not dependent. */
7542 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7545 return cost + 3; /* An approximation. */
7548 /* A shift instruction cannot receive its data from an instruction
7549 in the same cycle; add a one cycle penalty. */
7550 if (insn_type == TYPE_SHIFT)
7551 return cost + 3; /* Split before cascade into shift. */
7555 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7556 INSN writes some cycles later. */
7558 /* These are only significant for the fpu unit; writing an fp reg before
7559 the fpu has finished with it stalls the processor. */
7561 /* Reusing an integer register causes no problems. */
7562 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7570 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7572 enum attr_type insn_type, dep_type;
7573 rtx pat = PATTERN (insn);
7574 rtx dep_pat = PATTERN (dep_insn);
7576 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7579 insn_type = get_attr_type (insn);
7580 dep_type = get_attr_type (dep_insn);
7582 switch (REG_NOTE_KIND (link))
7585 /* Data dependency; DEP_INSN writes a register that INSN reads some cycles later.  */
7592 /* Get the delay iff the address of the store is the dependence. */
7593 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7596 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7603 /* If a load, then the dependence must be on the memory address. If
7604 the addresses aren't equal, then it might be a false dependency.  */
7605 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7607 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7608 || GET_CODE (SET_DEST (dep_pat)) != MEM
7609 || GET_CODE (SET_SRC (pat)) != MEM
7610 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7611 XEXP (SET_SRC (pat), 0)))
7619 /* Compare to branch latency is 0. There is no benefit from
7620 separating compare and branch. */
7621 if (dep_type == TYPE_COMPARE)
7623 /* Floating point compare to branch latency is less than
7624 compare to conditional move. */
7625 if (dep_type == TYPE_FPCMP)
7634 /* Anti-dependencies only penalize the fpu unit. */
7635 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7647 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
7651 case PROCESSOR_SUPERSPARC:
7652 cost = supersparc_adjust_cost (insn, link, dep, cost);
7654 case PROCESSOR_HYPERSPARC:
7655 case PROCESSOR_SPARCLITE86X:
7656 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7665 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7666 int sched_verbose ATTRIBUTE_UNUSED,
7667 int max_ready ATTRIBUTE_UNUSED)
7671 sparc_use_sched_lookahead (void)
7673 if (sparc_cpu == PROCESSOR_NIAGARA
7674 || sparc_cpu == PROCESSOR_NIAGARA2)
7676 if (sparc_cpu == PROCESSOR_ULTRASPARC
7677 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7679 if ((1 << sparc_cpu) &
7680 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7681 (1 << PROCESSOR_SPARCLITE86X)))
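/* The bit-mask test above is just a compact membership check; it is
   equivalent to (illustrative):

     sparc_cpu == PROCESSOR_SUPERSPARC
     || sparc_cpu == PROCESSOR_HYPERSPARC
     || sparc_cpu == PROCESSOR_SPARCLITE86X  */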
7687 sparc_issue_rate (void)
7691 case PROCESSOR_NIAGARA:
7692 case PROCESSOR_NIAGARA2:
7696 /* Assume V9 processors are capable of at least dual-issue. */
7698 case PROCESSOR_SUPERSPARC:
7700 case PROCESSOR_HYPERSPARC:
7701 case PROCESSOR_SPARCLITE86X:
7703 case PROCESSOR_ULTRASPARC:
7704 case PROCESSOR_ULTRASPARC3:
7710 set_extends (rtx insn)
7712 register rtx pat = PATTERN (insn);
7714 switch (GET_CODE (SET_SRC (pat)))
7716 /* Load and some shift instructions zero extend.  */
7719 /* sethi clears the high bits.  */
7721 /* LO_SUM is used with sethi. sethi cleared the high
7722 bits and the values used with lo_sum are positive.  */
7724 /* Store flag stores 0 or 1.  */
7734 rtx op0 = XEXP (SET_SRC (pat), 0);
7735 rtx op1 = XEXP (SET_SRC (pat), 1);
7736 if (GET_CODE (op1) == CONST_INT)
7737 return INTVAL (op1) >= 0;
7738 if (GET_CODE (op0) != REG)
7740 if (sparc_check_64 (op0, insn) == 1)
7742 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7747 rtx op0 = XEXP (SET_SRC (pat), 0);
7748 rtx op1 = XEXP (SET_SRC (pat), 1);
7749 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7751 if (GET_CODE (op1) == CONST_INT)
7752 return INTVAL (op1) >= 0;
7753 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7756 return GET_MODE (SET_SRC (pat)) == SImode;
7757 /* Positive integers leave the high bits zero. */
7759 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7761 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7764 return - (GET_MODE (SET_SRC (pat)) == SImode);
7766 return sparc_check_64 (SET_SRC (pat), insn);
7772 /* We _ought_ to have only one kind per function, but... */
7773 static GTY(()) rtx sparc_addr_diff_list;
7774 static GTY(()) rtx sparc_addr_list;
7777 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7779 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7781 sparc_addr_diff_list
7782 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7784 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7788 sparc_output_addr_vec (rtx vec)
7790 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7791 int idx, vlen = XVECLEN (body, 0);
7793 #ifdef ASM_OUTPUT_ADDR_VEC_START
7794 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7797 #ifdef ASM_OUTPUT_CASE_LABEL
7798 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7801 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7804 for (idx = 0; idx < vlen; idx++)
7806 ASM_OUTPUT_ADDR_VEC_ELT
7807 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7810 #ifdef ASM_OUTPUT_ADDR_VEC_END
7811 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7816 sparc_output_addr_diff_vec (rtx vec)
7818 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7819 rtx base = XEXP (XEXP (body, 0), 0);
7820 int idx, vlen = XVECLEN (body, 1);
7822 #ifdef ASM_OUTPUT_ADDR_VEC_START
7823 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7826 #ifdef ASM_OUTPUT_CASE_LABEL
7827 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7830 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7833 for (idx = 0; idx < vlen; idx++)
7835 ASM_OUTPUT_ADDR_DIFF_ELT
7838 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7839 CODE_LABEL_NUMBER (base));
7842 #ifdef ASM_OUTPUT_ADDR_VEC_END
7843 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7848 sparc_output_deferred_case_vectors (void)
7853 if (sparc_addr_list == NULL_RTX
7854 && sparc_addr_diff_list == NULL_RTX)
7857 /* Align to cache line in the function's code section. */
7858 switch_to_section (current_function_section ());
7860 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7862 ASM_OUTPUT_ALIGN (asm_out_file, align);
7864 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7865 sparc_output_addr_vec (XEXP (t, 0));
7866 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7867 sparc_output_addr_diff_vec (XEXP (t, 0));
7869 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7872 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7873 unknown. Return 1 if the high bits are zero, -1 if the register is sign extended.  */
7876 sparc_check_64 (rtx x, rtx insn)
7878 /* If a register is set only once it is safe to ignore insns this
7879 code does not know how to handle. The loop will either recognize
7880 the single set and return the correct value or fail to recognize it and return 0.  */
7885 gcc_assert (GET_CODE (x) == REG);
7887 if (GET_MODE (x) == DImode)
7888 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7890 if (flag_expensive_optimizations
7891 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
7897 insn = get_last_insn_anywhere ();
7902 while ((insn = PREV_INSN (insn)))
7904 switch (GET_CODE (insn))
7917 rtx pat = PATTERN (insn);
7918 if (GET_CODE (pat) != SET)
7920 if (rtx_equal_p (x, SET_DEST (pat)))
7921 return set_extends (insn);
7922 if (y && rtx_equal_p (y, SET_DEST (pat)))
7923 return set_extends (insn);
7924 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7932 /* Returns assembly code to perform a DImode shift using
7933 a 64-bit global or out register on SPARC-V8+. */
7935 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7937 static char asm_code[60];
7939 /* The scratch register is only required when the destination
7940 register is not a 64-bit global or out register. */
7941 if (which_alternative != 2)
7942 operands[3] = operands[0];
7944 /* We can only shift by constants <= 63. */
7945 if (GET_CODE (operands[2]) == CONST_INT)
7946 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7948 if (GET_CODE (operands[1]) == CONST_INT)
7950 output_asm_insn ("mov\t%1, %3", operands);
7954 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7955 if (sparc_check_64 (operands[1], insn) <= 0)
7956 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7957 output_asm_insn ("or\t%L1, %3, %3", operands);
7960 strcpy (asm_code, opcode);
7962 if (which_alternative != 2)
7963 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7965 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
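/* For OPCODE == "sllx" and which_alternative != 2, the code above emits a
   sequence along these lines (illustrative; the srl only appears when the
   low word is not known to be zero-extended):

	sllx	%H1, 32, %0	! pack the input into one 64-bit register
	srl	%L1, 0, %L1
	or	%L1, %0, %0
	sllx	%0, %2, %L0	! the shift proper
	srlx	%L0, 32, %H0	! unpack the high word of the result  */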
7968 /* Output rtl to increment the profiler label LABELNO
7969 for profiling a function entry. */
7972 sparc_profile_hook (int labelno)
7977 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7978 if (NO_PROFILE_COUNTERS)
7980 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
7984 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7985 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7986 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7990 #if defined (OBJECT_FORMAT_ELF)
7992 sparc_elf_asm_named_section (const char *name, unsigned int flags,
7993 tree decl ATTRIBUTE_UNUSED)
7995 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7997 if (!(flags & SECTION_DEBUG))
7998 fputs (",#alloc", asm_out_file);
7999 if (flags & SECTION_WRITE)
8000 fputs (",#write", asm_out_file);
8001 if (flags & SECTION_TLS)
8002 fputs (",#tls", asm_out_file);
8003 if (flags & SECTION_CODE)
8004 fputs (",#execinstr", asm_out_file);
8006 /* ??? Handle SECTION_BSS. */
8008 fputc ('\n', asm_out_file);
8010 #endif /* OBJECT_FORMAT_ELF */
8012 /* We do not allow indirect calls to be optimized into sibling calls.
8014 We cannot use sibling calls when delayed branches are disabled
8015 because they will likely require the call delay slot to be filled.
8017 Also, on SPARC 32-bit we cannot emit a sibling call when the
8018 current function returns a structure. This is because the "unimp
8019 after call" convention would cause the callee to return to the
8020 wrong place. The generic code already disallows cases where the
8021 function being called returns a structure.
8023 It may seem strange how this last case could occur. Usually there
8024 is code after the call which jumps to epilogue code which dumps the
8025 return value into the struct return area. That ought to invalidate
8026 the sibling call right? Well, in the C++ case we can end up passing
8027 the pointer to the struct return area to a constructor (which returns
8028 void) and then nothing else happens. Such a sibling call would look
8029 valid without the added check here.
8031 VxWorks PIC PLT entries require the global pointer to be initialized
8032 on entry. We therefore can't emit sibling calls to them. */
8034 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8037 && flag_delayed_branch
8038 && (TARGET_ARCH64 || ! cfun->returns_struct)
8039 && !(TARGET_VXWORKS_RTP
8041 && !targetm.binds_local_p (decl)));
8044 /* libfunc renaming. */
8045 #include "config/gofast.h"
8048 sparc_init_libfuncs (void)
8052 /* Use the subroutines that Sun's library provides for integer
8053 multiply and divide. The `*' prevents an underscore from
8054 being prepended by the compiler. .umul is a little faster than .mul.  */
8056 set_optab_libfunc (smul_optab, SImode, "*.umul");
8057 set_optab_libfunc (sdiv_optab, SImode, "*.div");
8058 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
8059 set_optab_libfunc (smod_optab, SImode, "*.rem");
8060 set_optab_libfunc (umod_optab, SImode, "*.urem");
8062 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
8063 set_optab_libfunc (add_optab, TFmode, "_Q_add");
8064 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
8065 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
8066 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
8067 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
8069 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
8070 is because with soft-float, the SFmode and DFmode sqrt
8071 instructions will be absent, and the compiler will notice and
8072 try to use the TFmode sqrt instruction for calls to the
8073 builtin function sqrt, but this fails. */
8075 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
8077 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
8078 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
8079 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
8080 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
8081 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
8082 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
8084 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
8085 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
8086 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
8087 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
8089 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
8090 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
8091 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
8092 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
8094 if (DITF_CONVERSION_LIBFUNCS)
8096 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
8097 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
8098 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
8099 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
8102 if (SUN_CONVERSION_LIBFUNCS)
8104 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
8105 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
8106 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
8107 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
8112 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
8113 do not exist in the library. Make sure the compiler does not
8114 emit calls to them by accident. (It should always use the
8115 hardware instructions.) */
8116 set_optab_libfunc (smul_optab, SImode, 0);
8117 set_optab_libfunc (sdiv_optab, SImode, 0);
8118 set_optab_libfunc (udiv_optab, SImode, 0);
8119 set_optab_libfunc (smod_optab, SImode, 0);
8120 set_optab_libfunc (umod_optab, SImode, 0);
8122 if (SUN_INTEGER_MULTIPLY_64)
8124 set_optab_libfunc (smul_optab, DImode, "__mul64");
8125 set_optab_libfunc (sdiv_optab, DImode, "__div64");
8126 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
8127 set_optab_libfunc (smod_optab, DImode, "__rem64");
8128 set_optab_libfunc (umod_optab, DImode, "__urem64");
8131 if (SUN_CONVERSION_LIBFUNCS)
8133 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
8134 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
8135 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
8136 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
8140 gofast_maybe_init_libfuncs ();
8143 #define def_builtin(NAME, CODE, TYPE) \
8144 add_builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
8147 /* Implement the TARGET_INIT_BUILTINS target hook.
8148 Create builtin functions for special SPARC instructions. */
8151 sparc_init_builtins (void)
8154 sparc_vis_init_builtins ();
8157 /* Create builtin functions for VIS 1.0 instructions. */
8160 sparc_vis_init_builtins (void)
8162 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
8163 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
8164 tree v4hi = build_vector_type (intHI_type_node, 4);
8165 tree v2hi = build_vector_type (intHI_type_node, 2);
8166 tree v2si = build_vector_type (intSI_type_node, 2);
8168 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
8169 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
8170 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
8171 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
8172 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
8173 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
8174 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
8175 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
8176 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
8177 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
8178 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
8179 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
8180 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
8182 intDI_type_node, 0);
8183 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
8185 intDI_type_node, 0);
8186 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
8188 intSI_type_node, 0);
8189 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
8191 intDI_type_node, 0);
8193 /* Packing and expanding vectors. */
8194 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
8195 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
8196 v8qi_ftype_v2si_v8qi);
8197 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
8199 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
8200 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
8201 v8qi_ftype_v4qi_v4qi);
8203 /* Multiplications. */
8204 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
8205 v4hi_ftype_v4qi_v4hi);
8206 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
8207 v4hi_ftype_v4qi_v2hi);
8208 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
8209 v4hi_ftype_v4qi_v2hi);
8210 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
8211 v4hi_ftype_v8qi_v4hi);
8212 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8213 v4hi_ftype_v8qi_v4hi);
8214 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8215 v2si_ftype_v4qi_v2hi);
8216 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8217 v2si_ftype_v4qi_v2hi);
8219 /* Data aligning. */
8220 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8221 v4hi_ftype_v4hi_v4hi);
8222 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8223 v8qi_ftype_v8qi_v8qi);
8224 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8225 v2si_ftype_v2si_v2si);
8226 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8229 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8232 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8235 /* Pixel distance. */
8236 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8237 di_ftype_v8qi_v8qi_di);
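/* A usage sketch (illustrative user-level code, not part of this file):
   with -mvis, the pdist builtin defined above can be called as

     typedef unsigned char v8qi __attribute__ ((vector_size (8)));

     long long sad (v8qi a, v8qi b, long long acc)
     {
       return __builtin_vis_pdist (a, b, acc);
     }  */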
8240 /* Handle TARGET_EXPAND_BUILTIN target hook.
8241 Expand builtin functions for SPARC intrinsics. */
8244 sparc_expand_builtin (tree exp, rtx target,
8245 rtx subtarget ATTRIBUTE_UNUSED,
8246 enum machine_mode tmode ATTRIBUTE_UNUSED,
8247 int ignore ATTRIBUTE_UNUSED)
8250 call_expr_arg_iterator iter;
8251 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8252 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8254 enum machine_mode mode[4];
8257 mode[0] = insn_data[icode].operand[0].mode;
8259 || GET_MODE (target) != mode[0]
8260 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
8261 op[0] = gen_reg_rtx (mode[0]);
8265 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8268 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8269 op[arg_count] = expand_normal (arg);
8271 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8273 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8279 pat = GEN_FCN (icode) (op[0], op[1]);
8282 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8285 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8300 sparc_vis_mul8x16 (int e8, int e16)
8302 return (e8 * e16 + 128) / 256;
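/* For example, sparc_vis_mul8x16 (100, 300) == (100 * 300 + 128) / 256
   == 30128 / 256 == 117, i.e. the 8x16 product scaled down by 256 with
   round-to-nearest, mirroring the hardware's partitioned multiply.  */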
8305 /* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
8306 by FNCODE. All of the elements in ELTS0 and ELTS1 lists must be integer
8307 constants. A tree list with the results of the multiplications is returned,
8308 and each element in the list is of INNER_TYPE. */
8311 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8313 tree n_elts = NULL_TREE;
8318 case CODE_FOR_fmul8x16_vis:
8319 for (; elts0 && elts1;
8320 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8323 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8324 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8325 n_elts = tree_cons (NULL_TREE,
8326 build_int_cst (inner_type, val),
8331 case CODE_FOR_fmul8x16au_vis:
8332 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8334 for (; elts0; elts0 = TREE_CHAIN (elts0))
8337 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8339 n_elts = tree_cons (NULL_TREE,
8340 build_int_cst (inner_type, val),
8345 case CODE_FOR_fmul8x16al_vis:
8346 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8348 for (; elts0; elts0 = TREE_CHAIN (elts0))
8351 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8353 n_elts = tree_cons (NULL_TREE,
8354 build_int_cst (inner_type, val),
8363 return nreverse (n_elts);
8366 /* Handle TARGET_FOLD_BUILTIN target hook.
8367 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8368 result of the function call is ignored. NULL_TREE is returned if the
8369 function could not be folded. */
8372 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8374 tree arg0, arg1, arg2;
8375 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8376 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
8379 && icode != CODE_FOR_alignaddrsi_vis
8380 && icode != CODE_FOR_alignaddrdi_vis)
8381 return fold_convert (rtype, integer_zero_node);
8385 case CODE_FOR_fexpand_vis:
8386 arg0 = TREE_VALUE (arglist);
8389 if (TREE_CODE (arg0) == VECTOR_CST)
8391 tree inner_type = TREE_TYPE (rtype);
8392 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8393 tree n_elts = NULL_TREE;
8395 for (; elts; elts = TREE_CHAIN (elts))
8397 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8398 n_elts = tree_cons (NULL_TREE,
8399 build_int_cst (inner_type, val),
8402 return build_vector (rtype, nreverse (n_elts));
8406 case CODE_FOR_fmul8x16_vis:
8407 case CODE_FOR_fmul8x16au_vis:
8408 case CODE_FOR_fmul8x16al_vis:
8409 arg0 = TREE_VALUE (arglist);
8410 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8414 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8416 tree inner_type = TREE_TYPE (rtype);
8417 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8418 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8419 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
8422 return build_vector (rtype, n_elts);
8426 case CODE_FOR_fpmerge_vis:
8427 arg0 = TREE_VALUE (arglist);
8428 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8432 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8434 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8435 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8436 tree n_elts = NULL_TREE;
8438 for (; elts0 && elts1;
8439 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8441 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8442 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8445 return build_vector (rtype, nreverse (n_elts));
8449 case CODE_FOR_pdist_vis:
8450 arg0 = TREE_VALUE (arglist);
8451 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8452 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8457 if (TREE_CODE (arg0) == VECTOR_CST
8458 && TREE_CODE (arg1) == VECTOR_CST
8459 && TREE_CODE (arg2) == INTEGER_CST)
8462 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8463 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8464 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8465 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8467 for (; elts0 && elts1;
8468 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8470 unsigned HOST_WIDE_INT
8471 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8472 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8473 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8474 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8476 unsigned HOST_WIDE_INT l;
8479 overflow |= neg_double (low1, high1, &l, &h);
8480 overflow |= add_double (low0, high0, l, h, &l, &h);
8482 overflow |= neg_double (l, h, &l, &h);
8484 overflow |= add_double (low, high, l, h, &low, &high);
8487 gcc_assert (overflow == 0);
8489 return build_int_cst_wide (rtype, low, high);
8499 /* ??? This duplicates information provided to the compiler by the
8500 ??? scheduler description. Some day, teach genautomata to output
8501 ??? the latencies and then CSE will just use that. */
8504 sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
8505 bool speed ATTRIBUTE_UNUSED)
8507 enum machine_mode mode = GET_MODE (x);
8508 bool float_mode_p = FLOAT_MODE_P (mode);
8513 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8531 if (GET_MODE (x) == VOIDmode
8532 && ((CONST_DOUBLE_HIGH (x) == 0
8533 && CONST_DOUBLE_LOW (x) < 0x1000)
8534 || (CONST_DOUBLE_HIGH (x) == -1
8535 && CONST_DOUBLE_LOW (x) < 0
8536 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8543 /* If outer-code was a sign or zero extension, a cost
8544 of COSTS_N_INSNS (1) was already added in. This is
8545 why we are subtracting it back out. */
8546 if (outer_code == ZERO_EXTEND)
8548 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8550 else if (outer_code == SIGN_EXTEND)
8552 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8554 else if (float_mode_p)
8556 *total = sparc_costs->float_load;
8560 *total = sparc_costs->int_load;
8568 *total = sparc_costs->float_plusminus;
8570 *total = COSTS_N_INSNS (1);
8575 *total = sparc_costs->float_mul;
8576 else if (! TARGET_HARD_MUL)
8577 *total = COSTS_N_INSNS (25);
8583 if (sparc_costs->int_mul_bit_factor)
8587 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8589 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
8590 for (nbits = 0; value != 0; value &= value - 1)
8593 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8594 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8596 rtx x1 = XEXP (x, 1);
8597 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8598 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8600 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8602 for (; value2 != 0; value2 &= value2 - 1)
8610 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8611 bit_cost = COSTS_N_INSNS (bit_cost);
8615 *total = sparc_costs->int_mulX + bit_cost;
8617 *total = sparc_costs->int_mul + bit_cost;
8624 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8634 *total = sparc_costs->float_div_df;
8636 *total = sparc_costs->float_div_sf;
8641 *total = sparc_costs->int_divX;
8643 *total = sparc_costs->int_div;
8650 *total = COSTS_N_INSNS (1);
8657 case UNSIGNED_FLOAT:
8661 case FLOAT_TRUNCATE:
8662 *total = sparc_costs->float_move;
8667 *total = sparc_costs->float_sqrt_df;
8669 *total = sparc_costs->float_sqrt_sf;
8674 *total = sparc_costs->float_cmp;
8676 *total = COSTS_N_INSNS (1);
8681 *total = sparc_costs->float_cmove;
8683 *total = sparc_costs->int_cmove;
8687 /* Handle the NAND vector patterns. */
8688 if (sparc_vector_mode_supported_p (GET_MODE (x))
8689 && GET_CODE (XEXP (x, 0)) == NOT
8690 && GET_CODE (XEXP (x, 1)) == NOT)
8692 *total = COSTS_N_INSNS (1);
8703 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
8704 This is achieved by means of a manual dynamic stack space allocation in
8705 the current frame. We make the assumption that SEQ doesn't contain any
8706 function calls, with the possible exception of calls to the PIC helper. */
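/* In 32-bit mode this works out to (illustrative arithmetic): OFFSET ==
   16 * 4 == 64 bytes for the register save area, and SIZE ==
   SPARC_STACK_ALIGN (64 + 8) == 72 bytes, i.e. two fresh words for REG
   and REG2 rounded up to the stack alignment.  */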
8709 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8711 /* We must preserve the lowest 16 words for the register save area. */
8712 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
8713 /* We really need only 2 words of fresh stack space. */
8714 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
8717 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
8718 SPARC_STACK_BIAS + offset));
8720 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
8721 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8723 emit_insn (gen_rtx_SET (VOIDmode,
8724 adjust_address (slot, word_mode, UNITS_PER_WORD),
8728 emit_insn (gen_rtx_SET (VOIDmode,
8730 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8731 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8732 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
8735 /* Output the assembler code for a thunk function. THUNK_DECL is the
8736 declaration for the thunk function itself, FUNCTION is the decl for
8737 the target function. DELTA is an immediate constant offset to be
8738 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8739 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8742 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8743 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8746 rtx this_rtx, insn, funexp;
8747 unsigned int int_arg_first;
8749 reload_completed = 1;
8750 epilogue_completed = 1;
8752 emit_note (NOTE_INSN_PROLOGUE_END);
8754 if (flag_delayed_branch)
8756 /* We will emit a regular sibcall below, so we need to instruct
8757 output_sibcall that we are in a leaf function. */
8758 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8760 /* This will cause final.c to invoke leaf_renumber_regs so we
8761 must behave as if we were in a not-yet-leafified function. */
8762 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8766 /* We will emit the sibcall manually below, so we will need to
8767 manually spill non-leaf registers. */
8768 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8770 /* We really are in a leaf function. */
8771 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8774 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8775 returns a structure, the structure return pointer is there instead. */
8777 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8778 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
8780 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
8782 /* Add DELTA. When possible use a plain add, otherwise load it into
8783 a register first. */
8786 rtx delta_rtx = GEN_INT (delta);
8788 if (! SPARC_SIMM13_P (delta))
8790 rtx scratch = gen_rtx_REG (Pmode, 1);
8791 emit_move_insn (scratch, delta_rtx);
8792 delta_rtx = scratch;
8795 /* THIS_RTX += DELTA. */
8796 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
8799 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
8802 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8803 rtx scratch = gen_rtx_REG (Pmode, 1);
8805 gcc_assert (vcall_offset < 0);
8807 /* SCRATCH = *THIS_RTX. */
8808 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
8810 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8811 may not have any available scratch register at this point. */
8812 if (SPARC_SIMM13_P (vcall_offset))
8814 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8815 else if (! fixed_regs[5]
8816 /* The below sequence is made up of at least 2 insns,
8817 while the default method may need only one. */
8818 && vcall_offset < -8192)
8820 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8821 emit_move_insn (scratch2, vcall_offset_rtx);
8822 vcall_offset_rtx = scratch2;
8826 rtx increment = GEN_INT (-4096);
8828 /* VCALL_OFFSET is a negative number whose typical range can be
8829 estimated as -32768..0 in 32-bit mode. In almost all cases
8830 it is therefore cheaper to emit multiple add insns than
8831 spilling and loading the constant into a register (at least
8833 while (! SPARC_SIMM13_P (vcall_offset))
8835 emit_insn (gen_add2_insn (scratch, increment));
8836 vcall_offset += 4096;
8838 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
8841 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
8842 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8843 gen_rtx_PLUS (Pmode,
8845 vcall_offset_rtx)));
8847 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
8848 emit_insn (gen_add2_insn (this_rtx, scratch));
8851 /* Generate a tail call to the target function. */
8852 if (! TREE_USED (function))
8854 assemble_external (function);
8855 TREE_USED (function) = 1;
8857 funexp = XEXP (DECL_RTL (function), 0);
8859 if (flag_delayed_branch)
8861 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8862 insn = emit_call_insn (gen_sibcall (funexp));
8863 SIBLING_CALL_P (insn) = 1;
8867 /* The hoops we have to jump through in order to generate a sibcall
8868 without using delay slots... */
8869 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8873 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8874 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8876 /* Delay emitting the PIC helper function because it needs to
8877 change the section and we are emitting assembly code. */
8878 load_pic_register (true); /* clobbers %o7 */
8879 scratch = legitimize_pic_address (funexp, scratch);
8882 emit_and_preserve (seq, spill_reg, spill_reg2);
8884 else if (TARGET_ARCH32)
8886 emit_insn (gen_rtx_SET (VOIDmode,
8888 gen_rtx_HIGH (SImode, funexp)));
8889 emit_insn (gen_rtx_SET (VOIDmode,
8891 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8893 else /* TARGET_ARCH64 */
8895 switch (sparc_cmodel)
8899 /* The destination can serve as a temporary. */
8900 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8905 /* The destination cannot serve as a temporary. */
8906 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8908 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8911 emit_and_preserve (seq, spill_reg, 0);
8919 emit_jump_insn (gen_indirect_jump (scratch));
8924 /* Run just enough of rest_of_compilation to get the insns emitted.
8925 There's not really enough bulk here to make other passes such as
8926 instruction scheduling worthwhile. Note that use_thunk calls
8927 assemble_start_function and assemble_end_function. */
8928 insn = get_insns ();
8929 insn_locators_alloc ();
8930 shorten_branches (insn);
8931 final_start_function (insn, file, 1);
8932 final (insn, file, 1);
8933 final_end_function ();
8935 reload_completed = 0;
8936 epilogue_completed = 0;
8939 /* Return true if sparc_output_mi_thunk would be able to output the
8940 assembler code for the thunk function specified by the arguments
8941 it is passed, and false otherwise. */
8943 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
8944 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8945 HOST_WIDE_INT vcall_offset,
8946 const_tree function ATTRIBUTE_UNUSED)
8948 /* Bound the loop used in the default method above. */
8949 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8952 /* How to allocate a 'struct machine_function'. */
8954 static struct machine_function *
8955 sparc_init_machine_status (void)
8957 return GGC_CNEW (struct machine_function);
8960 /* Locate some local-dynamic symbol still in use by this function
8961 so that we can print its name in local-dynamic base patterns. */
8964 get_some_local_dynamic_name (void)
8968 if (cfun->machine->some_ld_name)
8969 return cfun->machine->some_ld_name;
8971 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8973 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8974 return cfun->machine->some_ld_name;
8980 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8985 && GET_CODE (x) == SYMBOL_REF
8986 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8988 cfun->machine->some_ld_name = XSTR (x, 0);
/* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
   This is called from dwarf2out.c to emit call frame instructions
   for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs.  */

static void
sparc_dwarf_handle_frame_unspec (const char *label,
				 rtx pattern ATTRIBUTE_UNUSED,
				 int index ATTRIBUTE_UNUSED)
{
  gcc_assert (index == UNSPECV_SAVEW);
  dwarf2out_window_save (label);
}
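/* UNSPECV_SAVEW tags the register-window 'save' instruction in the
   prologue; dwarf2out_window_save emits the corresponding
   DW_CFA_GNU_window_save opcode, the SPARC-specific DWARF extension
   telling the unwinder that the caller's out registers have been
   rotated into the in registers.  */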
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void
sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.word\t%r_tls_dtpoff32(", file);
      break;
    case 8:
      fputs ("\t.xword\t%r_tls_dtpoff64(", file);
      break;
    default:
      gcc_unreachable ();
    }

  output_addr_const (file, x);
  fputs (")", file);
}
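/* For example, a 4-byte entry referring to a symbol 'foo' comes out as

	.word	%r_tls_dtpoff32(foo)

   which assembler and linker resolve to foo's offset from the start
   of its module's TLS block (the DTP-relative value).  */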
/* Do whatever processing is required at the end of a file.  */

static void
sparc_file_end (void)
{
  /* If we haven't emitted the special PIC helper function, do so now.  */
  if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
    emit_pic_helper ();

  if (NEED_INDICATE_EXEC_STACK)
    file_end_indicate_exec_stack ();
}
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
sparc_mangle_type (const_tree type)
{
  if (!TARGET_64BIT
      && TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
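/* "g" is the Itanium C++ ABI mangling used for __float128, so that on
   32-bit SPARC a 128-bit 'long double' mangles differently from the
   default 64-bit one and objects built with the two ABIs cannot be
   silently mixed.  */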
/* Expand code to perform an 8 or 16-bit compare and swap by doing 32-bit
   compare and swap on the word containing the byte or half-word.  */

void
sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
{
  rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
  rtx addr = gen_reg_rtx (Pmode);
  rtx off = gen_reg_rtx (SImode);
  rtx oldv = gen_reg_rtx (SImode);
  rtx newv = gen_reg_rtx (SImode);
  rtx oldvalue = gen_reg_rtx (SImode);
  rtx newvalue = gen_reg_rtx (SImode);
  rtx res = gen_reg_rtx (SImode);
  rtx resv = gen_reg_rtx (SImode);
  rtx memsi, val, mask, end_label, loop_label, cc;

  emit_insn (gen_rtx_SET (VOIDmode, addr,
			  gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
  if (Pmode != SImode)
    addr1 = gen_lowpart (SImode, addr1);
  emit_insn (gen_rtx_SET (VOIDmode, off,
			  gen_rtx_AND (SImode, addr1, GEN_INT (3))));

  memsi = gen_rtx_MEM (SImode, addr);
  set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
  MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);

  val = force_reg (SImode, memsi);

  emit_insn (gen_rtx_SET (VOIDmode, off,
			  gen_rtx_XOR (SImode, off,
				       GEN_INT (GET_MODE (mem) == QImode
						? 3 : 2))));

  emit_insn (gen_rtx_SET (VOIDmode, off,
			  gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
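/* SPARC is big-endian, so the byte at offset O within a word sits
   (O ^ 3) bytes above the least-significant byte ((O ^ 2) bytes for a
   half-word at offset O); the shift by 3 then converts that byte
   distance into the bit shift count used for the mask and the shifted
   values below.  */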
  if (GET_MODE (mem) == QImode)
    mask = force_reg (SImode, GEN_INT (0xff));
  else
    mask = force_reg (SImode, GEN_INT (0xffff));

  emit_insn (gen_rtx_SET (VOIDmode, mask,
			  gen_rtx_ASHIFT (SImode, mask, off)));

  emit_insn (gen_rtx_SET (VOIDmode, val,
			  gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
				       val)));
  oldval = gen_lowpart (SImode, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, oldv,
			  gen_rtx_ASHIFT (SImode, oldval, off)));

  newval = gen_lowpart_common (SImode, newval);
  emit_insn (gen_rtx_SET (VOIDmode, newv,
			  gen_rtx_ASHIFT (SImode, newval, off)));

  emit_insn (gen_rtx_SET (VOIDmode, oldv,
			  gen_rtx_AND (SImode, oldv, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, newv,
			  gen_rtx_AND (SImode, newv, mask)));

  end_label = gen_label_rtx ();
  loop_label = gen_label_rtx ();
  emit_label (loop_label);

  emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
			  gen_rtx_IOR (SImode, oldv, val)));

  emit_insn (gen_rtx_SET (VOIDmode, newvalue,
			  gen_rtx_IOR (SImode, newv, val)));

  emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));

  emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
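  /* The CAS may have failed merely because bytes outside the field
     changed concurrently.  Mask the field out of the result and compare
     with our cached copy of the containing word: if only the outside
     bytes differ, refresh VAL and retry; otherwise the field itself
     mismatched and we are done.  */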
  emit_insn (gen_rtx_SET (VOIDmode, resv,
			  gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
				       res)));

  cc = gen_compare_reg_1 (NE, resv, val);
  emit_insn (gen_rtx_SET (VOIDmode, val, resv));

  /* Use cbranchcc4 to separate the compare and branch!  */
  emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
				  cc, const0_rtx, loop_label));

  emit_label (end_label);

  emit_insn (gen_rtx_SET (VOIDmode, res,
			  gen_rtx_AND (SImode, res, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, res,
			  gen_rtx_LSHIFTRT (SImode, res, off)));

  emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
}
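/* A minimal C sketch of the algorithm open-coded above, for a QImode
   operand (illustration only: the helper name is made up, and the
   __sync builtin merely stands in for gen_sync_compare_and_swapsi):

     #include <stdint.h>

     static unsigned char
     cas_byte (unsigned char *p, unsigned char o, unsigned char n)
     {
       unsigned int *wp = (unsigned int *) ((uintptr_t) p & ~(uintptr_t) 3);
       int sh = (((uintptr_t) p & 3) ^ 3) * 8;	// big-endian shift
       unsigned int mask = 0xff << sh;
       unsigned int w = *wp & ~mask;		// word with field cleared
       unsigned int r;

       for (;;)
	 {
	   unsigned int ov = w | ((unsigned int) o << sh);
	   unsigned int nv = w | ((unsigned int) n << sh);
	   r = __sync_val_compare_and_swap (wp, ov, nv);
	   if (r == ov)
	     break;			// swap succeeded
	   if ((r & ~mask) == w)
	     break;			// the field itself differed
	   w = r & ~mask;		// outside bytes moved: retry
	 }
       return (r & mask) >> sh;		// old field value
     }
*/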
/* Implement TARGET_FRAME_POINTER_REQUIRED.  */

static bool
sparc_frame_pointer_required (void)
{
  return !(leaf_function_p () && only_leaf_regs_used ());
}
/* The way this is structured, we can't eliminate SFP in favor of SP
   if the frame pointer is required: we want to use the SFP->HFP elimination
   in that case.  But the test in update_eliminables doesn't know we are
   assuming below that we only do the former elimination.  */

static bool
sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == HARD_FRAME_POINTER_REGNUM
	  || !targetm.frame_pointer_required ());
}
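/* SFP here is the soft frame pointer (FRAME_POINTER_REGNUM), HFP the
   hard frame pointer (HARD_FRAME_POINTER_REGNUM, %fp) and SP the
   stack pointer (%sp); so when the frame pointer is required, only
   eliminations targeting HFP are allowed.  */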
#include "gt-sparc.h"