1 /* Subroutines for insn-output.c for SPARC.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com)
6 64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
27 #include "coretypes.h"
32 #include "hard-reg-set.h"
34 #include "insn-config.h"
35 #include "insn-codes.h"
36 #include "conditions.h"
38 #include "insn-attr.h"
49 #include "target-def.h"
50 #include "cfglayout.h"
52 #include "langhooks.h"
58 struct processor_costs cypress_costs = {
59 COSTS_N_INSNS (2), /* int load */
60 COSTS_N_INSNS (2), /* int signed load */
61 COSTS_N_INSNS (2), /* int zeroed load */
62 COSTS_N_INSNS (2), /* float load */
63 COSTS_N_INSNS (5), /* fmov, fneg, fabs */
64 COSTS_N_INSNS (5), /* fadd, fsub */
65 COSTS_N_INSNS (1), /* fcmp */
66 COSTS_N_INSNS (1), /* fmov, fmovr */
67 COSTS_N_INSNS (7), /* fmul */
68 COSTS_N_INSNS (37), /* fdivs */
69 COSTS_N_INSNS (37), /* fdivd */
70 COSTS_N_INSNS (63), /* fsqrts */
71 COSTS_N_INSNS (63), /* fsqrtd */
72 COSTS_N_INSNS (1), /* imul */
73 COSTS_N_INSNS (1), /* imulX */
74 0, /* imul bit factor */
75 COSTS_N_INSNS (1), /* idiv */
76 COSTS_N_INSNS (1), /* idivX */
77 COSTS_N_INSNS (1), /* movcc/movr */
78 0, /* shift penalty */
82 struct processor_costs supersparc_costs = {
83 COSTS_N_INSNS (1), /* int load */
84 COSTS_N_INSNS (1), /* int signed load */
85 COSTS_N_INSNS (1), /* int zeroed load */
86 COSTS_N_INSNS (0), /* float load */
87 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
88 COSTS_N_INSNS (3), /* fadd, fsub */
89 COSTS_N_INSNS (3), /* fcmp */
90 COSTS_N_INSNS (1), /* fmov, fmovr */
91 COSTS_N_INSNS (3), /* fmul */
92 COSTS_N_INSNS (6), /* fdivs */
93 COSTS_N_INSNS (9), /* fdivd */
94 COSTS_N_INSNS (12), /* fsqrts */
95 COSTS_N_INSNS (12), /* fsqrtd */
96 COSTS_N_INSNS (4), /* imul */
97 COSTS_N_INSNS (4), /* imulX */
98 0, /* imul bit factor */
99 COSTS_N_INSNS (4), /* idiv */
100 COSTS_N_INSNS (4), /* idivX */
101 COSTS_N_INSNS (1), /* movcc/movr */
102 1, /* shift penalty */
106 struct processor_costs hypersparc_costs = {
107 COSTS_N_INSNS (1), /* int load */
108 COSTS_N_INSNS (1), /* int signed load */
109 COSTS_N_INSNS (1), /* int zeroed load */
110 COSTS_N_INSNS (1), /* float load */
111 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
112 COSTS_N_INSNS (1), /* fadd, fsub */
113 COSTS_N_INSNS (1), /* fcmp */
114 COSTS_N_INSNS (1), /* fmov, fmovr */
115 COSTS_N_INSNS (1), /* fmul */
116 COSTS_N_INSNS (8), /* fdivs */
117 COSTS_N_INSNS (12), /* fdivd */
118 COSTS_N_INSNS (17), /* fsqrts */
119 COSTS_N_INSNS (17), /* fsqrtd */
120 COSTS_N_INSNS (17), /* imul */
121 COSTS_N_INSNS (17), /* imulX */
122 0, /* imul bit factor */
123 COSTS_N_INSNS (17), /* idiv */
124 COSTS_N_INSNS (17), /* idivX */
125 COSTS_N_INSNS (1), /* movcc/movr */
126 0, /* shift penalty */
130 struct processor_costs sparclet_costs = {
131 COSTS_N_INSNS (3), /* int load */
132 COSTS_N_INSNS (3), /* int signed load */
133 COSTS_N_INSNS (1), /* int zeroed load */
134 COSTS_N_INSNS (1), /* float load */
135 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
136 COSTS_N_INSNS (1), /* fadd, fsub */
137 COSTS_N_INSNS (1), /* fcmp */
138 COSTS_N_INSNS (1), /* fmov, fmovr */
139 COSTS_N_INSNS (1), /* fmul */
140 COSTS_N_INSNS (1), /* fdivs */
141 COSTS_N_INSNS (1), /* fdivd */
142 COSTS_N_INSNS (1), /* fsqrts */
143 COSTS_N_INSNS (1), /* fsqrtd */
144 COSTS_N_INSNS (5), /* imul */
145 COSTS_N_INSNS (5), /* imulX */
146 0, /* imul bit factor */
147 COSTS_N_INSNS (5), /* idiv */
148 COSTS_N_INSNS (5), /* idivX */
149 COSTS_N_INSNS (1), /* movcc/movr */
150 0, /* shift penalty */
154 struct processor_costs ultrasparc_costs = {
155 COSTS_N_INSNS (2), /* int load */
156 COSTS_N_INSNS (3), /* int signed load */
157 COSTS_N_INSNS (2), /* int zeroed load */
158 COSTS_N_INSNS (2), /* float load */
159 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
160 COSTS_N_INSNS (4), /* fadd, fsub */
161 COSTS_N_INSNS (1), /* fcmp */
162 COSTS_N_INSNS (2), /* fmov, fmovr */
163 COSTS_N_INSNS (4), /* fmul */
164 COSTS_N_INSNS (13), /* fdivs */
165 COSTS_N_INSNS (23), /* fdivd */
166 COSTS_N_INSNS (13), /* fsqrts */
167 COSTS_N_INSNS (23), /* fsqrtd */
168 COSTS_N_INSNS (4), /* imul */
169 COSTS_N_INSNS (4), /* imulX */
170 2, /* imul bit factor */
171 COSTS_N_INSNS (37), /* idiv */
172 COSTS_N_INSNS (68), /* idivX */
173 COSTS_N_INSNS (2), /* movcc/movr */
174 2, /* shift penalty */
178 struct processor_costs ultrasparc3_costs = {
179 COSTS_N_INSNS (2), /* int load */
180 COSTS_N_INSNS (3), /* int signed load */
181 COSTS_N_INSNS (3), /* int zeroed load */
182 COSTS_N_INSNS (2), /* float load */
183 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
184 COSTS_N_INSNS (4), /* fadd, fsub */
185 COSTS_N_INSNS (5), /* fcmp */
186 COSTS_N_INSNS (3), /* fmov, fmovr */
187 COSTS_N_INSNS (4), /* fmul */
188 COSTS_N_INSNS (17), /* fdivs */
189 COSTS_N_INSNS (20), /* fdivd */
190 COSTS_N_INSNS (20), /* fsqrts */
191 COSTS_N_INSNS (29), /* fsqrtd */
192 COSTS_N_INSNS (6), /* imul */
193 COSTS_N_INSNS (6), /* imulX */
194 0, /* imul bit factor */
195 COSTS_N_INSNS (40), /* idiv */
196 COSTS_N_INSNS (71), /* idivX */
197 COSTS_N_INSNS (2), /* movcc/movr */
198 0, /* shift penalty */
202 struct processor_costs niagara_costs = {
203 COSTS_N_INSNS (3), /* int load */
204 COSTS_N_INSNS (3), /* int signed load */
205 COSTS_N_INSNS (3), /* int zeroed load */
206 COSTS_N_INSNS (9), /* float load */
207 COSTS_N_INSNS (8), /* fmov, fneg, fabs */
208 COSTS_N_INSNS (8), /* fadd, fsub */
209 COSTS_N_INSNS (26), /* fcmp */
210 COSTS_N_INSNS (8), /* fmov, fmovr */
211 COSTS_N_INSNS (29), /* fmul */
212 COSTS_N_INSNS (54), /* fdivs */
213 COSTS_N_INSNS (83), /* fdivd */
214 COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
215 COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
216 COSTS_N_INSNS (11), /* imul */
217 COSTS_N_INSNS (11), /* imulX */
218 0, /* imul bit factor */
219 COSTS_N_INSNS (72), /* idiv */
220 COSTS_N_INSNS (72), /* idivX */
221 COSTS_N_INSNS (1), /* movcc/movr */
222 0, /* shift penalty */
226 struct processor_costs niagara2_costs = {
227 COSTS_N_INSNS (3), /* int load */
228 COSTS_N_INSNS (3), /* int signed load */
229 COSTS_N_INSNS (3), /* int zeroed load */
230 COSTS_N_INSNS (3), /* float load */
231 COSTS_N_INSNS (6), /* fmov, fneg, fabs */
232 COSTS_N_INSNS (6), /* fadd, fsub */
233 COSTS_N_INSNS (6), /* fcmp */
234 COSTS_N_INSNS (6), /* fmov, fmovr */
235 COSTS_N_INSNS (6), /* fmul */
236 COSTS_N_INSNS (19), /* fdivs */
237 COSTS_N_INSNS (33), /* fdivd */
238 COSTS_N_INSNS (19), /* fsqrts */
239 COSTS_N_INSNS (33), /* fsqrtd */
240 COSTS_N_INSNS (5), /* imul */
241 COSTS_N_INSNS (5), /* imulX */
242 0, /* imul bit factor */
243 COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
244 COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
245 COSTS_N_INSNS (1), /* movcc/movr */
246 0, /* shift penalty */
249 const struct processor_costs *sparc_costs = &cypress_costs;
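/* Note on the tables above: sparc_override_options points sparc_costs at the
   entry matching the -mcpu/-mtune selection, and sparc_rtx_costs reads the
   individual fields.  COSTS_N_INSNS (n) scales n by the cost of one simple
   instruction, so e.g. the Cypress value COSTS_N_INSNS (37) for fdivs models
   a single-precision divide as roughly 37 times an ordinary ALU op.  */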
251 #ifdef HAVE_AS_RELAX_OPTION
252 /* If 'as' and 'ld' are relaxing tail call insns into branch always, use
253 "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
254 With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
255 somebody branches between the sethi and the jmp. */
256 #define LEAF_SIBCALL_SLOT_RESERVED_P 1
258 #define LEAF_SIBCALL_SLOT_RESERVED_P \
259 ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
262 /* Global variables for machine-dependent things. */
264 /* Size of frame. Need to know this to emit return insns from leaf procedures.
265 ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
266 reload pass. This is important as the value is later used for scheduling
267 (to see what can go in a delay slot).
268 APPARENT_FSIZE is the size of the stack less the register save area and less
269 the outgoing argument area. It is used when saving call preserved regs. */
270 static HOST_WIDE_INT apparent_fsize;
271 static HOST_WIDE_INT actual_fsize;
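/* As a rough illustration of the distinction (standard 32-bit SPARC ABI
   figures): a register-window frame always reserves the 16-word (64-byte)
   window save area plus any outgoing argument space; those bytes count
   toward ACTUAL_FSIZE but, per the comment above, not toward APPARENT_FSIZE,
   which is essentially the space for locals and spills.  */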
273 /* Number of live general or floating point registers needed to be
274 saved (as 4-byte quantities). */
275 static int num_gfregs;
277 /* The alias set for prologue/epilogue register save/restore. */
278 static GTY(()) alias_set_type sparc_sr_alias_set;
280 /* The alias set for the structure return value. */
281 static GTY(()) alias_set_type struct_value_alias_set;
283 /* Vector to say how input registers are mapped to output registers.
284 HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
285 eliminate it. You must use -fomit-frame-pointer to get that. */
286 char leaf_reg_remap[] =
287 { 0, 1, 2, 3, 4, 5, 6, 7,
288 -1, -1, -1, -1, -1, -1, 14, -1,
289 -1, -1, -1, -1, -1, -1, -1, -1,
290 8, 9, 10, 11, 12, 13, -1, 15,
292 32, 33, 34, 35, 36, 37, 38, 39,
293 40, 41, 42, 43, 44, 45, 46, 47,
294 48, 49, 50, 51, 52, 53, 54, 55,
295 56, 57, 58, 59, 60, 61, 62, 63,
296 64, 65, 66, 67, 68, 69, 70, 71,
297 72, 73, 74, 75, 76, 77, 78, 79,
298 80, 81, 82, 83, 84, 85, 86, 87,
299 88, 89, 90, 91, 92, 93, 94, 95,
300 96, 97, 98, 99, 100};
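/* For example, in a function that qualifies for leaf treatment an incoming
   argument living in %i0 (hard reg 24) is rewritten to use %o0 (hard reg 8),
   and the return address in %i7 (reg 31) becomes %o7 (reg 15), as the table
   above shows.  The -1 entries (%l0-%l7, %i6/%fp, most of the %o registers)
   have no remapping; such registers are disallowed in leaf-function
   candidates, cf. sparc_leaf_regs below.  */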
302 /* Vector, indexed by hard register number, which contains 1
303 for a register that is allowable in a candidate for leaf
304 function treatment. */
305 char sparc_leaf_regs[] =
306 { 1, 1, 1, 1, 1, 1, 1, 1,
307 0, 0, 0, 0, 0, 0, 1, 0,
308 0, 0, 0, 0, 0, 0, 0, 0,
309 1, 1, 1, 1, 1, 1, 0, 1,
310 1, 1, 1, 1, 1, 1, 1, 1,
311 1, 1, 1, 1, 1, 1, 1, 1,
312 1, 1, 1, 1, 1, 1, 1, 1,
313 1, 1, 1, 1, 1, 1, 1, 1,
314 1, 1, 1, 1, 1, 1, 1, 1,
315 1, 1, 1, 1, 1, 1, 1, 1,
316 1, 1, 1, 1, 1, 1, 1, 1,
317 1, 1, 1, 1, 1, 1, 1, 1,
320 struct GTY(()) machine_function
322 /* Some local-dynamic TLS symbol name. */
323 const char *some_ld_name;
325 /* True if the current function is leaf and uses only leaf regs,
326 so that the SPARC leaf function optimization can be applied.
327 Private version of current_function_uses_only_leaf_regs, see
328 sparc_expand_prologue for the rationale. */
331 /* True if the data calculated by sparc_expand_prologue are valid. */
332 bool prologue_data_valid_p;
335 #define sparc_leaf_function_p cfun->machine->leaf_function_p
336 #define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
338 /* Register we pretend to think the frame pointer is allocated to.
339 Normally, this is %fp, but if we are in a leaf procedure, this
340 is %sp+"something". We record "something" separately as it may
341 be too big for reg+constant addressing. */
342 static rtx frame_base_reg;
343 static HOST_WIDE_INT frame_base_offset;
345 /* 1 if the next opcode is to be specially indented. */
346 int sparc_indent_opcode = 0;
348 static bool sparc_handle_option (size_t, const char *, int);
349 static void sparc_init_modes (void);
350 static void scan_record_type (tree, int *, int *, int *);
351 static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
352 tree, int, int, int *, int *);
354 static int supersparc_adjust_cost (rtx, rtx, rtx, int);
355 static int hypersparc_adjust_cost (rtx, rtx, rtx, int);
357 static void sparc_output_addr_vec (rtx);
358 static void sparc_output_addr_diff_vec (rtx);
359 static void sparc_output_deferred_case_vectors (void);
360 static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
361 static rtx sparc_builtin_saveregs (void);
362 static int epilogue_renumber (rtx *, int);
363 static bool sparc_assemble_integer (rtx, unsigned int, int);
364 static int set_extends (rtx);
365 static void emit_pic_helper (void);
366 static void load_pic_register (bool);
367 static int save_or_restore_regs (int, int, rtx, int, int);
368 static void emit_save_or_restore_regs (int);
369 static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
370 static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
371 #ifdef OBJECT_FORMAT_ELF
372 static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
375 static int sparc_adjust_cost (rtx, rtx, rtx, int);
376 static int sparc_issue_rate (void);
377 static void sparc_sched_init (FILE *, int, int);
378 static int sparc_use_sched_lookahead (void);
380 static void emit_soft_tfmode_libcall (const char *, int, rtx *);
381 static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
382 static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
383 static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
384 static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
386 static bool sparc_function_ok_for_sibcall (tree, tree);
387 static void sparc_init_libfuncs (void);
388 static void sparc_init_builtins (void);
389 static void sparc_vis_init_builtins (void);
390 static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
391 static tree sparc_fold_builtin (tree, tree, bool);
392 static int sparc_vis_mul8x16 (int, int);
393 static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
394 static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
395 HOST_WIDE_INT, tree);
396 static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
397 HOST_WIDE_INT, const_tree);
398 static struct machine_function * sparc_init_machine_status (void);
399 static bool sparc_cannot_force_const_mem (rtx);
400 static rtx sparc_tls_get_addr (void);
401 static rtx sparc_tls_got (void);
402 static const char *get_some_local_dynamic_name (void);
403 static int get_some_local_dynamic_name_1 (rtx *, void *);
404 static bool sparc_rtx_costs (rtx, int, int, int *, bool);
405 static bool sparc_promote_prototypes (const_tree);
406 static rtx sparc_struct_value_rtx (tree, int);
407 static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
408 int *, const_tree, int);
409 static bool sparc_return_in_memory (const_tree, const_tree);
410 static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
411 static void sparc_va_start (tree, rtx);
412 static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
413 static bool sparc_vector_mode_supported_p (enum machine_mode);
414 static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
415 static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
416 enum machine_mode, const_tree, bool);
417 static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
418 enum machine_mode, tree, bool);
419 static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
420 static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
421 static void sparc_file_end (void);
422 static bool sparc_frame_pointer_required (void);
423 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
424 static const char *sparc_mangle_type (const_tree);
427 #ifdef SUBTARGET_ATTRIBUTE_TABLE
428 /* Table of valid machine attributes. */
429 static const struct attribute_spec sparc_attribute_table[] =
431 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
432 SUBTARGET_ATTRIBUTE_TABLE,
433 { NULL, 0, 0, false, false, false, NULL }
437 /* Option handling. */
440 enum cmodel sparc_cmodel;
442 char sparc_hard_reg_printed[8];
444 struct sparc_cpu_select sparc_select[] =
446 /* switch name, tune arch */
447 { (char *)0, "default", 1, 1 },
448 { (char *)0, "-mcpu=", 1, 1 },
449 { (char *)0, "-mtune=", 1, 0 },
453 /* CPU type. This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx. */
454 enum processor_type sparc_cpu;
456 /* Whether an FPU option was specified. */
457 static bool fpu_option_set = false;
459 /* Initialize the GCC target structure. */
461 /* The sparc default is to use .half rather than .short for aligned
462 HI objects. Use .word instead of .long on non-ELF systems. */
463 #undef TARGET_ASM_ALIGNED_HI_OP
464 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
465 #ifndef OBJECT_FORMAT_ELF
466 #undef TARGET_ASM_ALIGNED_SI_OP
467 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
470 #undef TARGET_ASM_UNALIGNED_HI_OP
471 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
472 #undef TARGET_ASM_UNALIGNED_SI_OP
473 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
474 #undef TARGET_ASM_UNALIGNED_DI_OP
475 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
477 /* The target hook has to handle DI-mode values. */
478 #undef TARGET_ASM_INTEGER
479 #define TARGET_ASM_INTEGER sparc_assemble_integer
481 #undef TARGET_ASM_FUNCTION_PROLOGUE
482 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
483 #undef TARGET_ASM_FUNCTION_EPILOGUE
484 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
486 #undef TARGET_SCHED_ADJUST_COST
487 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
488 #undef TARGET_SCHED_ISSUE_RATE
489 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
490 #undef TARGET_SCHED_INIT
491 #define TARGET_SCHED_INIT sparc_sched_init
492 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
493 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
495 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
496 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
498 #undef TARGET_INIT_LIBFUNCS
499 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
500 #undef TARGET_INIT_BUILTINS
501 #define TARGET_INIT_BUILTINS sparc_init_builtins
503 #undef TARGET_LEGITIMIZE_ADDRESS
504 #define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
506 #undef TARGET_EXPAND_BUILTIN
507 #define TARGET_EXPAND_BUILTIN sparc_expand_builtin
508 #undef TARGET_FOLD_BUILTIN
509 #define TARGET_FOLD_BUILTIN sparc_fold_builtin
512 #undef TARGET_HAVE_TLS
513 #define TARGET_HAVE_TLS true
516 #undef TARGET_CANNOT_FORCE_CONST_MEM
517 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
519 #undef TARGET_ASM_OUTPUT_MI_THUNK
520 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
521 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
522 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
524 #undef TARGET_RTX_COSTS
525 #define TARGET_RTX_COSTS sparc_rtx_costs
526 #undef TARGET_ADDRESS_COST
527 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
529 #undef TARGET_PROMOTE_FUNCTION_MODE
530 #define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode
532 #undef TARGET_PROMOTE_PROTOTYPES
533 #define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes
535 #undef TARGET_STRUCT_VALUE_RTX
536 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
537 #undef TARGET_RETURN_IN_MEMORY
538 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
539 #undef TARGET_MUST_PASS_IN_STACK
540 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
541 #undef TARGET_PASS_BY_REFERENCE
542 #define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
543 #undef TARGET_ARG_PARTIAL_BYTES
544 #define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
546 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
547 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
548 #undef TARGET_STRICT_ARGUMENT_NAMING
549 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
551 #undef TARGET_EXPAND_BUILTIN_VA_START
552 #define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
553 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
554 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
556 #undef TARGET_VECTOR_MODE_SUPPORTED_P
557 #define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
559 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
560 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec
562 #ifdef SUBTARGET_INSERT_ATTRIBUTES
563 #undef TARGET_INSERT_ATTRIBUTES
564 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
567 #ifdef SUBTARGET_ATTRIBUTE_TABLE
568 #undef TARGET_ATTRIBUTE_TABLE
569 #define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
572 #undef TARGET_RELAXED_ORDERING
573 #define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING
575 #undef TARGET_DEFAULT_TARGET_FLAGS
576 #define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
577 #undef TARGET_HANDLE_OPTION
578 #define TARGET_HANDLE_OPTION sparc_handle_option
580 #if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
581 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
582 #define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
585 #undef TARGET_ASM_FILE_END
586 #define TARGET_ASM_FILE_END sparc_file_end
588 #undef TARGET_FRAME_POINTER_REQUIRED
589 #define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required
591 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
592 #undef TARGET_MANGLE_TYPE
593 #define TARGET_MANGLE_TYPE sparc_mangle_type
596 #undef TARGET_LEGITIMATE_ADDRESS_P
597 #define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p
599 struct gcc_target targetm = TARGET_INITIALIZER;
601 /* Implement TARGET_HANDLE_OPTION. */
604 sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
609 case OPT_mhard_float:
610 case OPT_msoft_float:
611 fpu_option_set = true;
615 sparc_select[1].string = arg;
619 sparc_select[2].string = arg;
626 /* Validate and override various options, and do some machine dependent
630 sparc_override_options (void)
632 static struct code_model {
633 const char *const name;
634 const enum cmodel value;
635 } const cmodels[] = {
637 { "medlow", CM_MEDLOW },
638 { "medmid", CM_MEDMID },
639 { "medany", CM_MEDANY },
640 { "embmedany", CM_EMBMEDANY },
641 { NULL, (enum cmodel) 0 }
643 const struct code_model *cmodel;
644 /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=. */
645 static struct cpu_default {
647 const char *const name;
648 } const cpu_default[] = {
649 /* There must be one entry here for each TARGET_CPU value. */
650 { TARGET_CPU_sparc, "cypress" },
651 { TARGET_CPU_sparclet, "tsc701" },
652 { TARGET_CPU_sparclite, "f930" },
653 { TARGET_CPU_v8, "v8" },
654 { TARGET_CPU_hypersparc, "hypersparc" },
655 { TARGET_CPU_sparclite86x, "sparclite86x" },
656 { TARGET_CPU_supersparc, "supersparc" },
657 { TARGET_CPU_v9, "v9" },
658 { TARGET_CPU_ultrasparc, "ultrasparc" },
659 { TARGET_CPU_ultrasparc3, "ultrasparc3" },
660 { TARGET_CPU_niagara, "niagara" },
661 { TARGET_CPU_niagara2, "niagara2" },
664 const struct cpu_default *def;
665 /* Table of values for -m{cpu,tune}=. */
666 static struct cpu_table {
667 const char *const name;
668 const enum processor_type processor;
671 } const cpu_table[] = {
672 { "v7", PROCESSOR_V7, MASK_ISA, 0 },
673 { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
674 { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
675 /* TI TMS390Z55 supersparc */
676 { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
677 { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
678 /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
679 The Fujitsu MB86934 is the recent sparclite chip, with an fpu. */
680 { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
681 { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
682 { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
683 { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
685 { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
687 { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
688 { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
689 /* TI ultrasparc I, II, IIi */
690 { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
691 /* Although insns using %y are deprecated, it is a clear win on current
693 |MASK_DEPRECATED_V8_INSNS},
694 /* TI ultrasparc III */
695 /* ??? Check if %y issue still holds true in ultra3. */
696 { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
698 { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
699 { "niagara2", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9},
700 { 0, (enum processor_type) 0, 0, 0 }
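/* For example, -mcpu=ultrasparc matches the entry above with
   disable == MASK_ISA and enable == MASK_V9|MASK_DEPRECATED_V8_INSNS, so the
   selection loop below first clears every ISA selection bit from
   target_flags and then sets MASK_V9 (and the deprecated-V8-insn bit), while
   sparc_cpu becomes PROCESSOR_ULTRASPARC.  */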
702 const struct cpu_table *cpu;
703 const struct sparc_cpu_select *sel;
706 #ifndef SPARC_BI_ARCH
707 /* Check for unsupported architecture size. */
708 if (! TARGET_64BIT != DEFAULT_ARCH32_P)
709 error ("%s is not supported by this configuration",
710 DEFAULT_ARCH32_P ? "-m64" : "-m32");
713 /* We force all 64bit archs to use 128 bit long double */
714 if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
716 error ("-mlong-double-64 not allowed with -m64");
717 target_flags |= MASK_LONG_DOUBLE_128;
720 /* Code model selection. */
721 sparc_cmodel = SPARC_DEFAULT_CMODEL;
725 sparc_cmodel = CM_32;
728 if (sparc_cmodel_string != NULL)
732 for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
733 if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
735 if (cmodel->name == NULL)
736 error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
738 sparc_cmodel = cmodel->value;
741 error ("-mcmodel= is not supported on 32 bit systems");
744 fpu = target_flags & MASK_FPU; /* save current -mfpu status */
746 /* Set the default CPU. */
747 for (def = &cpu_default[0]; def->name; ++def)
748 if (def->cpu == TARGET_CPU_DEFAULT)
750 gcc_assert (def->name);
751 sparc_select[0].string = def->name;
753 for (sel = &sparc_select[0]; sel->name; ++sel)
757 for (cpu = &cpu_table[0]; cpu->name; ++cpu)
758 if (! strcmp (sel->string, cpu->name))
761 sparc_cpu = cpu->processor;
765 target_flags &= ~cpu->disable;
766 target_flags |= cpu->enable;
772 error ("bad value (%s) for %s switch", sel->string, sel->name);
776 /* If -mfpu or -mno-fpu was explicitly used, don't override with
777 the processor default. */
779 target_flags = (target_flags & ~MASK_FPU) | fpu;
781 /* Don't allow -mvis if FPU is disabled. */
783 target_flags &= ~MASK_VIS;
785 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions are available.
787 -m64 also implies v9. */
788 if (TARGET_VIS || TARGET_ARCH64)
790 target_flags |= MASK_V9;
791 target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
794 /* Use the deprecated v8 insns for sparc64 in 32 bit mode. */
795 if (TARGET_V9 && TARGET_ARCH32)
796 target_flags |= MASK_DEPRECATED_V8_INSNS;
798 /* V8PLUS requires V9, makes no sense in 64 bit mode. */
799 if (! TARGET_V9 || TARGET_ARCH64)
800 target_flags &= ~MASK_V8PLUS;
802 /* Don't use stack biasing in 32 bit mode. */
804 target_flags &= ~MASK_STACK_BIAS;
806 /* Supply a default value for align_functions. */
807 if (align_functions == 0
808 && (sparc_cpu == PROCESSOR_ULTRASPARC
809 || sparc_cpu == PROCESSOR_ULTRASPARC3
810 || sparc_cpu == PROCESSOR_NIAGARA
811 || sparc_cpu == PROCESSOR_NIAGARA2))
812 align_functions = 32;
814 /* Validate PCC_STRUCT_RETURN. */
815 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
816 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
818 /* Only use .uaxword when compiling for a 64-bit target. */
820 targetm.asm_out.unaligned_op.di = NULL;
822 /* Do various machine dependent initializations. */
825 /* Acquire unique alias sets for our private stuff. */
826 sparc_sr_alias_set = new_alias_set ();
827 struct_value_alias_set = new_alias_set ();
829 /* Set up function hooks. */
830 init_machine_status = sparc_init_machine_status;
835 case PROCESSOR_CYPRESS:
836 sparc_costs = &cypress_costs;
839 case PROCESSOR_SPARCLITE:
840 case PROCESSOR_SUPERSPARC:
841 sparc_costs = &supersparc_costs;
845 case PROCESSOR_HYPERSPARC:
846 case PROCESSOR_SPARCLITE86X:
847 sparc_costs = &hypersparc_costs;
849 case PROCESSOR_SPARCLET:
850 case PROCESSOR_TSC701:
851 sparc_costs = &sparclet_costs;
854 case PROCESSOR_ULTRASPARC:
855 sparc_costs = &ultrasparc_costs;
857 case PROCESSOR_ULTRASPARC3:
858 sparc_costs = &ultrasparc3_costs;
860 case PROCESSOR_NIAGARA:
861 sparc_costs = &niagara_costs;
863 case PROCESSOR_NIAGARA2:
864 sparc_costs = &niagara2_costs;
868 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
869 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
870 target_flags |= MASK_LONG_DOUBLE_128;
873 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
874 set_param_value ("simultaneous-prefetches",
875 ((sparc_cpu == PROCESSOR_ULTRASPARC
876 || sparc_cpu == PROCESSOR_NIAGARA
877 || sparc_cpu == PROCESSOR_NIAGARA2)
879 : (sparc_cpu == PROCESSOR_ULTRASPARC3
881 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
882 set_param_value ("l1-cache-line-size",
883 ((sparc_cpu == PROCESSOR_ULTRASPARC
884 || sparc_cpu == PROCESSOR_ULTRASPARC3
885 || sparc_cpu == PROCESSOR_NIAGARA
886 || sparc_cpu == PROCESSOR_NIAGARA2)
890 /* Miscellaneous utilities. */
892 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
893 or branch on register contents instructions. */
896 v9_regcmp_p (enum rtx_code code)
898 return (code == EQ || code == NE || code == GE || code == LT
899 || code == LE || code == GT);
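/* In other words, these are exactly the conditions the V9 branch-on-register
   (brz, brlez, brlz, brnz, brgz, brgez) and conditional-move-on-register
   (movr{z,lez,lz,nz,gz,gez}) instructions can test; they always compare a
   register against zero, e.g. (ge x 0) can become "brgez x, label".  */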
902 /* Nonzero if OP is a floating point constant which can
903 be loaded into an integer register using a single
904 sethi instruction. */
909 if (GET_CODE (op) == CONST_DOUBLE)
914 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
915 REAL_VALUE_TO_TARGET_SINGLE (r, i);
916 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
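/* For instance, 1.0f has the single-precision image 0x3f800000: its low 10
   bits are clear and it is far too big for a signed 13-bit immediate, so the
   predicate above accepts it and the value can be built with one sethi.  */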
922 /* Nonzero if OP is a floating point constant which can
923 be loaded into an integer register using a single
929 if (GET_CODE (op) == CONST_DOUBLE)
934 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
935 REAL_VALUE_TO_TARGET_SINGLE (r, i);
936 return SPARC_SIMM13_P (i);
942 /* Nonzero if OP is a floating point constant which can
943 be loaded into an integer register using a high/losum
944 instruction sequence. */
947 fp_high_losum_p (rtx op)
949 /* The constraints calling this should only be in
950 SFmode move insns, so any constant which cannot
951 be moved using a single insn will do. */
952 if (GET_CODE (op) == CONST_DOUBLE)
957 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
958 REAL_VALUE_TO_TARGET_SINGLE (r, i);
959 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
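/* For instance, 0.1f has the image 0x3dcccccd: it neither fits a signed
   13-bit immediate nor has its low 10 bits clear, so it needs the full
   sethi %hi()/or %lo() pair that this predicate selects.  */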
965 /* Expand a move instruction. Return true if all work is done. */
968 sparc_expand_move (enum machine_mode mode, rtx *operands)
970 /* Handle sets of MEM first. */
971 if (GET_CODE (operands[0]) == MEM)
973 /* 0 is a register (or a pair of registers) on SPARC. */
974 if (register_or_zero_operand (operands[1], mode))
977 if (!reload_in_progress)
979 operands[0] = validize_mem (operands[0]);
980 operands[1] = force_reg (mode, operands[1]);
984 /* Fixup TLS cases. */
986 && CONSTANT_P (operands[1])
987 && GET_CODE (operands[1]) != HIGH
988 && sparc_tls_referenced_p (operands [1]))
990 rtx sym = operands[1];
993 if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
995 addend = XEXP (XEXP (sym, 0), 1);
996 sym = XEXP (XEXP (sym, 0), 0);
999 gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));
1001 sym = legitimize_tls_address (sym);
1004 sym = gen_rtx_PLUS (mode, sym, addend);
1005 sym = force_operand (sym, operands[0]);
1010 /* Fixup PIC cases. */
1011 if (flag_pic && CONSTANT_P (operands[1]))
1013 if (pic_address_needs_scratch (operands[1]))
1014 operands[1] = legitimize_pic_address (operands[1], mode, 0);
1016 /* VxWorks does not impose a fixed gap between segments; the run-time
1017 gap can be different from the object-file gap. We therefore can't
1018 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
1019 are absolutely sure that X is in the same segment as the GOT.
1020 Unfortunately, the flexibility of linker scripts means that we
1021 can't be sure of that in general, so assume that _G_O_T_-relative
1022 accesses are never valid on VxWorks. */
1023 if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
1027 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
1033 gcc_assert (TARGET_ARCH64);
1034 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
1039 if (symbolic_operand (operands[1], mode))
1041 operands[1] = legitimize_pic_address (operands[1],
1043 (reload_in_progress ?
1050 /* If we are trying to toss an integer constant into FP registers,
1051 or loading a FP or vector constant, force it into memory. */
1052 if (CONSTANT_P (operands[1])
1053 && REG_P (operands[0])
1054 && (SPARC_FP_REG_P (REGNO (operands[0]))
1055 || SCALAR_FLOAT_MODE_P (mode)
1056 || VECTOR_MODE_P (mode)))
1058 /* emit_group_store will send such bogosity to us when it is
1059 not storing directly into memory. So fix this up to avoid
1060 crashes in output_constant_pool. */
1061 if (operands [1] == const0_rtx)
1062 operands[1] = CONST0_RTX (mode);
1064 /* We can clear FP registers if TARGET_VIS, and always other regs. */
1065 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
1066 && const_zero_operand (operands[1], mode))
1069 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
1070 /* We are able to build any SF constant in integer registers
1071 with at most 2 instructions. */
1073 /* And any DF constant in integer registers. */
1075 && (reload_completed || reload_in_progress))))
1078 operands[1] = force_const_mem (mode, operands[1]);
1079 if (!reload_in_progress)
1080 operands[1] = validize_mem (operands[1]);
1084 /* Accept non-constants and valid constants unmodified. */
1085 if (!CONSTANT_P (operands[1])
1086 || GET_CODE (operands[1]) == HIGH
1087 || input_operand (operands[1], mode))
1093 /* All QImode constants require only one insn, so proceed. */
1098 sparc_emit_set_const32 (operands[0], operands[1]);
1102 /* input_operand should have filtered out 32-bit mode. */
1103 sparc_emit_set_const64 (operands[0], operands[1]);
1113 /* Load OP1, a 32-bit constant, into OP0, a register.
1114 We know it can't be done in one insn when we get
1115 here, the move expander guarantees this. */
1118 sparc_emit_set_const32 (rtx op0, rtx op1)
1120 enum machine_mode mode = GET_MODE (op0);
1123 if (reload_in_progress || reload_completed)
1126 temp = gen_reg_rtx (mode);
1128 if (GET_CODE (op1) == CONST_INT)
1130 gcc_assert (!small_int_operand (op1, mode)
1131 && !const_high_operand (op1, mode));
1133 /* Emit them as real moves instead of a HIGH/LO_SUM,
1134 this way CSE can see everything and reuse intermediate
1135 values if it wants. */
1136 emit_insn (gen_rtx_SET (VOIDmode, temp,
1137 GEN_INT (INTVAL (op1)
1138 & ~(HOST_WIDE_INT)0x3ff)));
1140 emit_insn (gen_rtx_SET (VOIDmode,
1142 gen_rtx_IOR (mode, temp,
1143 GEN_INT (INTVAL (op1) & 0x3ff))));
1147 /* A symbol, emit in the traditional way. */
1148 emit_insn (gen_rtx_SET (VOIDmode, temp,
1149 gen_rtx_HIGH (mode, op1)));
1150 emit_insn (gen_rtx_SET (VOIDmode,
1151 op0, gen_rtx_LO_SUM (mode, temp, op1)));
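/* As an illustration (value picked arbitrarily): loading 0x12345678 first
   sets TEMP to 0x12345400 (the constant with its low 10 bits masked off,
   i.e. what "sethi %hi(0x12345678)" produces) and then IORs in 0x278, the
   low 10 bits, exactly as "or %temp, %lo(0x12345678), %reg" would.  */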
1155 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
1156 If TEMP is nonzero, we are forbidden to use any other scratch
1157 registers. Otherwise, we are allowed to generate them as needed.
1159 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
1160 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
1163 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
1165 rtx temp1, temp2, temp3, temp4, temp5;
1168 if (temp && GET_MODE (temp) == TImode)
1171 temp = gen_rtx_REG (DImode, REGNO (temp));
1174 /* SPARC-V9 code-model support. */
1175 switch (sparc_cmodel)
1178 /* The range spanned by all instructions in the object is less
1179 than 2^31 bytes (2GB) and the distance from any instruction
1180 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1181 than 2^31 bytes (2GB).
1183 The executable must be in the low 4TB of the virtual address
1186 sethi %hi(symbol), %temp1
1187 or %temp1, %lo(symbol), %reg */
1189 temp1 = temp; /* op0 is allowed. */
1191 temp1 = gen_reg_rtx (DImode);
1193 emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
1194 emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
1198 /* The range spanned by all instructions in the object is less
1199 than 2^31 bytes (2GB) and the distance from any instruction
1200 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1201 than 2^31 bytes (2GB).
1203 The executable must be in the low 16TB of the virtual address
1206 sethi %h44(symbol), %temp1
1207 or %temp1, %m44(symbol), %temp2
1208 sllx %temp2, 12, %temp3
1209 or %temp3, %l44(symbol), %reg */
1214 temp3 = temp; /* op0 is allowed. */
1218 temp1 = gen_reg_rtx (DImode);
1219 temp2 = gen_reg_rtx (DImode);
1220 temp3 = gen_reg_rtx (DImode);
1223 emit_insn (gen_seth44 (temp1, op1));
1224 emit_insn (gen_setm44 (temp2, temp1, op1));
1225 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1226 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
1227 emit_insn (gen_setl44 (op0, temp3, op1));
1231 /* The range spanned by all instructions in the object is less
1232 than 2^31 bytes (2GB) and the distance from any instruction
1233 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1234 than 2^31 bytes (2GB).
1236 The executable can be placed anywhere in the virtual address
1239 sethi %hh(symbol), %temp1
1240 sethi %lm(symbol), %temp2
1241 or %temp1, %hm(symbol), %temp3
1242 sllx %temp3, 32, %temp4
1243 or %temp4, %temp2, %temp5
1244 or %temp5, %lo(symbol), %reg */
1247 /* It is possible that one of the registers we got for operands[2]
1248 might coincide with that of operands[0] (which is why we made
1249 it TImode). Pick the other one to use as our scratch. */
1250 if (rtx_equal_p (temp, op0))
1252 gcc_assert (ti_temp);
1253 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1256 temp2 = temp; /* op0 is _not_ allowed, see above. */
1263 temp1 = gen_reg_rtx (DImode);
1264 temp2 = gen_reg_rtx (DImode);
1265 temp3 = gen_reg_rtx (DImode);
1266 temp4 = gen_reg_rtx (DImode);
1267 temp5 = gen_reg_rtx (DImode);
1270 emit_insn (gen_sethh (temp1, op1));
1271 emit_insn (gen_setlm (temp2, op1));
1272 emit_insn (gen_sethm (temp3, temp1, op1));
1273 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1274 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1275 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1276 gen_rtx_PLUS (DImode, temp4, temp2)));
1277 emit_insn (gen_setlo (op0, temp5, op1));
1281 /* Old old old backwards compatibility kruft here.
1282 Essentially it is MEDLOW with a fixed 64-bit
1283 virtual base added to all data segment addresses.
1284 Text-segment stuff is computed like MEDANY, we can't
1285 reuse the code above because the relocation knobs
1288 Data segment: sethi %hi(symbol), %temp1
1289 add %temp1, EMBMEDANY_BASE_REG, %temp2
1290 or %temp2, %lo(symbol), %reg */
1291 if (data_segment_operand (op1, GET_MODE (op1)))
1295 temp1 = temp; /* op0 is allowed. */
1300 temp1 = gen_reg_rtx (DImode);
1301 temp2 = gen_reg_rtx (DImode);
1304 emit_insn (gen_embmedany_sethi (temp1, op1));
1305 emit_insn (gen_embmedany_brsum (temp2, temp1));
1306 emit_insn (gen_embmedany_losum (op0, temp2, op1));
1309 /* Text segment: sethi %uhi(symbol), %temp1
1310 sethi %hi(symbol), %temp2
1311 or %temp1, %ulo(symbol), %temp3
1312 sllx %temp3, 32, %temp4
1313 or %temp4, %temp2, %temp5
1314 or %temp5, %lo(symbol), %reg */
1319 /* It is possible that one of the registers we got for operands[2]
1320 might coincide with that of operands[0] (which is why we made
1321 it TImode). Pick the other one to use as our scratch. */
1322 if (rtx_equal_p (temp, op0))
1324 gcc_assert (ti_temp);
1325 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1328 temp2 = temp; /* op0 is _not_ allowed, see above. */
1335 temp1 = gen_reg_rtx (DImode);
1336 temp2 = gen_reg_rtx (DImode);
1337 temp3 = gen_reg_rtx (DImode);
1338 temp4 = gen_reg_rtx (DImode);
1339 temp5 = gen_reg_rtx (DImode);
1342 emit_insn (gen_embmedany_textuhi (temp1, op1));
1343 emit_insn (gen_embmedany_texthi (temp2, op1));
1344 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
1345 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1346 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1347 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1348 gen_rtx_PLUS (DImode, temp4, temp2)));
1349 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
1358 #if HOST_BITS_PER_WIDE_INT == 32
1360 sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
1365 /* These avoid problems when cross compiling. If we do not
1366 go through all this hair then the optimizer will see
1367 invalid REG_EQUAL notes or in some cases none at all. */
1368 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
1369 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
1370 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
1371 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
1373 /* The optimizer is not to assume anything about exactly
1374 which bits are set for a HIGH, they are unspecified.
1375 Unfortunately this leads to many missed optimizations
1376 during CSE. We mask out the non-HIGH bits so the result matches
1377 a plain movdi, to alleviate this problem. */
1379 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
1381 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
1385 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
1387 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
1391 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
1393 return gen_rtx_IOR (DImode, src, GEN_INT (val));
1397 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
1399 return gen_rtx_XOR (DImode, src, GEN_INT (val));
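/* Thus gen_safe_HIGH64 (dest, 0x12345678) yields a plain
   (set dest (const_int 0x12345400)) rather than a HIGH rtx: the insn still
   matches movdi, but CSE now knows exactly which bits are set.  */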
1402 /* Worker routines for 64-bit constant formation on arch64.
1403 One of the key things to be doing in these emissions is
1404 to create as many temp REGs as possible. This makes it
1405 possible for half-built constants to be used later when
1406 such values are similar to something required later on.
1407 Without doing this, the optimizer cannot see such
1410 static void sparc_emit_set_const64_quick1 (rtx, rtx,
1411 unsigned HOST_WIDE_INT, int);
1414 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
1415 unsigned HOST_WIDE_INT low_bits, int is_neg)
1417 unsigned HOST_WIDE_INT high_bits;
1420 high_bits = (~low_bits) & 0xffffffff;
1422 high_bits = low_bits;
1424 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1427 emit_insn (gen_rtx_SET (VOIDmode, op0,
1428 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1432 /* If we are XOR'ing with -1, then we should emit a one's complement
1433 instead. This way the combiner will notice logical operations
1434 such as ANDN later on and substitute. */
1435 if ((low_bits & 0x3ff) == 0x3ff)
1437 emit_insn (gen_rtx_SET (VOIDmode, op0,
1438 gen_rtx_NOT (DImode, temp)));
1442 emit_insn (gen_rtx_SET (VOIDmode, op0,
1443 gen_safe_XOR64 (temp,
1444 (-(HOST_WIDE_INT)0x400
1445 | (low_bits & 0x3ff)))));
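/* Worked example for the is_neg case (constant chosen for illustration):
   loading 0xffffffffdeadbeef gives low_bits == 0xdeadbeef, so TEMP is set to
   ~0xdeadbeef & ~0x3ff == 0x21524000 by the gen_safe_HIGH64 call above, and
   the final XOR with (-0x400 | 0x2ef) == -0x111 flips the upper 32 bits to
   all-ones and restores the low word, producing the value in two insns.  */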
1450 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1451 unsigned HOST_WIDE_INT, int);
1454 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1455 unsigned HOST_WIDE_INT high_bits,
1456 unsigned HOST_WIDE_INT low_immediate,
1461 if ((high_bits & 0xfffffc00) != 0)
1463 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1464 if ((high_bits & ~0xfffffc00) != 0)
1465 emit_insn (gen_rtx_SET (VOIDmode, op0,
1466 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1472 emit_insn (gen_safe_SET64 (temp, high_bits));
1476 /* Now shift it up into place. */
1477 emit_insn (gen_rtx_SET (VOIDmode, op0,
1478 gen_rtx_ASHIFT (DImode, temp2,
1479 GEN_INT (shift_count))));
1481 /* If there is a low immediate part piece, finish up by
1482 putting that in as well. */
1483 if (low_immediate != 0)
1484 emit_insn (gen_rtx_SET (VOIDmode, op0,
1485 gen_safe_OR64 (op0, low_immediate)));
1488 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1489 unsigned HOST_WIDE_INT);
1491 /* Full 64-bit constant decomposition. Even though this is the
1492 'worst' case, we still optimize a few things away. */
1494 sparc_emit_set_const64_longway (rtx op0, rtx temp,
1495 unsigned HOST_WIDE_INT high_bits,
1496 unsigned HOST_WIDE_INT low_bits)
1500 if (reload_in_progress || reload_completed)
1503 sub_temp = gen_reg_rtx (DImode);
1505 if ((high_bits & 0xfffffc00) != 0)
1507 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1508 if ((high_bits & ~0xfffffc00) != 0)
1509 emit_insn (gen_rtx_SET (VOIDmode,
1511 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1517 emit_insn (gen_safe_SET64 (temp, high_bits));
1521 if (!reload_in_progress && !reload_completed)
1523 rtx temp2 = gen_reg_rtx (DImode);
1524 rtx temp3 = gen_reg_rtx (DImode);
1525 rtx temp4 = gen_reg_rtx (DImode);
1527 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1528 gen_rtx_ASHIFT (DImode, sub_temp,
1531 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
1532 if ((low_bits & ~0xfffffc00) != 0)
1534 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1535 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
1536 emit_insn (gen_rtx_SET (VOIDmode, op0,
1537 gen_rtx_PLUS (DImode, temp4, temp3)));
1541 emit_insn (gen_rtx_SET (VOIDmode, op0,
1542 gen_rtx_PLUS (DImode, temp4, temp2)));
1547 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
1548 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
1549 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
1552 /* We are in the middle of reload, so this is really
1553 painful. However we do still make an attempt to
1554 avoid emitting truly stupid code. */
1555 if (low1 != const0_rtx)
1557 emit_insn (gen_rtx_SET (VOIDmode, op0,
1558 gen_rtx_ASHIFT (DImode, sub_temp,
1559 GEN_INT (to_shift))));
1560 emit_insn (gen_rtx_SET (VOIDmode, op0,
1561 gen_rtx_IOR (DImode, op0, low1)));
1569 if (low2 != const0_rtx)
1571 emit_insn (gen_rtx_SET (VOIDmode, op0,
1572 gen_rtx_ASHIFT (DImode, sub_temp,
1573 GEN_INT (to_shift))));
1574 emit_insn (gen_rtx_SET (VOIDmode, op0,
1575 gen_rtx_IOR (DImode, op0, low2)));
1583 emit_insn (gen_rtx_SET (VOIDmode, op0,
1584 gen_rtx_ASHIFT (DImode, sub_temp,
1585 GEN_INT (to_shift))));
1586 if (low3 != const0_rtx)
1587 emit_insn (gen_rtx_SET (VOIDmode, op0,
1588 gen_rtx_IOR (DImode, op0, low3)));
1593 /* Analyze a 64-bit constant for certain properties. */
1594 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
1595 unsigned HOST_WIDE_INT,
1596 int *, int *, int *);
1599 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
1600 unsigned HOST_WIDE_INT low_bits,
1601 int *hbsp, int *lbsp, int *abbasp)
1603 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
1606 lowest_bit_set = highest_bit_set = -1;
1610 if ((lowest_bit_set == -1)
1611 && ((low_bits >> i) & 1))
1613 if ((highest_bit_set == -1)
1614 && ((high_bits >> (32 - i - 1)) & 1))
1615 highest_bit_set = (64 - i - 1);
1618 && ((highest_bit_set == -1)
1619 || (lowest_bit_set == -1)));
1625 if ((lowest_bit_set == -1)
1626 && ((high_bits >> i) & 1))
1627 lowest_bit_set = i + 32;
1628 if ((highest_bit_set == -1)
1629 && ((low_bits >> (32 - i - 1)) & 1))
1630 highest_bit_set = 32 - i - 1;
1633 && ((highest_bit_set == -1)
1634 || (lowest_bit_set == -1)));
1636 /* If there are no bits set this should have gone out
1637 as one instruction! */
1638 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
1639 all_bits_between_are_set = 1;
1640 for (i = lowest_bit_set; i <= highest_bit_set; i++)
1644 if ((low_bits & (1 << i)) != 0)
1649 if ((high_bits & (1 << (i - 32))) != 0)
1652 all_bits_between_are_set = 0;
1655 *hbsp = highest_bit_set;
1656 *lbsp = lowest_bit_set;
1657 *abbasp = all_bits_between_are_set;
1660 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
1663 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
1664 unsigned HOST_WIDE_INT low_bits)
1666 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
1669 || high_bits == 0xffffffff)
1672 analyze_64bit_constant (high_bits, low_bits,
1673 &highest_bit_set, &lowest_bit_set,
1674 &all_bits_between_are_set);
1676 if ((highest_bit_set == 63
1677 || lowest_bit_set == 0)
1678 && all_bits_between_are_set != 0)
1681 if ((highest_bit_set - lowest_bit_set) < 21)
1687 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
1688 unsigned HOST_WIDE_INT,
1691 static unsigned HOST_WIDE_INT
1692 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
1693 unsigned HOST_WIDE_INT low_bits,
1694 int lowest_bit_set, int shift)
1696 HOST_WIDE_INT hi, lo;
1698 if (lowest_bit_set < 32)
1700 lo = (low_bits >> lowest_bit_set) << shift;
1701 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
1706 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
1708 gcc_assert (! (hi & lo));
1712 /* Here we are sure to be arch64 and this is an integer constant
1713 being loaded into a register. Emit the most efficient
1714 insn sequence possible. Detection of all the 1-insn cases
1715 has been done already. */
1717 sparc_emit_set_const64 (rtx op0, rtx op1)
1719 unsigned HOST_WIDE_INT high_bits, low_bits;
1720 int lowest_bit_set, highest_bit_set;
1721 int all_bits_between_are_set;
1724 /* Sanity check that we know what we are working with. */
1725 gcc_assert (TARGET_ARCH64
1726 && (GET_CODE (op0) == SUBREG
1727 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1729 if (reload_in_progress || reload_completed)
1732 if (GET_CODE (op1) != CONST_INT)
1734 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1739 temp = gen_reg_rtx (DImode);
1741 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1742 low_bits = (INTVAL (op1) & 0xffffffff);
1744 /* low_bits bits 0 --> 31
1745 high_bits bits 32 --> 63 */
1747 analyze_64bit_constant (high_bits, low_bits,
1748 &highest_bit_set, &lowest_bit_set,
1749 &all_bits_between_are_set);
1751 /* First try for a 2-insn sequence. */
1753 /* These situations are preferred because the optimizer can
1754 * do more things with them:
1756 * sllx %reg, shift, %reg
1758 * srlx %reg, shift, %reg
1759 * 3) mov some_small_const, %reg
1760 * sllx %reg, shift, %reg
1762 if (((highest_bit_set == 63
1763 || lowest_bit_set == 0)
1764 && all_bits_between_are_set != 0)
1765 || ((highest_bit_set - lowest_bit_set) < 12))
1767 HOST_WIDE_INT the_const = -1;
1768 int shift = lowest_bit_set;
1770 if ((highest_bit_set != 63
1771 && lowest_bit_set != 0)
1772 || all_bits_between_are_set == 0)
1775 create_simple_focus_bits (high_bits, low_bits,
1778 else if (lowest_bit_set == 0)
1779 shift = -(63 - highest_bit_set);
1781 gcc_assert (SPARC_SIMM13_P (the_const));
1782 gcc_assert (shift != 0);
1784 emit_insn (gen_safe_SET64 (temp, the_const));
1786 emit_insn (gen_rtx_SET (VOIDmode,
1788 gen_rtx_ASHIFT (DImode,
1792 emit_insn (gen_rtx_SET (VOIDmode,
1794 gen_rtx_LSHIFTRT (DImode,
1796 GEN_INT (-shift))));
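/* Example of case 3) above: 0x00000fff00000000 has its set bits in positions
   32..43, a span of less than 12 bits, so this path emits
   "mov 0xfff, %reg; sllx %reg, 32, %reg", i.e. the focus bits are formed at
   bit 0 and then shifted back up into place.  */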
1800 /* Now a range of 22 or less bits set somewhere.
1801 * 1) sethi %hi(focus_bits), %reg
1802 * sllx %reg, shift, %reg
1803 * 2) sethi %hi(focus_bits), %reg
1804 * srlx %reg, shift, %reg
1806 if ((highest_bit_set - lowest_bit_set) < 21)
1808 unsigned HOST_WIDE_INT focus_bits =
1809 create_simple_focus_bits (high_bits, low_bits,
1810 lowest_bit_set, 10);
1812 gcc_assert (SPARC_SETHI_P (focus_bits));
1813 gcc_assert (lowest_bit_set != 10);
1815 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1817 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1818 if (lowest_bit_set < 10)
1819 emit_insn (gen_rtx_SET (VOIDmode,
1821 gen_rtx_LSHIFTRT (DImode, temp,
1822 GEN_INT (10 - lowest_bit_set))));
1823 else if (lowest_bit_set > 10)
1824 emit_insn (gen_rtx_SET (VOIDmode,
1826 gen_rtx_ASHIFT (DImode, temp,
1827 GEN_INT (lowest_bit_set - 10))));
1831 /* 1) sethi %hi(low_bits), %reg
1832 * or %reg, %lo(low_bits), %reg
1833 * 2) sethi %hi(~low_bits), %reg
1834 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1837 || high_bits == 0xffffffff)
1839 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1840 (high_bits == 0xffffffff));
1844 /* Now, try 3-insn sequences. */
1846 /* 1) sethi %hi(high_bits), %reg
1847 * or %reg, %lo(high_bits), %reg
1848 * sllx %reg, 32, %reg
1852 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1856 /* We may be able to do something quick
1857 when the constant is negated, so try that. */
1858 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1859 (~low_bits) & 0xfffffc00))
1861 /* NOTE: The trailing bits get XOR'd so we need the
1862 non-negated bits, not the negated ones. */
1863 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1865 if ((((~high_bits) & 0xffffffff) == 0
1866 && ((~low_bits) & 0x80000000) == 0)
1867 || (((~high_bits) & 0xffffffff) == 0xffffffff
1868 && ((~low_bits) & 0x80000000) != 0))
1870 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1872 if ((SPARC_SETHI_P (fast_int)
1873 && (~high_bits & 0xffffffff) == 0)
1874 || SPARC_SIMM13_P (fast_int))
1875 emit_insn (gen_safe_SET64 (temp, fast_int));
1877 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1882 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1883 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
1884 sparc_emit_set_const64 (temp, negated_const);
1887 /* If we are XOR'ing with -1, then we should emit a one's complement
1888 instead. This way the combiner will notice logical operations
1889 such as ANDN later on and substitute. */
1890 if (trailing_bits == 0x3ff)
1892 emit_insn (gen_rtx_SET (VOIDmode, op0,
1893 gen_rtx_NOT (DImode, temp)));
1897 emit_insn (gen_rtx_SET (VOIDmode,
1899 gen_safe_XOR64 (temp,
1900 (-0x400 | trailing_bits))));
1905 /* 1) sethi %hi(xxx), %reg
1906 * or %reg, %lo(xxx), %reg
1907 * sllx %reg, yyy, %reg
1909 * ??? This is just a generalized version of the low_bits==0
1910 * thing above, FIXME...
1912 if ((highest_bit_set - lowest_bit_set) < 32)
1914 unsigned HOST_WIDE_INT focus_bits =
1915 create_simple_focus_bits (high_bits, low_bits,
1918 /* We can't get here in this state. */
1919 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1921 /* So what we know is that the set bits straddle the
1922 middle of the 64-bit word. */
1923 sparc_emit_set_const64_quick2 (op0, temp,
1929 /* 1) sethi %hi(high_bits), %reg
1930 * or %reg, %lo(high_bits), %reg
1931 * sllx %reg, 32, %reg
1932 * or %reg, low_bits, %reg
1934 if (SPARC_SIMM13_P(low_bits)
1935 && ((int)low_bits > 0))
1937 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
1941 /* The easiest way when all else fails, is full decomposition. */
1943 printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
1944 high_bits, low_bits, ~high_bits, ~low_bits);
1946 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1948 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
1950 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1951 return the mode to be used for the comparison. For floating-point,
1952 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1953 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1954 processing is needed. */
1957 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1959 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1985 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
1986 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
1988 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1989 return CCX_NOOVmode;
1995 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2002 /* Emit the compare insn and return the CC reg for a CODE comparison
2003 with operands X and Y. */
2006 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2008 enum machine_mode mode;
2011 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2014 mode = SELECT_CC_MODE (code, x, y);
2016 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2017 fcc regs (cse can't tell they're really call clobbered regs and will
2018 remove a duplicate comparison even if there is an intervening function
2019 call - it will then try to reload the cc reg via an int reg which is why
2020 we need the movcc patterns). It is possible to provide the movcc
2021 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2022 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2023 to tell cse that CCFPE mode registers (even pseudos) are call clobbered. */
2026 /* ??? This is an experiment. Rather than making changes to cse which may
2027 or may not be easy/clean, we do our own cse. This is possible because
2028 we will generate hard registers. Cse knows they're call clobbered (it
2029 doesn't know the same thing about pseudos). If we guess wrong, no big
2030 deal, but if we win, great! */
2032 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2033 #if 1 /* experiment */
2036 /* We cycle through the registers to ensure they're all exercised. */
2037 static int next_fcc_reg = 0;
2038 /* Previous x,y for each fcc reg. */
2039 static rtx prev_args[4][2];
2041 /* Scan prev_args for x,y. */
2042 for (reg = 0; reg < 4; reg++)
2043 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2048 prev_args[reg][0] = x;
2049 prev_args[reg][1] = y;
2050 next_fcc_reg = (next_fcc_reg + 1) & 3;
2052 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2055 cc_reg = gen_reg_rtx (mode);
2056 #endif /* ! experiment */
2057 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2058 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2060 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2062 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
2063 will only result in an unrecognizable insn, so there is no point in asserting. */
2064 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2070 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2073 gen_compare_reg (rtx cmp)
2075 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2078 /* This function is used for v9 only.
2079 DEST is the target of the Scc insn.
2080 CODE is the code for an Scc's comparison.
2081 X and Y are the values we compare.
2083 This function is needed to turn
2086 (gt (reg:CCX 100 %icc)
2090 (gt:DI (reg:CCX 100 %icc)
2093 I.e. the instruction recognizer needs to see the mode of the comparison to
2094 find the right instruction. We could use "gt:DI" right in the
2095 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2098 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2101 && (GET_MODE (x) == DImode
2102 || GET_MODE (dest) == DImode))
2105 /* Try to use the movrCC insns. */
2107 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2109 && v9_regcmp_p (compare_code))
2114 /* Special case for op0 != 0. This can be done with one instruction if
2117 if (compare_code == NE
2118 && GET_MODE (dest) == DImode
2119 && rtx_equal_p (op0, dest))
2121 emit_insn (gen_rtx_SET (VOIDmode, dest,
2122 gen_rtx_IF_THEN_ELSE (DImode,
2123 gen_rtx_fmt_ee (compare_code, DImode,
2130 if (reg_overlap_mentioned_p (dest, op0))
2132 /* Handle the case where dest == x.
2133 We "early clobber" the result. */
2134 op0 = gen_reg_rtx (GET_MODE (x));
2135 emit_move_insn (op0, x);
2138 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2139 if (GET_MODE (op0) != DImode)
2141 temp = gen_reg_rtx (DImode);
2142 convert_move (temp, op0, 0);
2146 emit_insn (gen_rtx_SET (VOIDmode, dest,
2147 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2148 gen_rtx_fmt_ee (compare_code, DImode,
2156 x = gen_compare_reg_1 (compare_code, x, y);
2159 gcc_assert (GET_MODE (x) != CC_NOOVmode
2160 && GET_MODE (x) != CCX_NOOVmode);
2162 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2163 emit_insn (gen_rtx_SET (VOIDmode, dest,
2164 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2165 gen_rtx_fmt_ee (compare_code,
2166 GET_MODE (x), x, y),
2167 const1_rtx, dest)));
2173 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2174 without jumps using the addx/subx instructions. */
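/* As an illustrative sketch (hand-written, not necessarily the literal
   output of this function), "dest = (x <u y)" can be emitted as

        subcc   %x, %y, %g0     ! sets the carry bit iff x < y (unsigned)
        addx    %g0, 0, %dest   ! dest = 0 + 0 + carry

   and "dest = (x >=u y)" as the same subcc followed by
   "subx %g0, -1, %dest", i.e. dest = 1 - carry.  */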
2177 emit_scc_insn (rtx operands[])
2184 /* The quad-word fp compare library routines all return nonzero to indicate
2185 true, which is different from the equivalent libgcc routines, so we must
2186 handle them specially here. */
2187 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2189 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2190 GET_CODE (operands[1]));
2191 operands[2] = XEXP (operands[1], 0);
2192 operands[3] = XEXP (operands[1], 1);
2195 code = GET_CODE (operands[1]);
2199 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2200 more applications). The exception to this is "reg != 0" which can
2201 be done in one instruction on v9 (so we do it). */
2204 if (GET_MODE (x) == SImode)
2206 rtx pat = gen_seqsi_special (operands[0], x, y);
2210 else if (GET_MODE (x) == DImode)
2212 rtx pat = gen_seqdi_special (operands[0], x, y);
2220 if (GET_MODE (x) == SImode)
2222 rtx pat = gen_snesi_special (operands[0], x, y);
2226 else if (GET_MODE (x) == DImode)
2228 rtx pat = gen_snedi_special (operands[0], x, y);
2234 /* For the rest, on v9 we can use conditional moves. */
2238 if (gen_v9_scc (operands[0], code, x, y))
2242 /* We can do LTU and GEU using the addx/subx instructions too. And
2243 for GTU/LEU, if both operands are registers, swap them and fall
2244 back to the easy case. */
2245 if (code == GTU || code == LEU)
2247 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2248 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2253 code = swap_condition (code);
2257 if (code == LTU || code == GEU)
2259 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2260 gen_rtx_fmt_ee (code, SImode,
2261 gen_compare_reg_1 (code, x, y),
2266 /* Nope, do branches. */
2270 /* Emit a conditional jump insn for the v9 architecture using comparison code
2271 CODE and jump target LABEL.
2272 This function exists to take advantage of the v9 brxx insns. */
2275 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2277 emit_jump_insn (gen_rtx_SET (VOIDmode,
2279 gen_rtx_IF_THEN_ELSE (VOIDmode,
2280 gen_rtx_fmt_ee (code, GET_MODE (op0),
2282 gen_rtx_LABEL_REF (VOIDmode, label),
2287 emit_conditional_branch_insn (rtx operands[])
2289 /* The quad-word fp compare library routines all return nonzero to indicate
2290 true, which is different from the equivalent libgcc routines, so we must
2291 handle them specially here. */
2292 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2294 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2295 GET_CODE (operands[0]));
2296 operands[1] = XEXP (operands[0], 0);
2297 operands[2] = XEXP (operands[0], 1);
2300 if (TARGET_ARCH64 && operands[2] == const0_rtx
2301 && GET_CODE (operands[1]) == REG
2302 && GET_MODE (operands[1]) == DImode)
2304 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2308 operands[1] = gen_compare_reg (operands[0]);
2309 operands[2] = const0_rtx;
2310 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2311 operands[1], operands[2]);
2312 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2317 /* Generate a DFmode part of a hard TFmode register.
2318 REG is the TFmode hard register, LOW is 1 for the
2319 low 64 bits of the register and 0 otherwise.
2322 gen_df_reg (rtx reg, int low)
2324 int regno = REGNO (reg);
2326 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2327 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2328 return gen_rtx_REG (DFmode, regno);
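/* Example (for the big-endian word order used by SPARC): if REG is a
   TFmode value living in %f0 (hard register 32), gen_df_reg (reg, 0)
   yields the DFmode register %f0 and gen_df_reg (reg, 1) yields %f2
   (hard register 34), the low half being two register numbers up.  */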
2331 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2332 Unlike normal calls, TFmode operands are passed by reference. It is
2333 assumed that no more than 3 operands are required. */
2336 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2338 rtx ret_slot = NULL, arg[3], func_sym;
2341 /* We only expect to be called for conversions, unary, and binary ops. */
2342 gcc_assert (nargs == 2 || nargs == 3);
2344 for (i = 0; i < nargs; ++i)
2346 rtx this_arg = operands[i];
2349 /* TFmode arguments and return values are passed by reference. */
2350 if (GET_MODE (this_arg) == TFmode)
2352 int force_stack_temp;
2354 force_stack_temp = 0;
2355 if (TARGET_BUGGY_QP_LIB && i == 0)
2356 force_stack_temp = 1;
2358 if (GET_CODE (this_arg) == MEM
2359 && ! force_stack_temp)
2360 this_arg = XEXP (this_arg, 0);
2361 else if (CONSTANT_P (this_arg)
2362 && ! force_stack_temp)
2364 this_slot = force_const_mem (TFmode, this_arg);
2365 this_arg = XEXP (this_slot, 0);
2369 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2371 /* Operand 0 is the return value. We'll copy it out later. */
2373 emit_move_insn (this_slot, this_arg);
2375 ret_slot = this_slot;
2377 this_arg = XEXP (this_slot, 0);
2384 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2386 if (GET_MODE (operands[0]) == TFmode)
2389 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2390 arg[0], GET_MODE (arg[0]),
2391 arg[1], GET_MODE (arg[1]));
2393 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2394 arg[0], GET_MODE (arg[0]),
2395 arg[1], GET_MODE (arg[1]),
2396 arg[2], GET_MODE (arg[2]));
2399 emit_move_insn (operands[0], ret_slot);
2405 gcc_assert (nargs == 2);
2407 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2408 GET_MODE (operands[0]), 1,
2409 arg[1], GET_MODE (arg[1]));
2411 if (ret != operands[0])
2412 emit_move_insn (operands[0], ret);
2416 /* Expand soft-float TFmode calls to sparc abi routines. */
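/* For instance, a TFmode PLUS is lowered by the routine below to a call
   to "_Qp_add" on 64-bit targets or "_Q_add" on 32-bit targets; MINUS,
   MULT and DIV map to the corresponding _Qp_* / _Q_* routines.  */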
2419 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2441 emit_soft_tfmode_libcall (func, 3, operands);
2445 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2449 gcc_assert (code == SQRT);
2452 emit_soft_tfmode_libcall (func, 2, operands);
2456 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2463 switch (GET_MODE (operands[1]))
2476 case FLOAT_TRUNCATE:
2477 switch (GET_MODE (operands[0]))
2491 switch (GET_MODE (operands[1]))
2496 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2506 case UNSIGNED_FLOAT:
2507 switch (GET_MODE (operands[1]))
2512 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2523 switch (GET_MODE (operands[0]))
2537 switch (GET_MODE (operands[0]))
2554 emit_soft_tfmode_libcall (func, 2, operands);
2557 /* Expand a hard-float tfmode operation. All arguments must be in
2561 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2565 if (GET_RTX_CLASS (code) == RTX_UNARY)
2567 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2568 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2572 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2573 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2574 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2575 operands[1], operands[2]);
2578 if (register_operand (operands[0], VOIDmode))
2581 dest = gen_reg_rtx (GET_MODE (operands[0]));
2583 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2585 if (dest != operands[0])
2586 emit_move_insn (operands[0], dest);
2590 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2592 if (TARGET_HARD_QUAD)
2593 emit_hard_tfmode_operation (code, operands);
2595 emit_soft_tfmode_binop (code, operands);
2599 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2601 if (TARGET_HARD_QUAD)
2602 emit_hard_tfmode_operation (code, operands);
2604 emit_soft_tfmode_unop (code, operands);
2608 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2610 if (TARGET_HARD_QUAD)
2611 emit_hard_tfmode_operation (code, operands);
2613 emit_soft_tfmode_cvt (code, operands);
2616 /* Return nonzero if a branch/jump/call instruction will be emitting
2617 a nop into its delay slot. */
2620 empty_delay_slot (rtx insn)
2624 /* If no previous instruction (should not happen), return true. */
2625 if (PREV_INSN (insn) == NULL)
2628 seq = NEXT_INSN (PREV_INSN (insn));
2629 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2635 /* Return nonzero if TRIAL can go into the call delay slot. */
2638 tls_call_delay (rtx trial)
2643 call __tls_get_addr, %tgd_call (foo)
2644 add %l7, %o0, %o0, %tgd_add (foo)
2645 while Sun as/ld does not. */
2646 if (TARGET_GNU_TLS || !TARGET_TLS)
2649 pat = PATTERN (trial);
2651 /* We must reject tgd_add{32|64}, i.e.
2652 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2653 and tldm_add{32|64}, i.e.
2654 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2656 if (GET_CODE (pat) == SET
2657 && GET_CODE (SET_SRC (pat)) == PLUS)
2659 rtx unspec = XEXP (SET_SRC (pat), 1);
2661 if (GET_CODE (unspec) == UNSPEC
2662 && (XINT (unspec, 1) == UNSPEC_TLSGD
2663 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2670 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2671 instruction. RETURN_P is true if the v9 variant 'return' is to be
2672 considered in the test too.
2674 TRIAL must be a SET whose destination is a REG appropriate for the
2675 'restore' instruction or, if RETURN_P is true, for the 'return'
2679 eligible_for_restore_insn (rtx trial, bool return_p)
2681 rtx pat = PATTERN (trial);
2682 rtx src = SET_SRC (pat);
2684 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2685 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2686 && arith_operand (src, GET_MODE (src)))
2689 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2691 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2694 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2695 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2696 && arith_double_operand (src, GET_MODE (src)))
2697 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2699 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2700 else if (! TARGET_FPU && register_operand (src, SFmode))
2703 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2704 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2707 /* If we have the 'return' instruction, anything that does not use
2708 local or output registers and can go into a delay slot wins. */
2709 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2710 && (get_attr_in_uncond_branch_delay (trial)
2711 == IN_UNCOND_BRANCH_DELAY_TRUE))
2714 /* The 'restore src1,src2,dest' pattern for SImode. */
2715 else if (GET_CODE (src) == PLUS
2716 && register_operand (XEXP (src, 0), SImode)
2717 && arith_operand (XEXP (src, 1), SImode))
2720 /* The 'restore src1,src2,dest' pattern for DImode. */
2721 else if (GET_CODE (src) == PLUS
2722 && register_operand (XEXP (src, 0), DImode)
2723 && arith_double_operand (XEXP (src, 1), DImode))
2726 /* The 'restore src1,%lo(src2),dest' pattern. */
2727 else if (GET_CODE (src) == LO_SUM
2728 && ! TARGET_CM_MEDMID
2729 && ((register_operand (XEXP (src, 0), SImode)
2730 && immediate_operand (XEXP (src, 1), SImode))
2732 && register_operand (XEXP (src, 0), DImode)
2733 && immediate_operand (XEXP (src, 1), DImode))))
2736 /* The 'restore src,src,dest' pattern. */
2737 else if (GET_CODE (src) == ASHIFT
2738 && (register_operand (XEXP (src, 0), SImode)
2739 || register_operand (XEXP (src, 0), DImode))
2740 && XEXP (src, 1) == const1_rtx)
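/* As a hand-written illustration of what the patterns above allow: an
   epilogue ending in

        mov     %l1, %i0
        ret
         nop

   can instead be emitted as

        ret
         restore %l1, %g0, %o0

   because the move matches the 'restore src,%g0,dest' shape and the
   window shift maps the callee's %i0 onto the caller's %o0.  */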
2746 /* Return nonzero if TRIAL can go into the function return's
2750 eligible_for_return_delay (rtx trial)
2754 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2757 if (get_attr_length (trial) != 1)
2760 /* If there are any call-saved registers, we would need to scan TRIAL
2761 to make sure it does not reference them. For now just make it easy. */
2765 /* If the function uses __builtin_eh_return, the eh_return machinery
2766 occupies the delay slot. */
2767 if (crtl->calls_eh_return)
2770 /* In the case of a true leaf function, anything can go into the slot. */
2771 if (sparc_leaf_function_p)
2772 return get_attr_in_uncond_branch_delay (trial)
2773 == IN_UNCOND_BRANCH_DELAY_TRUE;
2775 pat = PATTERN (trial);
2777 /* Otherwise, only operations which can be done in tandem with
2778 a `restore' or `return' insn can go into the delay slot. */
2779 if (GET_CODE (SET_DEST (pat)) != REG
2780 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2783 /* If this instruction sets up a floating-point register and we have a return
2784 instruction, it can probably go in. But restore will not work
2786 if (REGNO (SET_DEST (pat)) >= 32)
2788 && ! epilogue_renumber (&pat, 1)
2789 && (get_attr_in_uncond_branch_delay (trial)
2790 == IN_UNCOND_BRANCH_DELAY_TRUE));
2792 return eligible_for_restore_insn (trial, true);
2795 /* Return nonzero if TRIAL can go into the sibling call's
2799 eligible_for_sibcall_delay (rtx trial)
2803 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2806 if (get_attr_length (trial) != 1)
2809 pat = PATTERN (trial);
2811 if (sparc_leaf_function_p)
2813 /* If the tail call is done using the call instruction,
2814 we have to restore %o7 in the delay slot. */
2815 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2818 /* %g1 is used to build the function address */
2819 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2825 /* Otherwise, only operations which can be done in tandem with
2826 a `restore' insn can go into the delay slot. */
2827 if (GET_CODE (SET_DEST (pat)) != REG
2828 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2829 || REGNO (SET_DEST (pat)) >= 32)
2832 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2834 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2837 return eligible_for_restore_insn (trial, false);
2841 short_branch (int uid1, int uid2)
2843 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2845 /* Leave a few words of "slop". */
2846 if (delta >= -1023 && delta <= 1022)
2852 /* Return nonzero if REG is not used after INSN.
2853 We assume REG is a reload reg, and therefore does
2854 not live past labels or calls or jumps. */
2856 reg_unused_after (rtx reg, rtx insn)
2858 enum rtx_code code, prev_code = UNKNOWN;
2860 while ((insn = NEXT_INSN (insn)))
2862 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2865 code = GET_CODE (insn);
2866 if (GET_CODE (insn) == CODE_LABEL)
2871 rtx set = single_set (insn);
2872 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2875 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2877 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2885 /* Determine if it's legal to put X into the constant pool. This
2886 is not possible if X contains the address of a symbol that is
2887 not constant (TLS) or not known at final link time (PIC). */
2890 sparc_cannot_force_const_mem (rtx x)
2892 switch (GET_CODE (x))
2897 /* Accept all non-symbolic constants. */
2901 /* Labels are OK iff we are non-PIC. */
2902 return flag_pic != 0;
2905 /* 'Naked' TLS symbol references are never OK,
2906 non-TLS symbols are OK iff we are non-PIC. */
2907 if (SYMBOL_REF_TLS_MODEL (x))
2910 return flag_pic != 0;
2913 return sparc_cannot_force_const_mem (XEXP (x, 0));
2916 return sparc_cannot_force_const_mem (XEXP (x, 0))
2917 || sparc_cannot_force_const_mem (XEXP (x, 1));
2926 static GTY(()) char pic_helper_symbol_name[256];
2927 static GTY(()) rtx pic_helper_symbol;
2928 static GTY(()) bool pic_helper_emitted_p = false;
2929 static GTY(()) rtx global_offset_table;
2931 /* Ensure that we are not using patterns that are not OK with PIC. */
2939 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2940 && (GET_CODE (recog_data.operand[i]) != CONST
2941 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2942 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2943 == global_offset_table)
2944 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2952 /* Return true if X is an address which needs a temporary register when
2953 reloaded while generating PIC code. */
2956 pic_address_needs_scratch (rtx x)
2958 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
2959 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2960 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2961 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2962 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
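/* E.g. (const (plus (symbol_ref "x") (const_int 8192))) needs a scratch
   register, because 8192 does not fit in the 13-bit signed immediate
   field, whereas an offset of 100 could be folded into the %lo() part.  */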
2968 /* Determine if a given RTX is a valid constant. We already know this
2969 satisfies CONSTANT_P. */
2972 legitimate_constant_p (rtx x)
2976 switch (GET_CODE (x))
2979 /* TLS symbols are not constant. */
2980 if (SYMBOL_REF_TLS_MODEL (x))
2985 inner = XEXP (x, 0);
2987 /* Offsets of TLS symbols are never valid.
2988 Discourage CSE from creating them. */
2989 if (GET_CODE (inner) == PLUS
2990 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
2995 if (GET_MODE (x) == VOIDmode)
2998 /* Floating point constants are generally not ok.
2999 The only exception is 0.0 in VIS. */
3001 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
3002 && const_zero_operand (x, GET_MODE (x)))
3008 /* Vector constants are generally not ok.
3009 The only exception is 0 in VIS. */
3011 && const_zero_operand (x, GET_MODE (x)))
3023 /* Determine if a given RTX is a valid constant address. */
3026 constant_address_p (rtx x)
3028 switch (GET_CODE (x))
3036 if (flag_pic && pic_address_needs_scratch (x))
3038 return legitimate_constant_p (x);
3041 return !flag_pic && legitimate_constant_p (x);
3048 /* Nonzero if the constant value X is a legitimate general operand
3049 when generating PIC code. It is given that flag_pic is on and
3050 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3053 legitimate_pic_operand_p (rtx x)
3055 if (pic_address_needs_scratch (x))
3057 if (SPARC_SYMBOL_REF_TLS_P (x)
3058 || (GET_CODE (x) == CONST
3059 && GET_CODE (XEXP (x, 0)) == PLUS
3060 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
3065 /* Return nonzero if ADDR is a valid memory address.
3066 STRICT specifies whether strict register checking applies. */
3069 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3071 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3073 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3075 else if (GET_CODE (addr) == PLUS)
3077 rs1 = XEXP (addr, 0);
3078 rs2 = XEXP (addr, 1);
3080 /* Canonicalize. REG comes first; if there are no REGs,
3081 LO_SUM comes first. */
3083 && GET_CODE (rs1) != SUBREG
3085 || GET_CODE (rs2) == SUBREG
3086 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3088 rs1 = XEXP (addr, 1);
3089 rs2 = XEXP (addr, 0);
3093 && rs1 == pic_offset_table_rtx
3095 && GET_CODE (rs2) != SUBREG
3096 && GET_CODE (rs2) != LO_SUM
3097 && GET_CODE (rs2) != MEM
3098 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
3099 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3100 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3102 || GET_CODE (rs1) == SUBREG)
3103 && RTX_OK_FOR_OFFSET_P (rs2)))
3108 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3109 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3111 /* We prohibit REG + REG for TFmode when there are no quad move insns
3112 and we consequently need to split. We do this because REG+REG
3113 is not an offsettable address. If we get the situation in reload
3114 where source and destination of a movtf pattern are both MEMs with
3115 REG+REG address, then only one of them gets converted to an
3116 offsettable address. */
3118 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3121 /* We prohibit REG + REG on ARCH32 if not optimizing for
3122 DFmode/DImode because then mem_min_alignment is likely to be zero
3123 after reload and the forced split would lack a matching splitter
3125 if (TARGET_ARCH32 && !optimize
3126 && (mode == DFmode || mode == DImode))
3129 else if (USE_AS_OFFSETABLE_LO10
3130 && GET_CODE (rs1) == LO_SUM
3132 && ! TARGET_CM_MEDMID
3133 && RTX_OK_FOR_OLO10_P (rs2))
3136 imm1 = XEXP (rs1, 1);
3137 rs1 = XEXP (rs1, 0);
3138 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3142 else if (GET_CODE (addr) == LO_SUM)
3144 rs1 = XEXP (addr, 0);
3145 imm1 = XEXP (addr, 1);
3147 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3150 /* We can't allow TFmode in 32-bit mode, because an offset greater
3151 than the alignment (8) may cause the LO_SUM to overflow. */
3152 if (mode == TFmode && TARGET_ARCH32)
3155 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3160 if (GET_CODE (rs1) == SUBREG)
3161 rs1 = SUBREG_REG (rs1);
3167 if (GET_CODE (rs2) == SUBREG)
3168 rs2 = SUBREG_REG (rs2);
3175 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3176 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3181 if ((REGNO (rs1) >= 32
3182 && REGNO (rs1) != FRAME_POINTER_REGNUM
3183 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3185 && (REGNO (rs2) >= 32
3186 && REGNO (rs2) != FRAME_POINTER_REGNUM
3187 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
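/* To summarize informally: the address forms accepted above are REG,
   REG+REG, REG+SIMM13, REG+%lo(symbol) (a LO_SUM) and a bare SIMM13
   constant, subject to the PIC/TLS and mode-specific restrictions
   checked along the way.  */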
3193 /* Construct the SYMBOL_REF for the __tls_get_addr function. */
3195 static GTY(()) rtx sparc_tls_symbol;
3198 sparc_tls_get_addr (void)
3200 if (!sparc_tls_symbol)
3201 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3203 return sparc_tls_symbol;
3207 sparc_tls_got (void)
3212 crtl->uses_pic_offset_table = 1;
3213 return pic_offset_table_rtx;
3216 if (!global_offset_table)
3217 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3218 temp = gen_reg_rtx (Pmode);
3219 emit_move_insn (temp, global_offset_table);
3223 /* Return 1 if *X is a thread-local symbol. */
3226 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3228 return SPARC_SYMBOL_REF_TLS_P (*x);
3231 /* Return 1 if X contains a thread-local symbol. */
3234 sparc_tls_referenced_p (rtx x)
3236 if (!TARGET_HAVE_TLS)
3239 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3242 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3243 this (thread-local) address. */
3246 legitimize_tls_address (rtx addr)
3248 rtx temp1, temp2, temp3, ret, o0, got, insn;
3250 gcc_assert (can_create_pseudo_p ());
3252 if (GET_CODE (addr) == SYMBOL_REF)
3253 switch (SYMBOL_REF_TLS_MODEL (addr))
3255 case TLS_MODEL_GLOBAL_DYNAMIC:
3257 temp1 = gen_reg_rtx (SImode);
3258 temp2 = gen_reg_rtx (SImode);
3259 ret = gen_reg_rtx (Pmode);
3260 o0 = gen_rtx_REG (Pmode, 8);
3261 got = sparc_tls_got ();
3262 emit_insn (gen_tgd_hi22 (temp1, addr));
3263 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3266 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3267 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3272 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3273 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3276 CALL_INSN_FUNCTION_USAGE (insn)
3277 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3278 CALL_INSN_FUNCTION_USAGE (insn));
3279 insn = get_insns ();
3281 emit_libcall_block (insn, ret, o0, addr);
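      /* Roughly, the global-dynamic sequence built above corresponds to
         the following assembly (register names are illustrative):

                sethi   %tgd_hi22(sym), %tmp1
                add     %tmp1, %tgd_lo10(sym), %tmp2
                add     %l7, %tmp2, %o0, %tgd_add(sym)
                call    __tls_get_addr, %tgd_call(sym)
                 nop

         where %l7 holds the GOT pointer when generating PIC code.  */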
3284 case TLS_MODEL_LOCAL_DYNAMIC:
3286 temp1 = gen_reg_rtx (SImode);
3287 temp2 = gen_reg_rtx (SImode);
3288 temp3 = gen_reg_rtx (Pmode);
3289 ret = gen_reg_rtx (Pmode);
3290 o0 = gen_rtx_REG (Pmode, 8);
3291 got = sparc_tls_got ();
3292 emit_insn (gen_tldm_hi22 (temp1));
3293 emit_insn (gen_tldm_lo10 (temp2, temp1));
3296 emit_insn (gen_tldm_add32 (o0, got, temp2));
3297 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3302 emit_insn (gen_tldm_add64 (o0, got, temp2));
3303 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3306 CALL_INSN_FUNCTION_USAGE (insn)
3307 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3308 CALL_INSN_FUNCTION_USAGE (insn));
3309 insn = get_insns ();
3311 emit_libcall_block (insn, temp3, o0,
3312 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3313 UNSPEC_TLSLD_BASE));
3314 temp1 = gen_reg_rtx (SImode);
3315 temp2 = gen_reg_rtx (SImode);
3316 emit_insn (gen_tldo_hix22 (temp1, addr));
3317 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3319 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3321 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3324 case TLS_MODEL_INITIAL_EXEC:
3325 temp1 = gen_reg_rtx (SImode);
3326 temp2 = gen_reg_rtx (SImode);
3327 temp3 = gen_reg_rtx (Pmode);
3328 got = sparc_tls_got ();
3329 emit_insn (gen_tie_hi22 (temp1, addr));
3330 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3332 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3334 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3337 ret = gen_reg_rtx (Pmode);
3339 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3342 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3346 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3349 case TLS_MODEL_LOCAL_EXEC:
3350 temp1 = gen_reg_rtx (Pmode);
3351 temp2 = gen_reg_rtx (Pmode);
3354 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3355 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3359 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3360 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3362 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3370 gcc_unreachable (); /* for now ... */
3376 /* Legitimize PIC addresses. If the address is already position-independent,
3377 we return ORIG. Newly generated position-independent addresses go into a
3378 reg. This is REG if nonzero, otherwise we allocate register(s) as
3382 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3385 if (GET_CODE (orig) == SYMBOL_REF
3386 /* See the comment in sparc_expand_move. */
3387 || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
3389 rtx pic_ref, address;
3394 gcc_assert (! reload_in_progress && ! reload_completed);
3395 reg = gen_reg_rtx (Pmode);
3400 /* If not during reload, allocate another temp reg here for loading
3401 in the address, so that these instructions can be optimized
3403 rtx temp_reg = ((reload_in_progress || reload_completed)
3404 ? reg : gen_reg_rtx (Pmode));
3406 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3407 won't get confused into thinking that these two instructions
3408 are loading in the true address of the symbol. If in the
3409 future a PIC rtx exists, that should be used instead. */
3412 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3413 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3417 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3418 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3425 pic_ref = gen_const_mem (Pmode,
3426 gen_rtx_PLUS (Pmode,
3427 pic_offset_table_rtx, address));
3428 crtl->uses_pic_offset_table = 1;
3429 insn = emit_move_insn (reg, pic_ref);
3430 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3432 set_unique_reg_note (insn, REG_EQUAL, orig);
3435 else if (GET_CODE (orig) == CONST)
3439 if (GET_CODE (XEXP (orig, 0)) == PLUS
3440 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3445 gcc_assert (! reload_in_progress && ! reload_completed);
3446 reg = gen_reg_rtx (Pmode);
3449 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3450 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3451 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3452 base == reg ? 0 : reg);
3454 if (GET_CODE (offset) == CONST_INT)
3456 if (SMALL_INT (offset))
3457 return plus_constant (base, INTVAL (offset));
3458 else if (! reload_in_progress && ! reload_completed)
3459 offset = force_reg (Pmode, offset);
3461 /* If we reach here, then something is seriously wrong. */
3464 return gen_rtx_PLUS (Pmode, base, offset);
3466 else if (GET_CODE (orig) == LABEL_REF)
3467 /* ??? Why do we do this? */
3468 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3469 the register is live instead, in case it is eliminated. */
3470 crtl->uses_pic_offset_table = 1;
3475 /* Try machine-dependent ways of modifying an illegitimate address X
3476 to be legitimate. If we find one, return the new, valid address.
3478 OLDX is the address as it was before break_out_memory_refs was called.
3479 In some cases it is useful to look at this to decide what needs to be done.
3481 MODE is the mode of the operand pointed to by X.
3483 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
3486 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3487 enum machine_mode mode)
3491 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3492 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3493 force_operand (XEXP (x, 0), NULL_RTX));
3494 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3495 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3496 force_operand (XEXP (x, 1), NULL_RTX));
3497 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3498 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3500 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3501 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3502 force_operand (XEXP (x, 1), NULL_RTX));
3504 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3507 if (SPARC_SYMBOL_REF_TLS_P (x))
3508 x = legitimize_tls_address (x);
3510 x = legitimize_pic_address (x, mode, 0);
3511 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3512 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3513 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3514 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3515 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3516 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3517 else if (GET_CODE (x) == SYMBOL_REF
3518 || GET_CODE (x) == CONST
3519 || GET_CODE (x) == LABEL_REF)
3520 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3524 /* Emit the special PIC helper function. */
3527 emit_pic_helper (void)
3529 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3532 switch_to_section (text_section);
3534 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3536 ASM_OUTPUT_ALIGN (asm_out_file, align);
3537 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3538 if (flag_delayed_branch)
3539 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3540 pic_name, pic_name);
3542 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3543 pic_name, pic_name);
3545 pic_helper_emitted_p = true;
3548 /* Emit code to load the PIC register. */
3551 load_pic_register (bool delay_pic_helper)
3553 int orig_flag_pic = flag_pic;
3555 if (TARGET_VXWORKS_RTP)
3557 emit_insn (gen_vxworks_load_got ());
3558 emit_use (pic_offset_table_rtx);
3562 /* If we haven't initialized the special PIC symbols, do so now. */
3563 if (!pic_helper_symbol_name[0])
3565 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3566 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3567 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3570 /* If we haven't emitted the special PIC helper function, do so now unless
3571 we are requested to delay it. */
3572 if (!delay_pic_helper && !pic_helper_emitted_p)
3577 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3578 pic_helper_symbol));
3580 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3581 pic_helper_symbol));
3582 flag_pic = orig_flag_pic;
3584 /* Need to emit this whether or not we obey regdecls,
3585 since setjmp/longjmp can cause life info to screw up.
3586 ??? In the case where we don't obey regdecls, this is not sufficient
3587 since we may not fall out the bottom. */
3588 emit_use (pic_offset_table_rtx);
3591 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3592 address of the call target. */
3595 sparc_emit_call_insn (rtx pat, rtx addr)
3599 insn = emit_call_insn (pat);
3601 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3602 if (TARGET_VXWORKS_RTP
3604 && GET_CODE (addr) == SYMBOL_REF
3605 && (SYMBOL_REF_DECL (addr)
3606 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3607 : !SYMBOL_REF_LOCAL_P (addr)))
3609 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3610 crtl->uses_pic_offset_table = 1;
3614 /* Return 1 if RTX is a MEM which is known to be aligned to at
3615 least a DESIRED byte boundary. */
3618 mem_min_alignment (rtx mem, int desired)
3620 rtx addr, base, offset;
3622 /* If it's not a MEM we can't accept it. */
3623 if (GET_CODE (mem) != MEM)
3627 if (!TARGET_UNALIGNED_DOUBLES
3628 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3631 /* ??? The rest of the function predates MEM_ALIGN so
3632 there is probably a bit of redundancy. */
3633 addr = XEXP (mem, 0);
3634 base = offset = NULL_RTX;
3635 if (GET_CODE (addr) == PLUS)
3637 if (GET_CODE (XEXP (addr, 0)) == REG)
3639 base = XEXP (addr, 0);
3641 /* What we are saying here is that if the base
3642 REG is aligned properly, the compiler will make
3643 sure any REG based index upon it will be so
3645 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3646 offset = XEXP (addr, 1);
3648 offset = const0_rtx;
3651 else if (GET_CODE (addr) == REG)
3654 offset = const0_rtx;
3657 if (base != NULL_RTX)
3659 int regno = REGNO (base);
3661 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3663 /* Check if the compiler has recorded some information
3664 about the alignment of the base REG. If reload has
3665 completed, we already matched with proper alignments.
3666 If not running global_alloc, reload might give us
3667 an unaligned pointer to the local stack, though. */
3669 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3670 || (optimize && reload_completed))
3671 && (INTVAL (offset) & (desired - 1)) == 0)
3676 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3680 else if (! TARGET_UNALIGNED_DOUBLES
3681 || CONSTANT_P (addr)
3682 || GET_CODE (addr) == LO_SUM)
3684 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3685 is true, in which case we can only assume that an access is aligned if
3686 it is to a constant address, or the address involves a LO_SUM. */
3690 /* An obviously unaligned address. */
3695 /* Vectors to keep interesting information about registers where it can easily
3696 be got. We used to use the actual mode value as the bit number, but there
3697 are more than 32 modes now. Instead we use two tables: one indexed by
3698 hard register number, and one indexed by mode. */
3700 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3701 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3702 mapped into one sparc_mode_class mode. */
3704 enum sparc_mode_class {
3705 S_MODE, D_MODE, T_MODE, O_MODE,
3706 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3710 /* Modes for single-word and smaller quantities. */
3711 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3713 /* Modes for double-word and smaller quantities. */
3714 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3716 /* Modes for quad-word and smaller quantities. */
3717 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3719 /* Modes for 8-word and smaller quantities. */
3720 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3722 /* Modes for single-float quantities. We must allow any single word or
3723 smaller quantity. This is because the fix/float conversion instructions
3724 take integer inputs/outputs from the float registers. */
3725 #define SF_MODES (S_MODES)
3727 /* Modes for double-float and smaller quantities. */
3728 #define DF_MODES (S_MODES | D_MODES)
3730 /* Modes for double-float only quantities. */
3731 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3733 /* Modes for quad-float only quantities. */
3734 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3736 /* Modes for quad-float and smaller quantities. */
3737 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3739 /* Modes for quad-float and double-float quantities. */
3740 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3742 /* Modes for quad-float pair only quantities. */
3743 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3745 /* Modes for quad-float pairs and smaller quantities. */
3746 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3748 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3750 /* Modes for condition codes. */
3751 #define CC_MODES (1 << (int) CC_MODE)
3752 #define CCFP_MODES (1 << (int) CCFP_MODE)
3754 /* Value is 1 if register/mode pair is acceptable on sparc.
3755 The funny mixture of D and T modes is because integer operations
3756 do not specially operate on tetra quantities, so non-quad-aligned
3757 registers can hold quadword quantities (except %o4 and %i4 because
3758 they cross fixed registers). */
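/* The check made elsewhere (HARD_REGNO_MODE_OK in sparc.h) is, roughly,
   (hard_regno_mode_classes[REGNO] & sparc_mode_class[MODE]) != 0, i.e. a
   mode is allowed in a hard register iff the register's class mask has
   the bit for that mode's sparc_mode_class set.  */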
3760 /* This points to either the 32 bit or the 64 bit version. */
3761 const int *hard_regno_mode_classes;
3763 static const int hard_32bit_mode_classes[] = {
3764 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3765 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3766 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3767 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3769 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3770 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3771 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3772 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3774 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3775 and none can hold SFmode/SImode values. */
3776 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3777 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3778 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3779 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3782 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3788 static const int hard_64bit_mode_classes[] = {
3789 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3790 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3791 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3792 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3794 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3795 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3796 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3797 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3799 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3800 and none can hold SFmode/SImode values. */
3801 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3802 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3803 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3804 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3807 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3813 int sparc_mode_class [NUM_MACHINE_MODES];
3815 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3818 sparc_init_modes (void)
3822 for (i = 0; i < NUM_MACHINE_MODES; i++)
3824 switch (GET_MODE_CLASS (i))
3827 case MODE_PARTIAL_INT:
3828 case MODE_COMPLEX_INT:
3829 if (GET_MODE_SIZE (i) <= 4)
3830 sparc_mode_class[i] = 1 << (int) S_MODE;
3831 else if (GET_MODE_SIZE (i) == 8)
3832 sparc_mode_class[i] = 1 << (int) D_MODE;
3833 else if (GET_MODE_SIZE (i) == 16)
3834 sparc_mode_class[i] = 1 << (int) T_MODE;
3835 else if (GET_MODE_SIZE (i) == 32)
3836 sparc_mode_class[i] = 1 << (int) O_MODE;
3838 sparc_mode_class[i] = 0;
3840 case MODE_VECTOR_INT:
3841 if (GET_MODE_SIZE (i) <= 4)
3842 sparc_mode_class[i] = 1 << (int)SF_MODE;
3843 else if (GET_MODE_SIZE (i) == 8)
3844 sparc_mode_class[i] = 1 << (int)DF_MODE;
3847 case MODE_COMPLEX_FLOAT:
3848 if (GET_MODE_SIZE (i) <= 4)
3849 sparc_mode_class[i] = 1 << (int) SF_MODE;
3850 else if (GET_MODE_SIZE (i) == 8)
3851 sparc_mode_class[i] = 1 << (int) DF_MODE;
3852 else if (GET_MODE_SIZE (i) == 16)
3853 sparc_mode_class[i] = 1 << (int) TF_MODE;
3854 else if (GET_MODE_SIZE (i) == 32)
3855 sparc_mode_class[i] = 1 << (int) OF_MODE;
3857 sparc_mode_class[i] = 0;
3860 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3861 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3863 sparc_mode_class[i] = 1 << (int) CC_MODE;
3866 sparc_mode_class[i] = 0;
3872 hard_regno_mode_classes = hard_64bit_mode_classes;
3874 hard_regno_mode_classes = hard_32bit_mode_classes;
3876 /* Initialize the array used by REGNO_REG_CLASS. */
3877 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3879 if (i < 16 && TARGET_V8PLUS)
3880 sparc_regno_reg_class[i] = I64_REGS;
3881 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3882 sparc_regno_reg_class[i] = GENERAL_REGS;
3884 sparc_regno_reg_class[i] = FP_REGS;
3886 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3888 sparc_regno_reg_class[i] = FPCC_REGS;
3890 sparc_regno_reg_class[i] = NO_REGS;
3894 /* Compute the frame size required by the function. This function is called
3895 during the reload pass and also by sparc_expand_prologue. */
3898 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3900 int outgoing_args_size = (crtl->outgoing_args_size
3901 + REG_PARM_STACK_SPACE (current_function_decl));
3902 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3907 for (i = 0; i < 8; i++)
3908 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3913 for (i = 0; i < 8; i += 2)
3914 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3915 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3919 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3920 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3921 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3924 /* Set up values for use in prologue and epilogue. */
3925 num_gfregs = n_regs;
3930 && crtl->outgoing_args_size == 0)
3931 actual_fsize = apparent_fsize = 0;
3934 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
3935 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3936 apparent_fsize += n_regs * 4;
3937 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
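  /* A worked example with made-up numbers (ignoring STARTING_FRAME_OFFSET):
     for size == 20, n_regs == 2 and outgoing_args_size == 12,
     apparent_fsize = ((20 + 7) & -8) + 2*4 = 24 + 8 = 32 and
     actual_fsize = 32 + ((12 + 7) & -8) = 32 + 16 = 48; the "& -8" just
     rounds up to the next multiple of 8.  */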
3940 /* Make sure nothing can clobber our register windows.
3941 If a SAVE must be done, or there is a stack-local variable,
3942 the register window area must be allocated. */
3943 if (! leaf_function_p || size > 0)
3944 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3946 return SPARC_STACK_ALIGN (actual_fsize);
3949 /* Output any necessary .register pseudo-ops. */
3952 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3954 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3960 /* Check if %g[2367] were used without
3961 .register being printed for them already. */
3962 for (i = 2; i < 8; i++)
3964 if (df_regs_ever_live_p (i)
3965 && ! sparc_hard_reg_printed [i])
3967 sparc_hard_reg_printed [i] = 1;
3968 /* %g7 is used as TLS base register, use #ignore
3969 for it instead of #scratch. */
3970 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3971 i == 7 ? "ignore" : "scratch");
3978 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3979 as needed. LOW should be double-word aligned for 32-bit registers.
3980 Return the new OFFSET. */
3983 #define SORR_RESTORE 1
3986 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3991 if (TARGET_ARCH64 && high <= 32)
3993 for (i = low; i < high; i++)
3995 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3997 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
3998 set_mem_alias_set (mem, sparc_sr_alias_set);
3999 if (action == SORR_SAVE)
4001 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4002 RTX_FRAME_RELATED_P (insn) = 1;
4004 else /* action == SORR_RESTORE */
4005 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4012 for (i = low; i < high; i += 2)
4014 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
4015 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
4016 enum machine_mode mode;
4021 mode = i < 32 ? DImode : DFmode;
4026 mode = i < 32 ? SImode : SFmode;
4031 mode = i < 32 ? SImode : SFmode;
4038 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
4039 set_mem_alias_set (mem, sparc_sr_alias_set);
4040 if (action == SORR_SAVE)
4042 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4043 RTX_FRAME_RELATED_P (insn) = 1;
4045 else /* action == SORR_RESTORE */
4046 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4048 /* Always preserve double-word alignment. */
4049 offset = (offset + 7) & -8;
4056 /* Emit code to save call-saved registers. */
4059 emit_save_or_restore_regs (int action)
4061 HOST_WIDE_INT offset;
4064 offset = frame_base_offset - apparent_fsize;
4066 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
4068 /* ??? This might be optimized a little as %g1 might already have a
4069 value close enough that a single add insn will do. */
4070 /* ??? Although, all of this is probably only a temporary fix
4071 because if %g1 can hold a function result, then
4072 sparc_expand_epilogue will lose (the result will be
4074 base = gen_rtx_REG (Pmode, 1);
4075 emit_move_insn (base, GEN_INT (offset));
4076 emit_insn (gen_rtx_SET (VOIDmode,
4078 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4082 base = frame_base_reg;
4084 offset = save_or_restore_regs (0, 8, base, offset, action);
4085 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
4088 /* Generate a save_register_window insn. */
4091 gen_save_register_window (rtx increment)
4094 return gen_save_register_windowdi (increment);
4096 return gen_save_register_windowsi (increment);
4099 /* Generate an increment for the stack pointer. */
4102 gen_stack_pointer_inc (rtx increment)
4104 return gen_rtx_SET (VOIDmode,
4106 gen_rtx_PLUS (Pmode,
4111 /* Generate a decrement for the stack pointer. */
4114 gen_stack_pointer_dec (rtx decrement)
4116 return gen_rtx_SET (VOIDmode,
4118 gen_rtx_MINUS (Pmode,
4123 /* Expand the function prologue. The prologue is responsible for reserving
4124 storage for the frame, saving the call-saved registers and loading the
4125 PIC register if needed. */
4128 sparc_expand_prologue (void)
4133 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4134 on the final value of the flag means deferring the prologue/epilogue
4135 expansion until just before the second scheduling pass, which is too
4136 late to emit multiple epilogues or return insns.
4138 Of course we are making the assumption that the value of the flag
4139 will not change between now and its final value. Of the three parts
4140 of the formula, only the last one can reasonably vary. Let's take a
4141 closer look, after assuming that the first two are set to true
4142 (otherwise the last value is effectively silenced).
4144 If only_leaf_regs_used returns false, the global predicate will also
4145 be false so the actual frame size calculated below will be positive.
4146 As a consequence, the save_register_window insn will be emitted in
4147 the instruction stream; now this insn explicitly references %fp
4148 which is not a leaf register so only_leaf_regs_used will always
4149 return false subsequently.
4151 If only_leaf_regs_used returns true, we hope that the subsequent
4152 optimization passes won't cause non-leaf registers to pop up. For
4153 example, the regrename pass has special provisions to not rename to
4154 non-leaf registers in a leaf function. */
4155 sparc_leaf_function_p
4156 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4158 /* Need to use actual_fsize, since we are also allocating
4159 space for our callee (and our own register save area). */
4161 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4163 /* Advertise that the data calculated just above are now valid. */
4164 sparc_prologue_data_valid_p = true;
4166 if (sparc_leaf_function_p)
4168 frame_base_reg = stack_pointer_rtx;
4169 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4173 frame_base_reg = hard_frame_pointer_rtx;
4174 frame_base_offset = SPARC_STACK_BIAS;
4177 if (actual_fsize == 0)
4179 else if (sparc_leaf_function_p)
4181 if (actual_fsize <= 4096)
4182 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4183 else if (actual_fsize <= 8192)
4185 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4186 /* %sp is still the CFA register. */
4187 RTX_FRAME_RELATED_P (insn) = 1;
4189 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4193 rtx reg = gen_rtx_REG (Pmode, 1);
4194 emit_move_insn (reg, GEN_INT (-actual_fsize));
4195 insn = emit_insn (gen_stack_pointer_inc (reg));
4196 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4197 gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4200 RTX_FRAME_RELATED_P (insn) = 1;
4204 if (actual_fsize <= 4096)
4205 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4206 else if (actual_fsize <= 8192)
4208 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4209 /* %sp is not the CFA register anymore. */
4210 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4214 rtx reg = gen_rtx_REG (Pmode, 1);
4215 emit_move_insn (reg, GEN_INT (-actual_fsize));
4216 insn = emit_insn (gen_save_register_window (reg));
4219 RTX_FRAME_RELATED_P (insn) = 1;
4220 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
4221 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4225 emit_save_or_restore_regs (SORR_SAVE);
4227 /* Load the PIC register if needed. */
4228 if (flag_pic && crtl->uses_pic_offset_table)
4229 load_pic_register (false);
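/* As a rough sketch of the code emitted above (not the exact output): a
   non-leaf function with a small frame gets a single

        save    %sp, -FRAMESIZE, %sp

   a leaf function gets "add %sp, -FRAMESIZE, %sp" instead, and a frame
   larger than 8192 bytes first builds the negated size in %g1 (via the
   move insn above) and then uses "save %sp, %g1, %sp".  */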
4232 /* This function generates the assembly code for function entry, which boils
4233 down to emitting the necessary .register directives. */
4236 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4238 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4239 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4241 sparc_output_scratch_registers (file);
4244 /* Expand the function epilogue, either normal or part of a sibcall.
4245 We emit all the instructions except the return or the call. */
4248 sparc_expand_epilogue (void)
4251 emit_save_or_restore_regs (SORR_RESTORE);
4253 if (actual_fsize == 0)
4255 else if (sparc_leaf_function_p)
4257 if (actual_fsize <= 4096)
4258 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4259 else if (actual_fsize <= 8192)
4261 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4262 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4266 rtx reg = gen_rtx_REG (Pmode, 1);
4267 emit_move_insn (reg, GEN_INT (-actual_fsize));
4268 emit_insn (gen_stack_pointer_dec (reg));
4273 /* Return true if it is appropriate to emit `return' instructions in the
4274 body of a function. */
4277 sparc_can_use_return_insn_p (void)
4279 return sparc_prologue_data_valid_p
4280 && (actual_fsize == 0 || !sparc_leaf_function_p);
4283 /* This function generates the assembly code for function exit. */
4286 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4288 /* If code does not drop into the epilogue, we still have to output
4289 a dummy nop for the sake of sane backtraces. Otherwise, if the
4290 last two instructions of a function were "call foo; dslot;" this
4291 can make the return PC of foo (i.e. address of call instruction
4292 plus 8) point to the first instruction in the next function. */
4294 rtx insn, last_real_insn;
4296 insn = get_last_insn ();
4298 last_real_insn = prev_real_insn (insn);
4300 && GET_CODE (last_real_insn) == INSN
4301 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4302 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4304 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4305 fputs("\tnop\n", file);
4307 sparc_output_deferred_case_vectors ();
4310 /* Output a 'restore' instruction. */
4313 output_restore (rtx pat)
4319 fputs ("\t restore\n", asm_out_file);
4323 gcc_assert (GET_CODE (pat) == SET);
4325 operands[0] = SET_DEST (pat);
4326 pat = SET_SRC (pat);
4328 switch (GET_CODE (pat))
4331 operands[1] = XEXP (pat, 0);
4332 operands[2] = XEXP (pat, 1);
4333 output_asm_insn (" restore %r1, %2, %Y0", operands);
4336 operands[1] = XEXP (pat, 0);
4337 operands[2] = XEXP (pat, 1);
4338 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4341 operands[1] = XEXP (pat, 0);
4342 gcc_assert (XEXP (pat, 1) == const1_rtx);
4343 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4347 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4352 /* Output a return. */
4355 output_return (rtx insn)
4357 if (sparc_leaf_function_p)
4359 /* This is a leaf function so we don't have to bother restoring the
4360 register window, which frees us from dealing with the convoluted
4361 semantics of restore/return. We simply output the jump to the
4362 return address and the insn in the delay slot (if any). */
4364 gcc_assert (! crtl->calls_eh_return);
4366 return "jmp\t%%o7+%)%#";
4370 /* This is a regular function so we have to restore the register window.
4371 We may have a pending insn for the delay slot, which will be either
4372 combined with the 'restore' instruction or put in the delay slot of
4373 the 'return' instruction. */
4375 if (crtl->calls_eh_return)
4377 /* If the function uses __builtin_eh_return, the eh_return
4378 machinery occupies the delay slot. */
4379 gcc_assert (! final_sequence);
4381 if (! flag_delayed_branch)
4382 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4385 fputs ("\treturn\t%i7+8\n", asm_out_file);
4387 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4389 if (flag_delayed_branch)
4390 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4392 fputs ("\t nop\n", asm_out_file);
4394 else if (final_sequence)
4398 delay = NEXT_INSN (insn);
4401 pat = PATTERN (delay);
4403 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4405 epilogue_renumber (&pat, 0);
4406 return "return\t%%i7+%)%#";
4410 output_asm_insn ("jmp\t%%i7+%)", NULL);
4411 output_restore (pat);
4412 PATTERN (delay) = gen_blockage ();
4413 INSN_CODE (delay) = -1;
4418 /* The delay slot is empty. */
4420 return "return\t%%i7+%)\n\t nop";
4421 else if (flag_delayed_branch)
4422 return "jmp\t%%i7+%)\n\t restore";
4424 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4431 /* Output a sibling call. */
4434 output_sibcall (rtx insn, rtx call_operand)
4438 gcc_assert (flag_delayed_branch);
4440 operands[0] = call_operand;
4442 if (sparc_leaf_function_p)
4444 /* This is a leaf function so we don't have to bother restoring the
4445 register window. We simply output the jump to the function and
4446 the insn in the delay slot (if any). */
4448 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4451 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4454 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4455 it into a branch if possible. */
4456 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4461 /* This is a regular function so we have to restore the register window.
4462 We may have a pending insn for the delay slot, which will be combined
4463 with the 'restore' instruction. */
4465 output_asm_insn ("call\t%a0, 0", operands);
4469 rtx delay = NEXT_INSN (insn);
4472 output_restore (PATTERN (delay));
4474 PATTERN (delay) = gen_blockage ();
4475 INSN_CODE (delay) = -1;
4478 output_restore (NULL_RTX);
4484 /* Functions for handling argument passing.
4486 For 32-bit, the first 6 args are normally in registers and the rest are
4487 pushed. Any arg that starts within the first 6 words is at least
4488 partially passed in a register unless its data type forbids.
4490 For 64-bit, the argument registers are laid out as an array of 16 elements
4491 and arguments are added sequentially. The first 6 int args and up to the
4492 first 16 fp args (depending on size) are passed in regs.
4494 Slot Stack Integral Float Float in structure Double Long Double
4495 ---- ----- -------- ----- ------------------ ------ -----------
4496 15 [SP+248] %f31 %f30,%f31 %d30
4497 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4498 13 [SP+232] %f27 %f26,%f27 %d26
4499 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4500 11 [SP+216] %f23 %f22,%f23 %d22
4501 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4502 9 [SP+200] %f19 %f18,%f19 %d18
4503 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4504 7 [SP+184] %f15 %f14,%f15 %d14
4505 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4506 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4507 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4508 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4509 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4510 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4511 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4513 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4515 Integral arguments are always passed as 64-bit quantities appropriately extended.
4518 Passing of floating point values is handled as follows.
4519 If a prototype is in scope:
4520 If the value is in a named argument (i.e. not a stdarg function or a
4521 value not part of the `...') then the value is passed in the appropriate fp reg.
4523 If the value is part of the `...' and is passed in one of the first 6
4524 slots then the value is passed in the appropriate int reg.
4525 If the value is part of the `...' and is not passed in one of the first 6
4526 slots then the value is passed in memory.
4527 If a prototype is not in scope:
4528 If the value is one of the first 6 arguments the value is passed in the
4529 appropriate integer reg and the appropriate fp reg.
4530 If the value is not one of the first 6 arguments the value is passed in
4531 the appropriate fp reg and in memory.
4534 Summary of the calling conventions implemented by GCC on SPARC:
4537 32-bit ABI: size argument return value
4539 small integer <4 int. reg. int. reg.
4540 word 4 int. reg. int. reg.
4541 double word 8 int. reg. int. reg.
4543 _Complex small integer <8 int. reg. int. reg.
4544 _Complex word 8 int. reg. int. reg.
4545 _Complex double word 16 memory int. reg.
4547 vector integer <=8 int. reg. FP reg.
4548 vector integer >8 memory memory
4550 float 4 int. reg. FP reg.
4551 double 8 int. reg. FP reg.
4552 long double 16 memory memory
4554 _Complex float 8 memory FP reg.
4555 _Complex double 16 memory FP reg.
4556 _Complex long double 32 memory FP reg.
4558 vector float any memory memory
4560 aggregate any memory memory
4565 64-bit ABI: size argument return value
4567 small integer <8 int. reg. int. reg.
4568 word 8 int. reg. int. reg.
4569 double word 16 int. reg. int. reg.
4571 _Complex small integer <16 int. reg. int. reg.
4572 _Complex word 16 int. reg. int. reg.
4573 _Complex double word 32 memory int. reg.
4575 vector integer <=16 FP reg. FP reg.
4576 vector integer 16<s<=32 memory FP reg.
4577 vector integer >32 memory memory
4579 float 4 FP reg. FP reg.
4580 double 8 FP reg. FP reg.
4581 long double 16 FP reg. FP reg.
4583 _Complex float 8 FP reg. FP reg.
4584 _Complex double 16 FP reg. FP reg.
4585 _Complex long double 32 memory FP reg.
4587 vector float <=16 FP reg. FP reg.
4588 vector float 16<s<=32 memory FP reg.
4589 vector float >32 memory memory
4591 aggregate <=16 reg. reg.
4592 aggregate 16<s<=32 memory reg.
4593 aggregate >32 memory memory
4597 Note #1: complex floating-point types follow the extended SPARC ABIs as
4598 implemented by the Sun compiler.
4600 Note #2: integral vector types follow the scalar floating-point types
4601 conventions to match what is implemented by the Sun VIS SDK.
4603 Note #3: floating-point vector types follow the aggregate types conventions. */
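/* A worked example of the 64-bit slot assignment documented above
   (illustration only; the declaration is hypothetical and kept under #if 0
   so it is never compiled): with a prototype in scope and the FPU enabled,
   A is passed in %o0 (slot 0), B in %d2 (slot 1) and C in %o2 (slot 2),
   while the caller still reserves the three stack slots starting at
   [SP+128].  */
#if 0
extern void example_callee (int a, double b, int c);
#endif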
4607 /* Maximum number of int regs for args. */
4608 #define SPARC_INT_ARG_MAX 6
4609 /* Maximum number of fp regs for args. */
4610 #define SPARC_FP_ARG_MAX 16
4612 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
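/* Worked example of ROUND_ADVANCE (added for clarity): with UNITS_PER_WORD
   of 8 in 64-bit mode, ROUND_ADVANCE (1) == ROUND_ADVANCE (8) == 1 and
   ROUND_ADVANCE (9) == 2, i.e. a size in bytes is rounded up to a whole
   number of argument slots.  */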
4614 /* Handle the INIT_CUMULATIVE_ARGS macro.
4615 Initialize a variable CUM of type CUMULATIVE_ARGS
4616 for a call to a function whose data type is FNTYPE.
4617 For a library call, FNTYPE is 0. */
4620 init_cumulative_args (struct sparc_args *cum, tree fntype,
4621 rtx libname ATTRIBUTE_UNUSED,
4622 tree fndecl ATTRIBUTE_UNUSED)
4625 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4626 cum->libcall_p = fntype == 0;
4629 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4630 When a prototype says `char' or `short', really pass an `int'. */
4633 sparc_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
4635 return TARGET_ARCH32 ? true : false;
4638 /* Handle promotion of pointer and integer arguments. */
4640 static enum machine_mode
4641 sparc_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
4642 enum machine_mode mode,
4643 int *punsignedp ATTRIBUTE_UNUSED,
4644 const_tree fntype ATTRIBUTE_UNUSED,
4645 int for_return ATTRIBUTE_UNUSED)
4647 if (POINTER_TYPE_P (type))
4649 *punsignedp = POINTERS_EXTEND_UNSIGNED;
4653 /* For TARGET_ARCH64 we need this, as we don't have instructions
4654 for arithmetic operations which do zero/sign extension at the same time,
4655 so without this we end up with a srl/sra after every assignment to a
4656 user variable, which means very very bad code. */
4659 && GET_MODE_CLASS (mode) == MODE_INT
4660 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4668 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4671 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4673 return TARGET_ARCH64 ? true : false;
4676 /* Scan the record type TYPE and return the following predicates:
4677 - INTREGS_P: the record contains at least one field or sub-field
4678 that is eligible for promotion in integer registers.
4679 - FP_REGS_P: the record contains at least one field or sub-field
4680 that is eligible for promotion in floating-point registers.
4681 - PACKED_P: the record contains at least one field that is packed.
4683 Sub-fields are not taken into account for the PACKED_P predicate. */
4686 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4690 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4692 if (TREE_CODE (field) == FIELD_DECL)
4694 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4695 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4696 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4697 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4703 if (packed_p && DECL_PACKED (field))
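/* Example of the predicates computed above (illustration only, hypothetical
   types): for "struct { int i; float f; }" both *INTREGS_P and *FPREGS_P end
   up set when the FPU is enabled; a DECL_PACKED field at the top level sets
   *PACKED_P, but a packed field inside a nested record does not.  */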
4709 /* Compute the slot number to pass an argument in.
4710 Return the slot number or -1 if passing on the stack.
4712 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4713 the preceding args and about the function being called.
4714 MODE is the argument's machine mode.
4715 TYPE is the data type of the argument (as a tree).
4716 This is null for libcalls where that information may not be available.
4718 NAMED is nonzero if this argument is a named parameter
4719 (otherwise it is an extra parameter matching an ellipsis).
4720 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4721 *PREGNO records the register number to use if scalar type.
4722 *PPADDING records the amount of padding needed in words. */
4725 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4726 tree type, int named, int incoming_p,
4727 int *pregno, int *ppadding)
4729 int regbase = (incoming_p
4730 ? SPARC_INCOMING_INT_ARG_FIRST
4731 : SPARC_OUTGOING_INT_ARG_FIRST);
4732 int slotno = cum->words;
4733 enum mode_class mclass;
4738 if (type && TREE_ADDRESSABLE (type))
4744 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4747 /* For SPARC64, objects requiring 16-byte alignment get it. */
4749 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4750 && (slotno & 1) != 0)
4751 slotno++, *ppadding = 1;
4753 mclass = GET_MODE_CLASS (mode);
4754 if (type && TREE_CODE (type) == VECTOR_TYPE)
4756 /* Vector types deserve special treatment because they are
4757 polymorphic wrt their mode, depending upon whether VIS
4758 instructions are enabled. */
4759 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4761 /* The SPARC port defines no floating-point vector modes. */
4762 gcc_assert (mode == BLKmode);
4766 /* Integral vector types should either have a vector
4767 mode or an integral mode, because we are guaranteed
4768 by pass_by_reference that their size is not greater
4769 than 16 bytes and TImode is 16-byte wide. */
4770 gcc_assert (mode != BLKmode);
4772 /* Vector integers are handled like floats according to the Sun VIS SDK.
4774 mclass = MODE_FLOAT;
4781 case MODE_COMPLEX_FLOAT:
4782 case MODE_VECTOR_INT:
4783 if (TARGET_ARCH64 && TARGET_FPU && named)
4785 if (slotno >= SPARC_FP_ARG_MAX)
4787 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4788 /* Arguments filling only one single FP register are
4789 right-justified in the outer double FP register. */
4790 if (GET_MODE_SIZE (mode) <= 4)
4797 case MODE_COMPLEX_INT:
4798 if (slotno >= SPARC_INT_ARG_MAX)
4800 regno = regbase + slotno;
4804 if (mode == VOIDmode)
4805 /* MODE is VOIDmode when generating the actual call. */
4808 gcc_assert (mode == BLKmode);
4812 || (TREE_CODE (type) != VECTOR_TYPE
4813 && TREE_CODE (type) != RECORD_TYPE))
4815 if (slotno >= SPARC_INT_ARG_MAX)
4817 regno = regbase + slotno;
4819 else /* TARGET_ARCH64 && type */
4821 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4823 /* First see what kinds of registers we would need. */
4824 if (TREE_CODE (type) == VECTOR_TYPE)
4827 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4829 /* The ABI obviously doesn't specify how packed structures
4830 are passed. These are defined to be passed in int regs
4831 if possible, otherwise memory. */
4832 if (packed_p || !named)
4833 fpregs_p = 0, intregs_p = 1;
4835 /* If all arg slots are filled, then must pass on stack. */
4836 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4839 /* If there are only int args and all int arg slots are filled,
4840 then must pass on stack. */
4841 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4844 /* Note that even if all int arg slots are filled, fp members may
4845 still be passed in regs if such regs are available.
4846 *PREGNO isn't set because there may be more than one; it's up
4847 to the caller to compute them. */
4860 /* Handle recursive register counting for structure field layout. */
4862 struct function_arg_record_value_parms
4864 rtx ret; /* return expression being built. */
4865 int slotno; /* slot number of the argument. */
4866 int named; /* whether the argument is named. */
4867 int regbase; /* regno of the base register. */
4868 int stack; /* 1 if part of the argument is on the stack. */
4869 int intoffset; /* offset of the first pending integer field. */
4870 unsigned int nregs; /* number of words passed in registers. */
4873 static void function_arg_record_value_3
4874 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4875 static void function_arg_record_value_2
4876 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4877 static void function_arg_record_value_1
4878 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4879 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
4880 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4882 /* A subroutine of function_arg_record_value. Traverse the structure
4883 recursively and determine how many registers will be required. */
4886 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
4887 struct function_arg_record_value_parms *parms,
4892 /* We need to compute how many registers are needed so we can
4893 allocate the PARALLEL but before we can do that we need to know
4894 whether there are any packed fields. The ABI obviously doesn't
4895 specify how structures are passed in this case, so they are
4896 defined to be passed in int regs if possible, otherwise memory,
4897 regardless of whether there are fp values present. */
4900 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4902 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4909 /* Compute how many registers we need. */
4910 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4912 if (TREE_CODE (field) == FIELD_DECL)
4914 HOST_WIDE_INT bitpos = startbitpos;
4916 if (DECL_SIZE (field) != 0)
4918 if (integer_zerop (DECL_SIZE (field)))
4921 if (host_integerp (bit_position (field), 1))
4922 bitpos += int_bit_position (field);
4925 /* ??? FIXME: else assume zero offset. */
4927 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4928 function_arg_record_value_1 (TREE_TYPE (field),
4932 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4933 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4938 if (parms->intoffset != -1)
4940 unsigned int startbit, endbit;
4941 int intslots, this_slotno;
4943 startbit = parms->intoffset & -BITS_PER_WORD;
4944 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4946 intslots = (endbit - startbit) / BITS_PER_WORD;
4947 this_slotno = parms->slotno + parms->intoffset
4950 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4952 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4953 /* We need to pass this field on the stack. */
4957 parms->nregs += intslots;
4958 parms->intoffset = -1;
4961 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4962 If it wasn't true we wouldn't be here. */
4963 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4964 && DECL_MODE (field) == BLKmode)
4965 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4966 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4973 if (parms->intoffset == -1)
4974 parms->intoffset = bitpos;
4980 /* A subroutine of function_arg_record_value. Assign the bits of the
4981 structure between parms->intoffset and bitpos to integer registers. */
4984 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4985 struct function_arg_record_value_parms *parms)
4987 enum machine_mode mode;
4989 unsigned int startbit, endbit;
4990 int this_slotno, intslots, intoffset;
4993 if (parms->intoffset == -1)
4996 intoffset = parms->intoffset;
4997 parms->intoffset = -1;
4999 startbit = intoffset & -BITS_PER_WORD;
5000 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5001 intslots = (endbit - startbit) / BITS_PER_WORD;
5002 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5004 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5008 /* If this is the trailing part of a word, only load that much into
5009 the register. Otherwise load the whole register. Note that in
5010 the latter case we may pick up unwanted bits. It's not a problem
5011 at the moment but we may wish to revisit it. */
5013 if (intoffset % BITS_PER_WORD != 0)
5014 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5019 intoffset /= BITS_PER_UNIT;
5022 regno = parms->regbase + this_slotno;
5023 reg = gen_rtx_REG (mode, regno);
5024 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5025 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5028 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5033 while (intslots > 0);
5036 /* A subroutine of function_arg_record_value. Traverse the structure
5037 recursively and assign bits to floating point registers. Track which
5038 bits in between need integer registers; invoke function_arg_record_value_3
5039 to make that happen. */
5042 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5043 struct function_arg_record_value_parms *parms,
5049 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5051 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5058 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5060 if (TREE_CODE (field) == FIELD_DECL)
5062 HOST_WIDE_INT bitpos = startbitpos;
5064 if (DECL_SIZE (field) != 0)
5066 if (integer_zerop (DECL_SIZE (field)))
5069 if (host_integerp (bit_position (field), 1))
5070 bitpos += int_bit_position (field);
5073 /* ??? FIXME: else assume zero offset. */
5075 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5076 function_arg_record_value_2 (TREE_TYPE (field),
5080 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5081 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5086 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5087 int regno, nregs, pos;
5088 enum machine_mode mode = DECL_MODE (field);
5091 function_arg_record_value_3 (bitpos, parms);
5093 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5096 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5097 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5099 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5101 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5107 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5108 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5110 reg = gen_rtx_REG (mode, regno);
5111 pos = bitpos / BITS_PER_UNIT;
5112 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5113 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5117 regno += GET_MODE_SIZE (mode) / 4;
5118 reg = gen_rtx_REG (mode, regno);
5119 pos += GET_MODE_SIZE (mode);
5120 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5121 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5127 if (parms->intoffset == -1)
5128 parms->intoffset = bitpos;
5134 /* Used by function_arg and function_value to implement the complex
5135 conventions of the 64-bit ABI for passing and returning structures.
5136 Return an expression valid as a return value for the two macros
5137 FUNCTION_ARG and FUNCTION_VALUE.
5139 TYPE is the data type of the argument (as a tree).
5140 This is null for libcalls where that information may not be available.
5142 MODE is the argument's machine mode.
5143 SLOTNO is the index number of the argument's slot in the parameter array.
5144 NAMED is nonzero if this argument is a named parameter
5145 (otherwise it is an extra parameter matching an ellipsis).
5146 REGBASE is the regno of the base register for the parameter array. */
5149 function_arg_record_value (const_tree type, enum machine_mode mode,
5150 int slotno, int named, int regbase)
5152 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5153 struct function_arg_record_value_parms parms;
5156 parms.ret = NULL_RTX;
5157 parms.slotno = slotno;
5158 parms.named = named;
5159 parms.regbase = regbase;
5162 /* Compute how many registers we need. */
5164 parms.intoffset = 0;
5165 function_arg_record_value_1 (type, 0, &parms, false);
5167 /* Take into account pending integer fields. */
5168 if (parms.intoffset != -1)
5170 unsigned int startbit, endbit;
5171 int intslots, this_slotno;
5173 startbit = parms.intoffset & -BITS_PER_WORD;
5174 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5175 intslots = (endbit - startbit) / BITS_PER_WORD;
5176 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5178 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5180 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5181 /* We need to pass this field on the stack. */
5185 parms.nregs += intslots;
5187 nregs = parms.nregs;
5189 /* Allocate the vector and handle some annoying special cases. */
5192 /* ??? Empty structure has no value? Duh? */
5195 /* Though there's nothing really to store, return a word register
5196 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5197 leads to breakage due to the fact that there are zero bytes to initialize. */
5199 return gen_rtx_REG (mode, regbase);
5203 /* ??? C++ has structures with no fields, and yet a size. Give up
5204 for now and pass everything back in integer registers. */
5205 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5207 if (nregs + slotno > SPARC_INT_ARG_MAX)
5208 nregs = SPARC_INT_ARG_MAX - slotno;
5210 gcc_assert (nregs != 0);
5212 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5214 /* If at least one field must be passed on the stack, generate
5215 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5216 also be passed on the stack. We can't do much better because the
5217 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5218 of structures for which the fields passed exclusively in registers
5219 are not at the beginning of the structure. */
5221 XVECEXP (parms.ret, 0, 0)
5222 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5224 /* Fill in the entries. */
5226 parms.intoffset = 0;
5227 function_arg_record_value_2 (type, 0, &parms, false);
5228 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5230 gcc_assert (parms.nregs == nregs);
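/* Rough illustration of the RTL built above (hypothetical case): an outgoing
   64-bit argument of type "struct { double d; long l; }" assigned to slot 0
   yields approximately

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   i.e. the FP field lands in the slot's FP register and the integer field in
   the following integer register.  */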
5235 /* Used by function_arg and function_value to implement the conventions
5236 of the 64-bit ABI for passing and returning unions.
5237 Return an expression valid as a return value for the two macros
5238 FUNCTION_ARG and FUNCTION_VALUE.
5240 SIZE is the size in bytes of the union.
5241 MODE is the argument's machine mode.
5242 REGNO is the hard register the union will be passed in. */
5245 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5248 int nwords = ROUND_ADVANCE (size), i;
5251 /* See comment in previous function for empty structures. */
5253 return gen_rtx_REG (mode, regno);
5255 if (slotno == SPARC_INT_ARG_MAX - 1)
5258 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5260 for (i = 0; i < nwords; i++)
5262 /* Unions are passed left-justified. */
5263 XVECEXP (regs, 0, i)
5264 = gen_rtx_EXPR_LIST (VOIDmode,
5265 gen_rtx_REG (word_mode, regno),
5266 GEN_INT (UNITS_PER_WORD * i));
5273 /* Used by function_arg and function_value to implement the conventions
5274 for passing and returning large (BLKmode) vectors.
5275 Return an expression valid as a return value for the two macros
5276 FUNCTION_ARG and FUNCTION_VALUE.
5278 SIZE is the size in bytes of the vector (at least 8 bytes).
5279 REGNO is the FP hard register the vector will be passed in. */
5282 function_arg_vector_value (int size, int regno)
5284 int i, nregs = size / 8;
5287 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5289 for (i = 0; i < nregs; i++)
5291 XVECEXP (regs, 0, i)
5292 = gen_rtx_EXPR_LIST (VOIDmode,
5293 gen_rtx_REG (DImode, regno + 2*i),
5300 /* Handle the FUNCTION_ARG macro.
5301 Determine where to put an argument to a function.
5302 Value is zero to push the argument on the stack,
5303 or a hard register in which to store the argument.
5305 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5306 the preceding args and about the function being called.
5307 MODE is the argument's machine mode.
5308 TYPE is the data type of the argument (as a tree).
5309 This is null for libcalls where that information may not be available.
5311 NAMED is nonzero if this argument is a named parameter
5312 (otherwise it is an extra parameter matching an ellipsis).
5313 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5316 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5317 tree type, int named, int incoming_p)
5319 int regbase = (incoming_p
5320 ? SPARC_INCOMING_INT_ARG_FIRST
5321 : SPARC_OUTGOING_INT_ARG_FIRST);
5322 int slotno, regno, padding;
5323 enum mode_class mclass = GET_MODE_CLASS (mode);
5325 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5330 /* Vector types deserve special treatment because they are polymorphic wrt
5331 their mode, depending upon whether VIS instructions are enabled. */
5332 if (type && TREE_CODE (type) == VECTOR_TYPE)
5334 HOST_WIDE_INT size = int_size_in_bytes (type);
5335 gcc_assert ((TARGET_ARCH32 && size <= 8)
5336 || (TARGET_ARCH64 && size <= 16));
5338 if (mode == BLKmode)
5339 return function_arg_vector_value (size,
5340 SPARC_FP_ARG_FIRST + 2*slotno);
5342 mclass = MODE_FLOAT;
5346 return gen_rtx_REG (mode, regno);
5348 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5349 and are promoted to registers if possible. */
5350 if (type && TREE_CODE (type) == RECORD_TYPE)
5352 HOST_WIDE_INT size = int_size_in_bytes (type);
5353 gcc_assert (size <= 16);
5355 return function_arg_record_value (type, mode, slotno, named, regbase);
5358 /* Unions up to 16 bytes in size are passed in integer registers. */
5359 else if (type && TREE_CODE (type) == UNION_TYPE)
5361 HOST_WIDE_INT size = int_size_in_bytes (type);
5362 gcc_assert (size <= 16);
5364 return function_arg_union_value (size, mode, slotno, regno);
5367 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5368 but also have the slot allocated for them.
5369 If no prototype is in scope fp values in register slots get passed
5370 in two places, either fp regs and int regs or fp regs and memory. */
5371 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5372 && SPARC_FP_REG_P (regno))
5374 rtx reg = gen_rtx_REG (mode, regno);
5375 if (cum->prototype_p || cum->libcall_p)
5377 /* "* 2" because fp reg numbers are recorded in 4 byte
5380 /* ??? This will cause the value to be passed in the fp reg and
5381 in the stack. When a prototype exists we want to pass the
5382 value in the reg but reserve space on the stack. That's an
5383 optimization, and is deferred [for a bit]. */
5384 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5385 return gen_rtx_PARALLEL (mode,
5387 gen_rtx_EXPR_LIST (VOIDmode,
5388 NULL_RTX, const0_rtx),
5389 gen_rtx_EXPR_LIST (VOIDmode,
5393 /* ??? It seems that passing back a register even when past
5394 the area declared by REG_PARM_STACK_SPACE will allocate
5395 space appropriately, and will not copy the data onto the
5396 stack, exactly as we desire.
5398 This is due to locate_and_pad_parm being called in
5399 expand_call whenever reg_parm_stack_space > 0, which
5400 while beneficial to our example here, would seem to be
5401 in error from what had been intended. Ho hum... -- r~ */
5409 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5413 /* On incoming, we don't need to know that the value
5414 is passed in %f0 and %i0, and it confuses other parts
5415 causing needless spillage even on the simplest cases. */
5419 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5420 + (regno - SPARC_FP_ARG_FIRST) / 2);
5422 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5423 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5425 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5429 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5430 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5431 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5436 /* All other aggregate types are passed in an integer register in a mode
5437 corresponding to the size of the type. */
5438 else if (type && AGGREGATE_TYPE_P (type))
5440 HOST_WIDE_INT size = int_size_in_bytes (type);
5441 gcc_assert (size <= 16);
5443 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5446 return gen_rtx_REG (mode, regno);
5449 /* For an arg passed partly in registers and partly in memory,
5450 this is the number of bytes of registers used.
5451 For args passed entirely in registers or entirely in memory, zero.
5453 Any arg that starts in the first 6 regs but won't entirely fit in them
5454 needs partial registers on v8. On v9, structures with integer
5455 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5456 values that begin in the last fp reg [where "last fp reg" varies with the
5457 mode] will be split between that reg and memory. */
5460 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5461 tree type, bool named)
5463 int slotno, regno, padding;
5465 /* We pass 0 for incoming_p here; it doesn't matter. */
5466 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5473 if ((slotno + (mode == BLKmode
5474 ? ROUND_ADVANCE (int_size_in_bytes (type))
5475 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5476 > SPARC_INT_ARG_MAX)
5477 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5481 /* We are guaranteed by pass_by_reference that the size of the
5482 argument is not greater than 16 bytes, so we only need to return
5483 one word if the argument is partially passed in registers. */
5485 if (type && AGGREGATE_TYPE_P (type))
5487 int size = int_size_in_bytes (type);
5489 if (size > UNITS_PER_WORD
5490 && slotno == SPARC_INT_ARG_MAX - 1)
5491 return UNITS_PER_WORD;
5493 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5494 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5495 && ! (TARGET_FPU && named)))
5497 /* The complex types are passed as packed types. */
5498 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5499 && slotno == SPARC_INT_ARG_MAX - 1)
5500 return UNITS_PER_WORD;
5502 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5504 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5506 return UNITS_PER_WORD;
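/* Worked example for the 32-bit case above (illustration only): a "long
   long" argument that starts in the sixth and last integer slot has one word
   passed in %o5 and the other on the stack, so the hook reports
   UNITS_PER_WORD (4) bytes as passed in registers.  */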
5513 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5514 Specify whether to pass the argument by reference. */
5517 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5518 enum machine_mode mode, const_tree type,
5519 bool named ATTRIBUTE_UNUSED)
5522 /* Original SPARC 32-bit ABI says that structures and unions,
5523 and quad-precision floats are passed by reference. For Pascal,
5524 also pass arrays by reference. All other base types are passed
5527 Extended ABI (as implemented by the Sun compiler) says that all
5528 complex floats are passed by reference. Pass complex integers
5529 in registers up to 8 bytes. More generally, enforce the 2-word
5530 cap for passing arguments in registers.
5532 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5533 integers are passed like floats of the same size, that is in
5534 registers up to 8 bytes. Pass all vector floats by reference
5535 like structure and unions. */
5536 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5538 /* Catch CDImode, TFmode, DCmode and TCmode. */
5539 || GET_MODE_SIZE (mode) > 8
5541 && TREE_CODE (type) == VECTOR_TYPE
5542 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5544 /* Original SPARC 64-bit ABI says that structures and unions
5545 smaller than 16 bytes are passed in registers, as well as
5546 all other base types.
5548 Extended ABI (as implemented by the Sun compiler) says that
5549 complex floats are passed in registers up to 16 bytes. Pass
5550 all complex integers in registers up to 16 bytes. More generally,
5551 enforce the 2-word cap for passing arguments in registers.
5553 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5554 integers are passed like floats of the same size, that is in
5555 registers (up to 16 bytes). Pass all vector floats like structure and unions. */
5558 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5559 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5560 /* Catch CTImode and TCmode. */
5561 || GET_MODE_SIZE (mode) > 16);
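/* Quick illustration of the rules above (hypothetical types): in 32-bit mode
   structures, "long double" and "_Complex double" are passed by reference
   while "long long" still goes in registers; in 64-bit mode a 16-byte
   structure or a "long double" is passed by value and only objects larger
   than 16 bytes (e.g. CTImode or TCmode values) go by reference.  */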
5564 /* Handle the FUNCTION_ARG_ADVANCE macro.
5565 Update the data in CUM to advance over an argument
5566 of mode MODE and data type TYPE.
5567 TYPE is null for libcalls where that information may not be available. */
5570 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5571 tree type, int named)
5573 int slotno, regno, padding;
5575 /* We pass 0 for incoming_p here; it doesn't matter. */
5576 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5578 /* If register required leading padding, add it. */
5580 cum->words += padding;
5584 cum->words += (mode != BLKmode
5585 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5586 : ROUND_ADVANCE (int_size_in_bytes (type)));
5590 if (type && AGGREGATE_TYPE_P (type))
5592 int size = int_size_in_bytes (type);
5596 else if (size <= 16)
5598 else /* passed by reference */
5603 cum->words += (mode != BLKmode
5604 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5605 : ROUND_ADVANCE (int_size_in_bytes (type)));
5610 /* Handle the FUNCTION_ARG_PADDING macro.
5611 For the 64-bit ABI structs are always stored left-shifted in their argument slot. */
5615 function_arg_padding (enum machine_mode mode, const_tree type)
5617 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5620 /* Fall back to the default. */
5621 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5624 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5625 Specify whether to return the return value in memory. */
5628 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5631 /* Original SPARC 32-bit ABI says that structures and unions,
5632 and quad-precision floats are returned in memory. All other
5633 base types are returned in registers.
5635 Extended ABI (as implemented by the Sun compiler) says that
5636 all complex floats are returned in registers (8 FP registers
5637 at most for '_Complex long double'). Return all complex integers
5638 in registers (4 at most for '_Complex long long').
5640 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5641 integers are returned like floats of the same size, that is in
5642 registers up to 8 bytes and in memory otherwise. Return all
5643 vector floats in memory like structure and unions; note that
5644 they always have BLKmode like the latter. */
5645 return (TYPE_MODE (type) == BLKmode
5646 || TYPE_MODE (type) == TFmode
5647 || (TREE_CODE (type) == VECTOR_TYPE
5648 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5650 /* Original SPARC 64-bit ABI says that structures and unions
5651 smaller than 32 bytes are returned in registers, as well as
5652 all other base types.
5654 Extended ABI (as implemented by the Sun compiler) says that all
5655 complex floats are returned in registers (8 FP registers at most
5656 for '_Complex long double'). Return all complex integers in
5657 registers (4 at most for '_Complex TItype').
5659 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5660 integers are returned like floats of the same size, that is in
5661 registers. Return all vector floats like structure and unions;
5662 note that they always have BLKmode like the latter. */
5663 return ((TYPE_MODE (type) == BLKmode
5664 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
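/* Illustration (hypothetical types): in 32-bit mode any structure, a "long
   double" (TFmode) and vector floats are returned in memory, whereas
   "_Complex double" comes back in FP registers; in 64-bit mode a structure
   is returned in memory only once it exceeds 32 bytes.  */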
5667 /* Handle the TARGET_STRUCT_VALUE target hook.
5668 Return where to find the structure return value address. */
5671 sparc_struct_value_rtx (tree fndecl, int incoming)
5680 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5681 STRUCT_VALUE_OFFSET));
5683 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5684 STRUCT_VALUE_OFFSET));
5686 /* Only follow the SPARC ABI for fixed-size structure returns.
5687 Variable size structure returns are handled per the normal
5688 procedures in GCC. This is enabled by -mstd-struct-return */
5690 && sparc_std_struct_return
5691 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5692 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5694 /* We must check and adjust the return address, as it is
5695 optional as to whether the return object is really provided or not. */
5697 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5698 rtx scratch = gen_reg_rtx (SImode);
5699 rtx endlab = gen_label_rtx ();
5701 /* Calculate the return object size */
5702 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5703 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5704 /* Construct a temporary return value */
5705 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5707 /* Implement the SPARC 32-bit psABI callee struct return checking:
5710 Fetch the instruction where we will return to and see if
5711 it's an unimp instruction (the most significant 10 bits will be zero). */
5713 emit_move_insn (scratch, gen_rtx_MEM (SImode,
5714 plus_constant (ret_rtx, 8)));
5715 /* Assume the size is valid and pre-adjust */
5716 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5717 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5718 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5719 /* Assign stack temp:
5720 Write the address of the memory pointed to by temp_val into
5721 the memory pointed to by mem */
5722 emit_move_insn (mem, XEXP (temp_val, 0));
5723 emit_label (endlab);
5726 set_mem_alias_set (mem, struct_value_alias_set);
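/* Illustration of the 32-bit psABI convention checked above (hypothetical
   assembly): a caller expecting a 12-byte struct emits

        call    f
         nop
        unimp   12              ! low 12 bits hold the expected size

   and the callee, after storing the result, returns with "jmp %i7+12" to
   skip the unimp word -- but only when the size matches, which is what the
   compare-and-jump sequence above verifies.  */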
5731 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5732 For v9, function return values are subject to the same rules as arguments,
5733 except that up to 32 bytes may be returned in registers. */
5736 function_value (const_tree type, enum machine_mode mode, int incoming_p)
5738 /* Beware that the two values are swapped here wrt function_arg. */
5739 int regbase = (incoming_p
5740 ? SPARC_OUTGOING_INT_ARG_FIRST
5741 : SPARC_INCOMING_INT_ARG_FIRST);
5742 enum mode_class mclass = GET_MODE_CLASS (mode);
5745 /* Vector types deserve special treatment because they are polymorphic wrt
5746 their mode, depending upon whether VIS instructions are enabled. */
5747 if (type && TREE_CODE (type) == VECTOR_TYPE)
5749 HOST_WIDE_INT size = int_size_in_bytes (type);
5750 gcc_assert ((TARGET_ARCH32 && size <= 8)
5751 || (TARGET_ARCH64 && size <= 32));
5753 if (mode == BLKmode)
5754 return function_arg_vector_value (size,
5755 SPARC_FP_ARG_FIRST);
5757 mclass = MODE_FLOAT;
5760 if (TARGET_ARCH64 && type)
5762 /* Structures up to 32 bytes in size are returned in registers. */
5763 if (TREE_CODE (type) == RECORD_TYPE)
5765 HOST_WIDE_INT size = int_size_in_bytes (type);
5766 gcc_assert (size <= 32);
5768 return function_arg_record_value (type, mode, 0, 1, regbase);
5771 /* Unions up to 32 bytes in size are returned in integer registers. */
5772 else if (TREE_CODE (type) == UNION_TYPE)
5774 HOST_WIDE_INT size = int_size_in_bytes (type);
5775 gcc_assert (size <= 32);
5777 return function_arg_union_value (size, mode, 0, regbase);
5780 /* Objects that require it are returned in FP registers. */
5781 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5784 /* All other aggregate types are returned in an integer register in a
5785 mode corresponding to the size of the type. */
5786 else if (AGGREGATE_TYPE_P (type))
5788 /* All other aggregate types are passed in an integer register
5789 in a mode corresponding to the size of the type. */
5790 HOST_WIDE_INT size = int_size_in_bytes (type);
5791 gcc_assert (size <= 32);
5793 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5795 /* ??? We probably should have made the same ABI change in
5796 3.4.0 as the one we made for unions. The latter was
5797 required by the SCD though, while the former is not
5798 specified, so we favored compatibility and efficiency.
5800 Now we're stuck for aggregates larger than 16 bytes,
5801 because OImode vanished in the meantime. Let's not
5802 try to be unduly clever, and simply follow the ABI
5803 for unions in that case. */
5804 if (mode == BLKmode)
5805 return function_arg_union_value (size, mode, 0, regbase);
5810 /* This must match sparc_promote_function_mode.
5811 ??? Maybe 32-bit pointers should actually remain in Pmode? */
5812 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5816 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
5817 regno = SPARC_FP_ARG_FIRST;
5821 return gen_rtx_REG (mode, regno);
5824 /* Do what is necessary for `va_start'. We look at the current function
5825 to determine if stdarg or varargs is used and return the address of
5826 the first unnamed parameter. */
5829 sparc_builtin_saveregs (void)
5831 int first_reg = crtl->args.info.words;
5835 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5836 emit_move_insn (gen_rtx_MEM (word_mode,
5837 gen_rtx_PLUS (Pmode,
5839 GEN_INT (FIRST_PARM_OFFSET (0)
5842 gen_rtx_REG (word_mode,
5843 SPARC_INCOMING_INT_ARG_FIRST + regno));
5845 address = gen_rtx_PLUS (Pmode,
5847 GEN_INT (FIRST_PARM_OFFSET (0)
5848 + UNITS_PER_WORD * first_reg));
5853 /* Implement `va_start' for stdarg. */
5856 sparc_va_start (tree valist, rtx nextarg)
5858 nextarg = expand_builtin_saveregs ();
5859 std_expand_builtin_va_start (valist, nextarg);
5862 /* Implement `va_arg' for stdarg. */
5865 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
5868 HOST_WIDE_INT size, rsize, align;
5871 tree ptrtype = build_pointer_type (type);
5873 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5876 size = rsize = UNITS_PER_WORD;
5882 size = int_size_in_bytes (type);
5883 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5888 /* For SPARC64, objects requiring 16-byte alignment get it. */
5889 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5890 align = 2 * UNITS_PER_WORD;
5892 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5893 are left-justified in their slots. */
5894 if (AGGREGATE_TYPE_P (type))
5897 size = rsize = UNITS_PER_WORD;
5907 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5908 size_int (align - 1));
5909 incr = fold_convert (sizetype, incr);
5910 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
5912 incr = fold_convert (ptr_type_node, incr);
5915 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5918 if (BYTES_BIG_ENDIAN && size < rsize)
5919 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5920 size_int (rsize - size));
5924 addr = fold_convert (build_pointer_type (ptrtype), addr);
5925 addr = build_va_arg_indirect_ref (addr);
5928 /* If the address isn't aligned properly for the type, we need a temporary.
5929 FIXME: This is inefficient, usually we can do this in registers. */
5930 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
5932 tree tmp = create_tmp_var (type, "va_arg_tmp");
5933 tree dest_addr = build_fold_addr_expr (tmp);
5934 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
5935 3, dest_addr, addr, size_int (rsize));
5936 TREE_ADDRESSABLE (tmp) = 1;
5937 gimplify_and_add (copy, pre_p);
5942 addr = fold_convert (ptrtype, addr);
5945 = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
5946 gimplify_assign (valist, incr, post_p);
5948 return build_va_arg_indirect_ref (addr);
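/* A standalone C illustration (kept under #if 0, not part of the port) of
   what the alignment handling above buys: on 64-bit targets a "long double"
   fetched with va_arg always starts on a 16-byte boundary, so the loop below
   works regardless of how the preceding arguments were laid out.  */
#if 0
#include <stdarg.h>

static long double
sum_long_doubles (int n, ...)
{
  va_list ap;
  long double acc = 0.0L;
  int i;

  va_start (ap, n);
  for (i = 0; i < n; i++)
    acc += va_arg (ap, long double);   /* each slot rounded to 16 bytes */
  va_end (ap);
  return acc;
}
#endif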
5951 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5952 Specify whether the vector mode is supported by the hardware. */
5955 sparc_vector_mode_supported_p (enum machine_mode mode)
5957 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
5960 /* Return the string to output an unconditional branch to LABEL, which is
5961 the operand number of the label.
5963 DEST is the destination insn (i.e. the label), INSN is the source. */
5966 output_ubranch (rtx dest, int label, rtx insn)
5968 static char string[64];
5969 bool v9_form = false;
5972 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5974 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5975 - INSN_ADDRESSES (INSN_UID (insn)));
5976 /* Leave some instructions for "slop". */
5977 if (delta >= -260000 && delta < 260000)
5982 strcpy (string, "ba%*,pt\t%%xcc, ");
5984 strcpy (string, "b%*\t");
5986 p = strchr (string, '\0');
5997 /* Return the string to output a conditional branch to LABEL, which is
5998 the operand number of the label. OP is the conditional expression.
5999 XEXP (OP, 0) is assumed to be a condition code register (integer or
6000 floating point) and its mode specifies what kind of comparison we made.
6002 DEST is the destination insn (i.e. the label), INSN is the source.
6004 REVERSED is nonzero if we should reverse the sense of the comparison.
6006 ANNUL is nonzero if we should generate an annulling branch. */
6009 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
6012 static char string[64];
6013 enum rtx_code code = GET_CODE (op);
6014 rtx cc_reg = XEXP (op, 0);
6015 enum machine_mode mode = GET_MODE (cc_reg);
6016 const char *labelno, *branch;
6017 int spaces = 8, far;
6020 /* v9 branches are limited to +-1MB. If the target is too far away, the
     branch is reversed and made to jump around an unconditional branch, e.g.
6033 "fbne,a,pn %fcc2, .LC29" becomes "fbe,pt %fcc2, .+16 ; nop ; ba .LC29". */
6041 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6044 /* Reversal of FP compares needs care -- an ordered compare
6045 becomes an unordered compare and vice versa. */
6046 if (mode == CCFPmode || mode == CCFPEmode)
6047 code = reverse_condition_maybe_unordered (code);
6049 code = reverse_condition (code);
6052 /* Start by writing the branch condition. */
6053 if (mode == CCFPmode || mode == CCFPEmode)
6104 /* ??? !v9: FP branches cannot be preceded by another floating point
6105 insn. Because there is currently no concept of pre-delay slots,
6106 we can fix this only by always emitting a nop before a floating point branch. */
6111 strcpy (string, "nop\n\t");
6112 strcat (string, branch);
6125 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6137 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6158 strcpy (string, branch);
6160 spaces -= strlen (branch);
6161 p = strchr (string, '\0');
6163 /* Now add the annulling, the label, and a possible noop. */
6176 if (! far && insn && INSN_ADDRESSES_SET_P ())
6178 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6179 - INSN_ADDRESSES (INSN_UID (insn)));
6180 /* Leave some instructions for "slop". */
6181 if (delta < -260000 || delta >= 260000)
6185 if (mode == CCFPmode || mode == CCFPEmode)
6187 static char v9_fcc_labelno[] = "%%fccX, ";
6188 /* Set the char indicating the number of the fcc reg to use. */
6189 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6190 labelno = v9_fcc_labelno;
6193 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6197 else if (mode == CCXmode || mode == CCX_NOOVmode)
6199 labelno = "%%xcc, ";
6204 labelno = "%%icc, ";
6209 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6212 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6225 strcpy (p, labelno);
6226 p = strchr (p, '\0');
6229 strcpy (p, ".+12\n\t nop\n\tb\t");
6230 /* Skip the next insn if requested or
6231 if we know that it will be a nop. */
6232 if (annul || ! final_sequence)
6246 /* Emit a library call comparison between floating point X and Y.
6247 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
6248 Return the new operator to be used in the comparison sequence.
6250 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6251 values as arguments instead of the TFmode registers themselves,
6252 that's why we cannot call emit_float_lib_cmp. */
6255 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6258 rtx slot0, slot1, result, tem, tem2;
6259 enum machine_mode mode;
6260 enum rtx_code new_comparison;
6265 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
6269 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
6273 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
6277 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
6281 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
6285 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
6296 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
6309 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6310 emit_move_insn (slot0, x);
6317 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6318 emit_move_insn (slot1, y);
6321 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6323 XEXP (slot0, 0), Pmode,
6324 XEXP (slot1, 0), Pmode);
6329 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6331 x, TFmode, y, TFmode);
6336 /* Immediately move the result of the libcall into a pseudo
6337 register so reload doesn't clobber the value if it needs
6338 the return register for a spill reg. */
6339 result = gen_reg_rtx (mode);
6340 emit_move_insn (result, hard_libcall_value (mode));
6345 return gen_rtx_NE (VOIDmode, result, const0_rtx);
6348 new_comparison = (comparison == UNORDERED ? EQ : NE);
6349 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
6352 new_comparison = (comparison == UNGT ? GT : NE);
6353 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
6355 return gen_rtx_NE (VOIDmode, result, const2_rtx);
6357 tem = gen_reg_rtx (mode);
6359 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6361 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6362 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
6365 tem = gen_reg_rtx (mode);
6367 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6369 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6370 tem2 = gen_reg_rtx (mode);
6372 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6374 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6375 new_comparison = (comparison == UNEQ ? EQ : NE);
6376 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
6382 /* Generate an unsigned DImode to FP conversion. This is the same code
6383 optabs would emit if we didn't have TFmode patterns. */
6386 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6388 rtx neglab, donelab, i0, i1, f0, in, out;
6391 in = force_reg (DImode, operands[1]);
6392 neglab = gen_label_rtx ();
6393 donelab = gen_label_rtx ();
6394 i0 = gen_reg_rtx (DImode);
6395 i1 = gen_reg_rtx (DImode);
6396 f0 = gen_reg_rtx (mode);
6398 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6400 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6401 emit_jump_insn (gen_jump (donelab));
6404 emit_label (neglab);
6406 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6407 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6408 emit_insn (gen_iordi3 (i0, i0, i1));
6409 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6410 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6412 emit_label (donelab);
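/* A standalone C sketch (kept under #if 0, illustration only) of the
   sequence emitted above: inputs with the high bit set are halved with the
   low bit folded back in ("round to odd"), converted as a signed value and
   then doubled, so the whole unsigned range converts correctly.  */
#if 0
static double
u64_to_fp (unsigned long long x)
{
  if ((long long) x >= 0)
    return (double) (long long) x;                        /* fits as signed */
  return 2.0 * (double) (long long) ((x >> 1) | (x & 1)); /* halve, convert, double */
}
#endif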
6415 /* Generate an FP to unsigned DImode conversion. This is the same code
6416 optabs would emit if we didn't have TFmode patterns. */
6419 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6421 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6424 in = force_reg (mode, operands[1]);
6425 neglab = gen_label_rtx ();
6426 donelab = gen_label_rtx ();
6427 i0 = gen_reg_rtx (DImode);
6428 i1 = gen_reg_rtx (DImode);
6429 limit = gen_reg_rtx (mode);
6430 f0 = gen_reg_rtx (mode);
6432 emit_move_insn (limit,
6433 CONST_DOUBLE_FROM_REAL_VALUE (
6434 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6435 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6437 emit_insn (gen_rtx_SET (VOIDmode,
6439 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6440 emit_jump_insn (gen_jump (donelab));
6443 emit_label (neglab);
6445 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6446 emit_insn (gen_rtx_SET (VOIDmode,
6448 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6449 emit_insn (gen_movdi (i1, const1_rtx));
6450 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6451 emit_insn (gen_xordi3 (out, i0, i1));
6453 emit_label (donelab);
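/* The matching standalone sketch (kept under #if 0, illustration only) for
   the FP-to-unsigned sequence above: values below 2^63 convert directly,
   larger ones have 2^63 subtracted before the signed conversion and the
   sign bit is put back with an xor.  */
#if 0
static unsigned long long
fp_to_u64 (double x)
{
  const double two63 = 9223372036854775808.0;   /* 2^63 */

  if (x < two63)
    return (unsigned long long) (long long) x;
  return (unsigned long long) (long long) (x - two63) ^ (1ULL << 63);
}
#endif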
6456 /* Return the string to output a conditional branch to LABEL, testing
6457 register REG. LABEL is the operand number of the label; REG is the
6458 operand number of the reg. OP is the conditional expression. The mode
6459 of REG says what kind of comparison we made.
6461 DEST is the destination insn (i.e. the label), INSN is the source.
6463 REVERSED is nonzero if we should reverse the sense of the comparison.
6465 ANNUL is nonzero if we should generate an annulling branch. */
6468 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6469 int annul, rtx insn)
6471 static char string[64];
6472 enum rtx_code code = GET_CODE (op);
6473 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6488 /* Branches on a register are limited to +-128KB. If the target is too far
     away, the branch is reversed and made to jump around an unconditional
6491 branch, e.g. "brgez,a,pn %o1, .LC29" becomes "brlz,pt %o1, .+16 ; nop ;
6497 ba,pt %xcc, .LC29". */
6499 far = get_attr_length (insn) >= 3;
6501 /* If not floating-point or if EQ or NE, we can just reverse the code. */
6503 code = reverse_condition (code);
6505 /* Only 64 bit versions of these instructions exist. */
6506 gcc_assert (mode == DImode);
6508 /* Start by writing the branch condition. */
6513 strcpy (string, "brnz");
6517 strcpy (string, "brz");
6521 strcpy (string, "brgez");
6525 strcpy (string, "brlz");
6529 strcpy (string, "brlez");
6533 strcpy (string, "brgz");
6540 p = strchr (string, '\0');
6542 /* Now add the annulling, reg, label, and nop. */
6549 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6552 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6557 *p = p < string + 8 ? '\t' : ' ';
6565 int veryfar = 1, delta;
6567 if (INSN_ADDRESSES_SET_P ())
6569 delta = (INSN_ADDRESSES (INSN_UID (dest))
6570 - INSN_ADDRESSES (INSN_UID (insn)));
6571 /* Leave some instructions for "slop". */
6572 if (delta >= -260000 && delta < 260000)
6576 strcpy (p, ".+12\n\t nop\n\t");
6577 /* Skip the next insn if requested or
6578 if we know that it will be a nop. */
6579 if (annul || ! final_sequence)
6589 strcpy (p, "ba,pt\t%%xcc, ");
6603 /* Return 1, if any of the registers of the instruction are %l[0-7] or %o[0-7].
6604 Such instructions cannot be used in the delay slot of return insn on v9.
6605 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts. */
6609 epilogue_renumber (register rtx *where, int test)
6611 register const char *fmt;
6613 register enum rtx_code code;
6618 code = GET_CODE (*where);
6623 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6625 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6626 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
6634 /* Do not replace the frame pointer with the stack pointer because
6635 it can cause the delayed instruction to load below the stack.
6636 This occurs when instructions like:
6638 (set (reg/i:SI 24 %i0)
6639 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6640 (const_int -20 [0xffffffec])) 0))
6642 are in the return delayed slot. */
6644 if (GET_CODE (XEXP (*where, 0)) == REG
6645 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6646 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6647 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6652 if (SPARC_STACK_BIAS
6653 && GET_CODE (XEXP (*where, 0)) == REG
6654 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6662 fmt = GET_RTX_FORMAT (code);
6664 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6669 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6670 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6673 else if (fmt[i] == 'e'
6674 && epilogue_renumber (&(XEXP (*where, i)), test))
6680 /* Leaf functions and non-leaf functions have different needs. */
6683 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6686 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6688 static const int *const reg_alloc_orders[] = {
6689 reg_leaf_alloc_order,
6690 reg_nonleaf_alloc_order};
6693 order_regs_for_local_alloc (void)
6695 static int last_order_nonleaf = 1;
6697 if (df_regs_ever_live_p (15) != last_order_nonleaf)
6699 last_order_nonleaf = !last_order_nonleaf;
6700 memcpy ((char *) reg_alloc_order,
6701 (const char *) reg_alloc_orders[last_order_nonleaf],
6702 FIRST_PSEUDO_REGISTER * sizeof (int));
6706 /* Return 1 if REG and MEM are legitimate enough to allow the various
6707 mem<-->reg splits to be run. */
6710 sparc_splitdi_legitimate (rtx reg, rtx mem)
6712 /* Punt if we are here by mistake. */
6713 gcc_assert (reload_completed);
6715 /* We must have an offsettable memory reference. */
6716 if (! offsettable_memref_p (mem))
6719 /* If we have legitimate args for ldd/std, we do not want
6720 the split to happen. */
6721 if ((REGNO (reg) % 2) == 0
6722 && mem_min_alignment (mem, 8))
6729 /* Return 1 if x and y are some kind of REG and they refer to
6730 different hard registers. This test is guaranteed to be
6731 run after reload. */
6734 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6736 if (GET_CODE (x) != REG)
6738 if (GET_CODE (y) != REG)
6740 if (REGNO (x) == REGNO (y))
6745 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6746 This makes them candidates for using ldd and std insns.
6748 Note reg1 and reg2 *must* be hard registers. */
6751 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6753 /* We might have been passed a SUBREG. */
6754 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6757 if (REGNO (reg1) % 2 != 0)
6760 /* Integer ldd is deprecated in SPARC V9 */
6761 if (TARGET_V9 && REGNO (reg1) < 32)
6764 return (REGNO (reg1) == REGNO (reg2) - 1);
6767 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in an ldd or std insn.
6770 This can only happen when addr1 and addr2, the addresses in mem1
6771 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6772 addr1 must also be aligned on a 64-bit boundary.
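   For example, [%o0+8] and [%o0+12] qualify (same base register, offsets 8
   and 12, first offset a multiple of 8), whereas [%o0+4] and [%o0+8] do not,
   because the first address is not 64-bit aligned.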
6774 Also iff dependent_reg_rtx is not null it should not be used to
6775 compute the address for mem1, i.e. we cannot optimize a sequence
6787 But, note that the transformation from:
6792 is perfectly fine. Thus, the peephole2 patterns always pass us
6793 the destination register of the first load, never the second one.
6795 For stores we don't have a similar problem, so dependent_reg_rtx is NULL_RTX. */
6799 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6803 HOST_WIDE_INT offset1;
6805 /* The mems cannot be volatile. */
6806 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6809 /* MEM1 should be aligned on a 64-bit boundary. */
6810 if (MEM_ALIGN (mem1) < 64)
6813 addr1 = XEXP (mem1, 0);
6814 addr2 = XEXP (mem2, 0);
6816 /* Extract a register number and offset (if used) from the first addr. */
6817 if (GET_CODE (addr1) == PLUS)
6819 /* If not a REG, return zero. */
6820 if (GET_CODE (XEXP (addr1, 0)) != REG)
6824 reg1 = REGNO (XEXP (addr1, 0));
6825 /* The offset must be constant! */
6826 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6828 offset1 = INTVAL (XEXP (addr1, 1));
6831 else if (GET_CODE (addr1) != REG)
6835 reg1 = REGNO (addr1);
6836 /* This was a simple (mem (reg)) expression. Offset is 0. */
6840 /* Make sure the second address is of the form (mem (plus (reg) (const_int))). */
6841 if (GET_CODE (addr2) != PLUS)
6844 if (GET_CODE (XEXP (addr2, 0)) != REG
6845 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6848 if (reg1 != REGNO (XEXP (addr2, 0)))
6851 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6854 /* The first offset must be evenly divisible by 8 to ensure the
6855 address is 64-bit aligned. */
6856 if (offset1 % 8 != 0)
6859 /* The offset for the second addr must be 4 more than the first addr. */
6860 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6863 /* All the tests passed. addr1 and addr2 are valid for ldd and std
6868 /* Return 1 if reg is a pseudo, or is the first register in
6869 a hard register pair. This makes it suitable for use in
6870 ldd and std insns. */
6873 register_ok_for_ldd (rtx reg)
6875 /* We might have been passed a SUBREG. */
6879 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6880 return (REGNO (reg) % 2 == 0);
6885 /* Return 1 if OP is a memory whose address is known to be
6886 aligned to 8-byte boundary, or a pseudo during reload.
6887 This makes it suitable for use in ldd and std insns. */
6890 memory_ok_for_ldd (rtx op)
6894 /* In 64-bit mode, we assume that the address is word-aligned. */
6895 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
6898 if ((reload_in_progress || reload_completed)
6899 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
6902 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
6904 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
6913 /* Print operand X (an rtx) in assembler syntax to file FILE.
6914 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6915 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6918 print_operand (FILE *file, rtx x, int code)
6923 /* Output an insn in a delay slot. */
6925 sparc_indent_opcode = 1;
6927 fputs ("\n\t nop", file);
6930 /* Output an annul flag if there's nothing for the delay slot and we
6931 are optimizing. This is always used with '(' below.
6932 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6933 this is a dbx bug. So, we only do this when optimizing.
6934 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6935 Always emit a nop in case the next instruction is a branch. */
6936 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6940 /* Output a 'nop' if there's nothing for the delay slot and we are
6941 not optimizing. This is always used with '*' above. */
6942 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6943 fputs ("\n\t nop", file);
6944 else if (final_sequence)
6945 sparc_indent_opcode = 1;
6948 /* Output the right displacement from the saved PC on function return.
6949 The caller may have placed an "unimp" insn immediately after the call
6950 so we have to account for it. This insn is used in the 32-bit ABI
6951 when calling a function that returns a non-zero-sized structure. The
6952 64-bit ABI doesn't have it. Be careful to have this test be the same
6953 as that used on the call. The exception here is that when
6954 sparc_std_struct_return is enabled, the psABI is followed exactly
6955 and the adjustment is made by the code in sparc_struct_value_rtx.
6956 The call emitted is the same whether or not sparc_std_struct_return is enabled. */
6959 && cfun->returns_struct
6960 && ! sparc_std_struct_return
6961 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6963 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6969 /* Output the Embedded Medium/Anywhere code model base register. */
6970 fputs (EMBMEDANY_BASE_REG, file);
6973 /* Print some local dynamic TLS name. */
6974 assemble_name (file, get_some_local_dynamic_name ());
6978 /* Adjust the operand to take into account a RESTORE operation. */
6979 if (GET_CODE (x) == CONST_INT)
6981 else if (GET_CODE (x) != REG)
6982 output_operand_lossage ("invalid %%Y operand");
6983 else if (REGNO (x) < 8)
6984 fputs (reg_names[REGNO (x)], file);
6985 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6986 fputs (reg_names[REGNO (x)-16], file);
6988 output_operand_lossage ("invalid %%Y operand");
6991 /* Print out the low order register name of a register pair. */
6992 if (WORDS_BIG_ENDIAN)
6993 fputs (reg_names[REGNO (x)+1], file);
6995 fputs (reg_names[REGNO (x)], file);
6998 /* Print out the high order register name of a register pair. */
6999 if (WORDS_BIG_ENDIAN)
7000 fputs (reg_names[REGNO (x)], file);
7002 fputs (reg_names[REGNO (x)+1], file);
7005 /* Print out the second register name of a register pair or quad.
7006 I.e., R (%o0) => %o1. */
7007 fputs (reg_names[REGNO (x)+1], file);
7010 /* Print out the third register name of a register quad.
7011 I.e., S (%o0) => %o2. */
7012 fputs (reg_names[REGNO (x)+2], file);
7015 /* Print out the fourth register name of a register quad.
7016 I.e., T (%o0) => %o3. */
7017 fputs (reg_names[REGNO (x)+3], file);
7020 /* Print a condition code register. */
7021 if (REGNO (x) == SPARC_ICC_REG)
7023 /* We don't handle CC[X]_NOOVmode because they're not supposed to occur here. */
7025 if (GET_MODE (x) == CCmode)
7026 fputs ("%icc", file);
7027 else if (GET_MODE (x) == CCXmode)
7028 fputs ("%xcc", file);
7033 /* %fccN register */
7034 fputs (reg_names[REGNO (x)], file);
7037 /* Print the operand's address only. */
7038 output_address (XEXP (x, 0));
7041 /* In this case we need a register. Use %g0 if the
7042 operand is const0_rtx. */
7044 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7046 fputs ("%g0", file);
7053 switch (GET_CODE (x))
7055 case IOR: fputs ("or", file); break;
7056 case AND: fputs ("and", file); break;
7057 case XOR: fputs ("xor", file); break;
7058 default: output_operand_lossage ("invalid %%A operand");
7063 switch (GET_CODE (x))
7065 case IOR: fputs ("orn", file); break;
7066 case AND: fputs ("andn", file); break;
7067 case XOR: fputs ("xnor", file); break;
7068 default: output_operand_lossage ("invalid %%B operand");
7072 /* These are used by the conditional move instructions. */
7076 enum rtx_code rc = GET_CODE (x);
7080 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7081 if (mode == CCFPmode || mode == CCFPEmode)
7082 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7084 rc = reverse_condition (GET_CODE (x));
7088 case NE: fputs ("ne", file); break;
7089 case EQ: fputs ("e", file); break;
7090 case GE: fputs ("ge", file); break;
7091 case GT: fputs ("g", file); break;
7092 case LE: fputs ("le", file); break;
7093 case LT: fputs ("l", file); break;
7094 case GEU: fputs ("geu", file); break;
7095 case GTU: fputs ("gu", file); break;
7096 case LEU: fputs ("leu", file); break;
7097 case LTU: fputs ("lu", file); break;
7098 case LTGT: fputs ("lg", file); break;
7099 case UNORDERED: fputs ("u", file); break;
7100 case ORDERED: fputs ("o", file); break;
7101 case UNLT: fputs ("ul", file); break;
7102 case UNLE: fputs ("ule", file); break;
7103 case UNGT: fputs ("ug", file); break;
7104 case UNGE: fputs ("uge", file); break;
7105 case UNEQ: fputs ("ue", file); break;
7106 default: output_operand_lossage (code == 'c'
7107 ? "invalid %%c operand"
7108 : "invalid %%C operand");
7113 /* These are used by the movr instruction pattern. */
7117 enum rtx_code rc = (code == 'd'
7118 ? reverse_condition (GET_CODE (x))
7122 case NE: fputs ("ne", file); break;
7123 case EQ: fputs ("e", file); break;
7124 case GE: fputs ("gez", file); break;
7125 case LT: fputs ("lz", file); break;
7126 case LE: fputs ("lez", file); break;
7127 case GT: fputs ("gz", file); break;
7128 default: output_operand_lossage (code == 'd'
7129 ? "invalid %%d operand"
7130 : "invalid %%D operand");
7137 /* Print a sign-extended character. */
7138 int i = trunc_int_for_mode (INTVAL (x), QImode);
7139 fprintf (file, "%d", i);
7144 /* Operand must be a MEM; write its address. */
7145 if (GET_CODE (x) != MEM)
7146 output_operand_lossage ("invalid %%f operand");
7147 output_address (XEXP (x, 0));
7152 /* Print a sign-extended 32-bit value. */
7154 if (GET_CODE(x) == CONST_INT)
7156 else if (GET_CODE(x) == CONST_DOUBLE)
7157 i = CONST_DOUBLE_LOW (x);
7160 output_operand_lossage ("invalid %%s operand");
7163 i = trunc_int_for_mode (i, SImode);
7164 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7169 /* Do nothing special. */
7173 /* Undocumented flag. */
7174 output_operand_lossage ("invalid operand output code");
7177 if (GET_CODE (x) == REG)
7178 fputs (reg_names[REGNO (x)], file);
7179 else if (GET_CODE (x) == MEM)
7182 /* Poor Sun assembler doesn't understand absolute addressing. */
7183 if (CONSTANT_P (XEXP (x, 0)))
7184 fputs ("%g0+", file);
7185 output_address (XEXP (x, 0));
7188 else if (GET_CODE (x) == HIGH)
7190 fputs ("%hi(", file);
7191 output_addr_const (file, XEXP (x, 0));
7194 else if (GET_CODE (x) == LO_SUM)
7196 print_operand (file, XEXP (x, 0), 0);
7197 if (TARGET_CM_MEDMID)
7198 fputs ("+%l44(", file);
7200 fputs ("+%lo(", file);
7201 output_addr_const (file, XEXP (x, 1));
7204 else if (GET_CODE (x) == CONST_DOUBLE
7205 && (GET_MODE (x) == VOIDmode
7206 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7208 if (CONST_DOUBLE_HIGH (x) == 0)
7209 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7210 else if (CONST_DOUBLE_HIGH (x) == -1
7211 && CONST_DOUBLE_LOW (x) < 0)
7212 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7214 output_operand_lossage ("long long constant not a valid immediate operand");
7216 else if (GET_CODE (x) == CONST_DOUBLE)
7217 output_operand_lossage ("floating point constant not a valid immediate operand");
7218 else { output_addr_const (file, x); }
7221 /* Target hook for assembling integer objects. The sparc version has
7222 special handling for aligned DI-mode objects. */
7225 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7227 /* ??? We only output .xword's for symbols and only then in environments
7228 where the assembler can handle them. */
7229 if (aligned_p && size == 8
7230 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7234 assemble_integer_with_op ("\t.xword\t", x);
7239 assemble_aligned_integer (4, const0_rtx);
7240 assemble_aligned_integer (4, x);
7244 return default_assemble_integer (x, size, aligned_p);
7247 /* Return the value of a code used in the .proc pseudo-op that says
7248 what kind of result this function returns. For non-C types, we pick
7249 the closest C type. */
7251 #ifndef SHORT_TYPE_SIZE
7252 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7255 #ifndef INT_TYPE_SIZE
7256 #define INT_TYPE_SIZE BITS_PER_WORD
7259 #ifndef LONG_TYPE_SIZE
7260 #define LONG_TYPE_SIZE BITS_PER_WORD
7263 #ifndef LONG_LONG_TYPE_SIZE
7264 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7267 #ifndef FLOAT_TYPE_SIZE
7268 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7271 #ifndef DOUBLE_TYPE_SIZE
7272 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7275 #ifndef LONG_DOUBLE_TYPE_SIZE
7276 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7280 sparc_type_code (register tree type)
7282 register unsigned long qualifiers = 0;
7283 register unsigned shift;
7285 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7286 setting more, since some assemblers will give an error for this. Also,
7287 we must be careful to avoid shifts of 32 bits or more to avoid getting
7288 unpredictable results. */
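/* A rough example of the resulting encoding, assuming the usual mapping in
   which a pointer level contributes qualifier value 1 and plain "int" yields
   base code 4: for the type "int *" the loop ORs in (1 << 6) for the pointer
   and then returns 4 in the low bits, giving 0x44.  Each further level of
   nesting shifts its 2-bit qualifier left by two more bits.  */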
7290 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7292 switch (TREE_CODE (type))
7298 qualifiers |= (3 << shift);
7303 qualifiers |= (2 << shift);
7307 case REFERENCE_TYPE:
7309 qualifiers |= (1 << shift);
7313 return (qualifiers | 8);
7316 case QUAL_UNION_TYPE:
7317 return (qualifiers | 9);
7320 return (qualifiers | 10);
7323 return (qualifiers | 16);
7326 /* If this is a range type, consider it to be the underlying type. */
7328 if (TREE_TYPE (type) != 0)
7331 /* Carefully distinguish all the standard types of C,
7332 without messing up if the language is not C. We do this by
7333 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7334 look at both the names and the above fields, but that's redundant.
7335 Any type whose size is between two C types will be considered
7336 to be the wider of the two types. Also, we do not have a
7337 special code to use for "long long", so anything wider than
7338 long is treated the same. Note that we can't distinguish
7339 between "int" and "long" in this code if they are the same
7340 size, but that's fine, since neither can the assembler. */
7342 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7343 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7345 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7346 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7348 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7349 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7352 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7355 /* If this is a range type, consider it to be the underlying type. */
7357 if (TREE_TYPE (type) != 0)
7360 /* Carefully distinguish all the standard types of C,
7361 without messing up if the language is not C. */
7363 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7364 return (qualifiers | 6);
7367 return (qualifiers | 7);
7369 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7370 /* ??? We need to distinguish between double and float complex types,
7371 but I don't know how yet because I can't reach this code from
7372 existing front-ends. */
7373 return (qualifiers | 7); /* Who knows? */
7376 case BOOLEAN_TYPE: /* Boolean truth value type. */
7377 case LANG_TYPE: /* ? */
7381 gcc_unreachable (); /* Not a type! */
7388 /* Nested function support. */
7390 /* Emit RTL insns to initialize the variable parts of a trampoline.
7391 FNADDR is an RTX for the address of the function's pure code.
7392 CXT is an RTX for the static chain value for the function.
7394 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7395 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7396 (to store insns). This is a bit excessive. Perhaps a different
7397 mechanism would be better here.
7399 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7402 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7404 /* SPARC 32-bit trampoline:
7407 sethi %hi(static), %g2
7409 or %g2, %lo(static), %g2
7411 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7412 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii */
7416 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7417 expand_binop (SImode, ior_optab,
7418 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7419 size_int (10), 0, 1),
7420 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7421 NULL_RTX, 1, OPTAB_DIRECT));
7424 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7425 expand_binop (SImode, ior_optab,
7426 expand_shift (RSHIFT_EXPR, SImode, cxt,
7427 size_int (10), 0, 1),
7428 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7429 NULL_RTX, 1, OPTAB_DIRECT));
7432 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7433 expand_binop (SImode, ior_optab,
7434 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7435 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7436 NULL_RTX, 1, OPTAB_DIRECT));
7439 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7440 expand_binop (SImode, ior_optab,
7441 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7442 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7443 NULL_RTX, 1, OPTAB_DIRECT));
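/* Given the SETHI and JMPL encodings shown above, the four words stored by
   the calls above should assemble to roughly:

	sethi	%hi(fnaddr), %g1
	sethi	%hi(cxt), %g2
	jmp	%g1 + %lo(fnaddr)
	or	%g2, %lo(cxt), %g2

   leaving the static chain in %g2 when control reaches FNADDR.  */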
7445 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7446 aligned on a 16 byte boundary so one flush clears it all. */
7447 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7448 if (sparc_cpu != PROCESSOR_ULTRASPARC
7449 && sparc_cpu != PROCESSOR_ULTRASPARC3
7450 && sparc_cpu != PROCESSOR_NIAGARA
7451 && sparc_cpu != PROCESSOR_NIAGARA2)
7452 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7453 plus_constant (tramp, 8)))));
7455 /* Call __enable_execute_stack after writing onto the stack to make sure
7456 the stack address is accessible. */
7457 #ifdef ENABLE_EXECUTE_STACK
7458 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7459 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7464 /* The 64-bit version is simpler because it makes more sense to load the
7465 values as "immediate" data out of the trampoline. It's also easier since
7466 we can read the PC without clobbering a register. */
7469 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7471 /* SPARC 64-bit trampoline; see the decoded instruction sequence below. */
7480 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7481 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7482 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7483 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7484 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7485 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7486 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7487 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7488 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7489 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
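/* A sketch of what the four instruction words above encode, assuming the
   standard V9 opcodes: read the PC into %g1, load FNADDR from offset 24
   into %g5, jump through %g5, and load CXT from offset 16 into %g5 in the
   delay slot.  The two DImode stores just above supply CXT and FNADDR as
   plain data at those offsets.  */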
7490 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7492 if (sparc_cpu != PROCESSOR_ULTRASPARC
7493 && sparc_cpu != PROCESSOR_ULTRASPARC3
7494 && sparc_cpu != PROCESSOR_NIAGARA
7495 && sparc_cpu != PROCESSOR_NIAGARA2)
7496 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7498 /* Call __enable_execute_stack after writing onto the stack to make sure
7499 the stack address is accessible. */
7500 #ifdef ENABLE_EXECUTE_STACK
7501 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7502 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7506 /* Adjust the cost of a scheduling dependency. Return the new cost of
7507 a dependency LINK of INSN on DEP_INSN. COST is the current cost. */
7510 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7512 enum attr_type insn_type;
7514 if (! recog_memoized (insn))
7517 insn_type = get_attr_type (insn);
7519 if (REG_NOTE_KIND (link) == 0)
7521 /* Data dependency; DEP_INSN writes a register that INSN reads some cycles later. */
7524 /* If a load, then the dependence must be on the memory address;
7525 add an extra "cycle". Note that the cost could be two cycles
7526 if the reg was written late in an instruction group; we cannot tell here. */
7528 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7531 /* Get the delay only if the address of the store is the dependence. */
7532 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7534 rtx pat = PATTERN(insn);
7535 rtx dep_pat = PATTERN (dep_insn);
7537 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7538 return cost; /* This should not happen! */
7540 /* The dependency between the two instructions was on the data that
7541 is being stored. Assume that this implies that the address of the
7542 store is not dependent. */
7543 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7546 return cost + 3; /* An approximation. */
7549 /* A shift instruction cannot receive its data from an instruction
7550 in the same cycle; add a one cycle penalty. */
7551 if (insn_type == TYPE_SHIFT)
7552 return cost + 3; /* Split before cascade into shift. */
7556 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7557 INSN writes some cycles later. */
7559 /* These are only significant for the fpu unit; writing a fp reg before
7560 the fpu has finished with it stalls the processor. */
7562 /* Reusing an integer register causes no problems. */
7563 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7571 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7573 enum attr_type insn_type, dep_type;
7574 rtx pat = PATTERN(insn);
7575 rtx dep_pat = PATTERN (dep_insn);
7577 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7580 insn_type = get_attr_type (insn);
7581 dep_type = get_attr_type (dep_insn);
7583 switch (REG_NOTE_KIND (link))
7586 /* Data dependency; DEP_INSN writes a register that INSN reads some cycles later. */
7593 /* Get the delay iff the address of the store is the dependence. */
7594 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7597 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7604 /* If a load, then the dependence must be on the memory address. If
7605 the addresses aren't equal, then it might be a false dependency */
7606 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7608 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7609 || GET_CODE (SET_DEST (dep_pat)) != MEM
7610 || GET_CODE (SET_SRC (pat)) != MEM
7611 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7612 XEXP (SET_SRC (pat), 0)))
7620 /* Compare to branch latency is 0. There is no benefit from
7621 separating compare and branch. */
7622 if (dep_type == TYPE_COMPARE)
7624 /* Floating point compare to branch latency is less than
7625 compare to conditional move. */
7626 if (dep_type == TYPE_FPCMP)
7635 /* Anti-dependencies only penalize the fpu unit. */
7636 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7648 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
7652 case PROCESSOR_SUPERSPARC:
7653 cost = supersparc_adjust_cost (insn, link, dep, cost);
7655 case PROCESSOR_HYPERSPARC:
7656 case PROCESSOR_SPARCLITE86X:
7657 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7666 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7667 int sched_verbose ATTRIBUTE_UNUSED,
7668 int max_ready ATTRIBUTE_UNUSED)
7673 sparc_use_sched_lookahead (void)
7675 if (sparc_cpu == PROCESSOR_NIAGARA
7676 || sparc_cpu == PROCESSOR_NIAGARA2)
7678 if (sparc_cpu == PROCESSOR_ULTRASPARC
7679 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7681 if ((1 << sparc_cpu) &
7682 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7683 (1 << PROCESSOR_SPARCLITE86X)))
7689 sparc_issue_rate (void)
7693 case PROCESSOR_NIAGARA:
7694 case PROCESSOR_NIAGARA2:
7698 /* Assume V9 processors are capable of at least dual-issue. */
7700 case PROCESSOR_SUPERSPARC:
7702 case PROCESSOR_HYPERSPARC:
7703 case PROCESSOR_SPARCLITE86X:
7705 case PROCESSOR_ULTRASPARC:
7706 case PROCESSOR_ULTRASPARC3:
7712 set_extends (rtx insn)
7714 register rtx pat = PATTERN (insn);
7716 switch (GET_CODE (SET_SRC (pat)))
7718 /* Load and some shift instructions zero extend. */
7721 /* sethi clears the high bits */
7723 /* LO_SUM is used with sethi. sethi cleared the high
7724 bits and the values used with lo_sum are positive */
7726 /* Store flag stores 0 or 1 */
7736 rtx op0 = XEXP (SET_SRC (pat), 0);
7737 rtx op1 = XEXP (SET_SRC (pat), 1);
7738 if (GET_CODE (op1) == CONST_INT)
7739 return INTVAL (op1) >= 0;
7740 if (GET_CODE (op0) != REG)
7742 if (sparc_check_64 (op0, insn) == 1)
7744 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7749 rtx op0 = XEXP (SET_SRC (pat), 0);
7750 rtx op1 = XEXP (SET_SRC (pat), 1);
7751 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7753 if (GET_CODE (op1) == CONST_INT)
7754 return INTVAL (op1) >= 0;
7755 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7758 return GET_MODE (SET_SRC (pat)) == SImode;
7759 /* Positive integers leave the high bits zero. */
7761 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7763 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7766 return - (GET_MODE (SET_SRC (pat)) == SImode);
7768 return sparc_check_64 (SET_SRC (pat), insn);
7774 /* We _ought_ to have only one kind per function, but... */
7775 static GTY(()) rtx sparc_addr_diff_list;
7776 static GTY(()) rtx sparc_addr_list;
7779 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7781 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7783 sparc_addr_diff_list
7784 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7786 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7790 sparc_output_addr_vec (rtx vec)
7792 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7793 int idx, vlen = XVECLEN (body, 0);
7795 #ifdef ASM_OUTPUT_ADDR_VEC_START
7796 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7799 #ifdef ASM_OUTPUT_CASE_LABEL
7800 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7803 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7806 for (idx = 0; idx < vlen; idx++)
7808 ASM_OUTPUT_ADDR_VEC_ELT
7809 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7812 #ifdef ASM_OUTPUT_ADDR_VEC_END
7813 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7818 sparc_output_addr_diff_vec (rtx vec)
7820 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7821 rtx base = XEXP (XEXP (body, 0), 0);
7822 int idx, vlen = XVECLEN (body, 1);
7824 #ifdef ASM_OUTPUT_ADDR_VEC_START
7825 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7828 #ifdef ASM_OUTPUT_CASE_LABEL
7829 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7832 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7835 for (idx = 0; idx < vlen; idx++)
7837 ASM_OUTPUT_ADDR_DIFF_ELT
7840 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7841 CODE_LABEL_NUMBER (base));
7844 #ifdef ASM_OUTPUT_ADDR_VEC_END
7845 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7850 sparc_output_deferred_case_vectors (void)
7855 if (sparc_addr_list == NULL_RTX
7856 && sparc_addr_diff_list == NULL_RTX)
7859 /* Align to cache line in the function's code section. */
7860 switch_to_section (current_function_section ());
7862 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7864 ASM_OUTPUT_ALIGN (asm_out_file, align);
7866 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7867 sparc_output_addr_vec (XEXP (t, 0));
7868 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7869 sparc_output_addr_diff_vec (XEXP (t, 0));
7871 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7874 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7875 unknown. Return 1 if the high bits are zero, -1 if the register is sign-extended. */
7878 sparc_check_64 (rtx x, rtx insn)
7880 /* If a register is set only once it is safe to ignore insns this
7881 code does not know how to handle. The loop will either recognize
7882 the single set and return the correct value, or fail to recognize it and return 0. */
7887 gcc_assert (GET_CODE (x) == REG);
7889 if (GET_MODE (x) == DImode)
7890 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7892 if (flag_expensive_optimizations
7893 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
7899 insn = get_last_insn_anywhere ();
7904 while ((insn = PREV_INSN (insn)))
7906 switch (GET_CODE (insn))
7919 rtx pat = PATTERN (insn);
7920 if (GET_CODE (pat) != SET)
7922 if (rtx_equal_p (x, SET_DEST (pat)))
7923 return set_extends (insn);
7924 if (y && rtx_equal_p (y, SET_DEST (pat)))
7925 return set_extends (insn);
7926 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7934 /* Returns assembly code to perform a DImode shift using
7935 a 64-bit global or out register on SPARC-V8+. */
7937 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7939 static char asm_code[60];
7941 /* The scratch register is only required when the destination
7942 register is not a 64-bit global or out register. */
7943 if (which_alternative != 2)
7944 operands[3] = operands[0];
7946 /* We can only shift by constants <= 63. */
7947 if (GET_CODE (operands[2]) == CONST_INT)
7948 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7950 if (GET_CODE (operands[1]) == CONST_INT)
7952 output_asm_insn ("mov\t%1, %3", operands);
7956 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7957 if (sparc_check_64 (operands[1], insn) <= 0)
7958 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7959 output_asm_insn ("or\t%L1, %3, %3", operands);
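/* At this point %3 holds the full 64-bit first operand: the high word moved
   into bits 63-32 by the sllx, or'ed with the low word, which the srl above
   zero-extends first when it is not already known to be clean.  */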
7962 strcpy(asm_code, opcode);
7964 if (which_alternative != 2)
7965 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7967 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
7970 /* Output rtl to increment the profiler label LABELNO
7971 for profiling a function entry. */
7974 sparc_profile_hook (int labelno)
7979 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7980 if (NO_PROFILE_COUNTERS)
7982 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
7986 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7987 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7988 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7992 #ifdef OBJECT_FORMAT_ELF
7994 sparc_elf_asm_named_section (const char *name, unsigned int flags,
7997 if (flags & SECTION_MERGE)
7999 /* entsize cannot be expressed in this section attributes encoding style. */
8001 default_elf_asm_named_section (name, flags, decl);
8005 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
8007 if (!(flags & SECTION_DEBUG))
8008 fputs (",#alloc", asm_out_file);
8009 if (flags & SECTION_WRITE)
8010 fputs (",#write", asm_out_file);
8011 if (flags & SECTION_TLS)
8012 fputs (",#tls", asm_out_file);
8013 if (flags & SECTION_CODE)
8014 fputs (",#execinstr", asm_out_file);
8016 /* ??? Handle SECTION_BSS. */
8018 fputc ('\n', asm_out_file);
8020 #endif /* OBJECT_FORMAT_ELF */
8022 /* We do not allow indirect calls to be optimized into sibling calls.
8024 We cannot use sibling calls when delayed branches are disabled
8025 because they will likely require the call delay slot to be filled.
8027 Also, on SPARC 32-bit we cannot emit a sibling call when the
8028 current function returns a structure. This is because the "unimp
8029 after call" convention would cause the callee to return to the
8030 wrong place. The generic code already disallows cases where the
8031 function being called returns a structure.
8033 It may seem strange how this last case could occur. Usually there
8034 is code after the call which jumps to epilogue code which dumps the
8035 return value into the struct return area. That ought to invalidate
8036 the sibling call right? Well, in the C++ case we can end up passing
8037 the pointer to the struct return area to a constructor (which returns
8038 void) and then nothing else happens. Such a sibling call would look
8039 valid without the added check here.
8041 VxWorks PIC PLT entries require the global pointer to be initialized
8042 on entry. We therefore can't emit sibling calls to them. */
8044 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8047 && flag_delayed_branch
8048 && (TARGET_ARCH64 || ! cfun->returns_struct)
8049 && !(TARGET_VXWORKS_RTP
8051 && !targetm.binds_local_p (decl)));
8054 /* libfunc renaming. */
8055 #include "config/gofast.h"
8058 sparc_init_libfuncs (void)
8062 /* Use the subroutines that Sun's library provides for integer
8063 multiply and divide. The `*' prevents an underscore from
8064 being prepended by the compiler. .umul is a little faster than .mul. */
8066 set_optab_libfunc (smul_optab, SImode, "*.umul");
8067 set_optab_libfunc (sdiv_optab, SImode, "*.div");
8068 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
8069 set_optab_libfunc (smod_optab, SImode, "*.rem");
8070 set_optab_libfunc (umod_optab, SImode, "*.urem");
8072 /* TFmode arithmetic. These names are part of the SPARC 32bit ABI. */
8073 set_optab_libfunc (add_optab, TFmode, "_Q_add");
8074 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
8075 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
8076 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
8077 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
8079 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
8080 is because with soft-float, the SFmode and DFmode sqrt
8081 instructions will be absent, and the compiler will notice and
8082 try to use the TFmode sqrt instruction for calls to the
8083 builtin function sqrt, but this fails. */
8085 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
8087 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
8088 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
8089 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
8090 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
8091 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
8092 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
8094 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
8095 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
8096 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
8097 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
8099 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
8100 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
8101 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
8102 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
8104 if (DITF_CONVERSION_LIBFUNCS)
8106 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
8107 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
8108 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
8109 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
8112 if (SUN_CONVERSION_LIBFUNCS)
8114 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
8115 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
8116 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
8117 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
8122 /* In the SPARC 64bit ABI, SImode multiply and divide functions
8123 do not exist in the library. Make sure the compiler does not
8124 emit calls to them by accident. (It should always use the
8125 hardware instructions.) */
8126 set_optab_libfunc (smul_optab, SImode, 0);
8127 set_optab_libfunc (sdiv_optab, SImode, 0);
8128 set_optab_libfunc (udiv_optab, SImode, 0);
8129 set_optab_libfunc (smod_optab, SImode, 0);
8130 set_optab_libfunc (umod_optab, SImode, 0);
8132 if (SUN_INTEGER_MULTIPLY_64)
8134 set_optab_libfunc (smul_optab, DImode, "__mul64");
8135 set_optab_libfunc (sdiv_optab, DImode, "__div64");
8136 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
8137 set_optab_libfunc (smod_optab, DImode, "__rem64");
8138 set_optab_libfunc (umod_optab, DImode, "__urem64");
8141 if (SUN_CONVERSION_LIBFUNCS)
8143 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
8144 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
8145 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
8146 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
8150 gofast_maybe_init_libfuncs ();
8153 #define def_builtin(NAME, CODE, TYPE) \
8154 add_builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
8157 /* Implement the TARGET_INIT_BUILTINS target hook.
8158 Create builtin functions for special SPARC instructions. */
8161 sparc_init_builtins (void)
8164 sparc_vis_init_builtins ();
8167 /* Create builtin functions for VIS 1.0 instructions. */
8170 sparc_vis_init_builtins (void)
8172 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
8173 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
8174 tree v4hi = build_vector_type (intHI_type_node, 4);
8175 tree v2hi = build_vector_type (intHI_type_node, 2);
8176 tree v2si = build_vector_type (intSI_type_node, 2);
8178 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
8179 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
8180 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
8181 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
8182 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
8183 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
8184 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
8185 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
8186 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
8187 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
8188 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
8189 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
8190 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
8192 intDI_type_node, 0);
8193 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
8195 intDI_type_node, 0);
8196 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
8198 intSI_type_node, 0);
8199 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
8201 intDI_type_node, 0);
8203 /* Packing and expanding vectors. */
8204 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
8205 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
8206 v8qi_ftype_v2si_v8qi);
8207 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
8209 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
8210 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
8211 v8qi_ftype_v4qi_v4qi);
8213 /* Multiplications. */
8214 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
8215 v4hi_ftype_v4qi_v4hi);
8216 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
8217 v4hi_ftype_v4qi_v2hi);
8218 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
8219 v4hi_ftype_v4qi_v2hi);
8220 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
8221 v4hi_ftype_v8qi_v4hi);
8222 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8223 v4hi_ftype_v8qi_v4hi);
8224 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8225 v2si_ftype_v4qi_v2hi);
8226 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8227 v2si_ftype_v4qi_v2hi);
8229 /* Data aligning. */
8230 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8231 v4hi_ftype_v4hi_v4hi);
8232 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8233 v8qi_ftype_v8qi_v8qi);
8234 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8235 v2si_ftype_v2si_v2si);
8236 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8239 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8242 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8245 /* Pixel distance. */
8246 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8247 di_ftype_v8qi_v8qi_di);
8250 /* Handle TARGET_EXPAND_BUILTIN target hook.
8251 Expand builtin functions for sparc intrinsics. */
8254 sparc_expand_builtin (tree exp, rtx target,
8255 rtx subtarget ATTRIBUTE_UNUSED,
8256 enum machine_mode tmode ATTRIBUTE_UNUSED,
8257 int ignore ATTRIBUTE_UNUSED)
8260 call_expr_arg_iterator iter;
8261 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8262 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8264 enum machine_mode mode[4];
8267 mode[0] = insn_data[icode].operand[0].mode;
8269 || GET_MODE (target) != mode[0]
8270 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
8271 op[0] = gen_reg_rtx (mode[0]);
8275 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8278 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8279 op[arg_count] = expand_normal (arg);
8281 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8283 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8289 pat = GEN_FCN (icode) (op[0], op[1]);
8292 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8295 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8310 sparc_vis_mul8x16 (int e8, int e16)
8312 return (e8 * e16 + 128) / 256;
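/* Worked example of the rounding above: with e8 = 200 and e16 = 300,
   (200 * 300 + 128) / 256 = 60128 / 256 = 234 after integer division,
   i.e. the 8x16 product scaled down by 256 and rounded to nearest.  */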
8315 /* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
8316 by FNCODE. All of the elements in ELTS0 and ELTS1 lists must be integer
8317 constants. A tree list with the results of the multiplications is returned,
8318 and each element in the list is of INNER_TYPE. */
8321 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8323 tree n_elts = NULL_TREE;
8328 case CODE_FOR_fmul8x16_vis:
8329 for (; elts0 && elts1;
8330 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8333 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8334 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8335 n_elts = tree_cons (NULL_TREE,
8336 build_int_cst (inner_type, val),
8341 case CODE_FOR_fmul8x16au_vis:
8342 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8344 for (; elts0; elts0 = TREE_CHAIN (elts0))
8347 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8349 n_elts = tree_cons (NULL_TREE,
8350 build_int_cst (inner_type, val),
8355 case CODE_FOR_fmul8x16al_vis:
8356 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8358 for (; elts0; elts0 = TREE_CHAIN (elts0))
8361 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8363 n_elts = tree_cons (NULL_TREE,
8364 build_int_cst (inner_type, val),
8373 return nreverse (n_elts);
8376 /* Handle TARGET_FOLD_BUILTIN target hook.
8377 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8378 result of the function call is ignored. NULL_TREE is returned if the
8379 function could not be folded. */
8382 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8384 tree arg0, arg1, arg2;
8385 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8386 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
8389 && icode != CODE_FOR_alignaddrsi_vis
8390 && icode != CODE_FOR_alignaddrdi_vis)
8391 return fold_convert (rtype, integer_zero_node);
8395 case CODE_FOR_fexpand_vis:
8396 arg0 = TREE_VALUE (arglist);
8399 if (TREE_CODE (arg0) == VECTOR_CST)
8401 tree inner_type = TREE_TYPE (rtype);
8402 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8403 tree n_elts = NULL_TREE;
8405 for (; elts; elts = TREE_CHAIN (elts))
8407 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8408 n_elts = tree_cons (NULL_TREE,
8409 build_int_cst (inner_type, val),
8412 return build_vector (rtype, nreverse (n_elts));
8416 case CODE_FOR_fmul8x16_vis:
8417 case CODE_FOR_fmul8x16au_vis:
8418 case CODE_FOR_fmul8x16al_vis:
8419 arg0 = TREE_VALUE (arglist);
8420 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8424 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8426 tree inner_type = TREE_TYPE (rtype);
8427 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8428 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8429 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
8432 return build_vector (rtype, n_elts);
8436 case CODE_FOR_fpmerge_vis:
8437 arg0 = TREE_VALUE (arglist);
8438 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8442 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8444 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8445 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8446 tree n_elts = NULL_TREE;
8448 for (; elts0 && elts1;
8449 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8451 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8452 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8455 return build_vector (rtype, nreverse (n_elts));
8459 case CODE_FOR_pdist_vis:
8460 arg0 = TREE_VALUE (arglist);
8461 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8462 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8467 if (TREE_CODE (arg0) == VECTOR_CST
8468 && TREE_CODE (arg1) == VECTOR_CST
8469 && TREE_CODE (arg2) == INTEGER_CST)
8472 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8473 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8474 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8475 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8477 for (; elts0 && elts1;
8478 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8480 unsigned HOST_WIDE_INT
8481 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8482 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8483 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8484 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8486 unsigned HOST_WIDE_INT l;
8489 overflow |= neg_double (low1, high1, &l, &h);
8490 overflow |= add_double (low0, high0, l, h, &l, &h);
8492 overflow |= neg_double (l, h, &l, &h);
8494 overflow |= add_double (low, high, l, h, &low, &high);
8497 gcc_assert (overflow == 0);
8499 return build_int_cst_wide (rtype, low, high);
8509 /* ??? This duplicates information provided to the compiler by the
8510 ??? scheduler description. Some day, teach genautomata to output
8511 ??? the latencies and then CSE will just use that. */
8514 sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
8515 bool speed ATTRIBUTE_UNUSED)
8517 enum machine_mode mode = GET_MODE (x);
8518 bool float_mode_p = FLOAT_MODE_P (mode);
8523 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8541 if (GET_MODE (x) == VOIDmode
8542 && ((CONST_DOUBLE_HIGH (x) == 0
8543 && CONST_DOUBLE_LOW (x) < 0x1000)
8544 || (CONST_DOUBLE_HIGH (x) == -1
8545 && CONST_DOUBLE_LOW (x) < 0
8546 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8553 /* If outer-code was a sign or zero extension, a cost
8554 of COSTS_N_INSNS (1) was already added in. This is
8555 why we are subtracting it back out. */
8556 if (outer_code == ZERO_EXTEND)
8558 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8560 else if (outer_code == SIGN_EXTEND)
8562 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8564 else if (float_mode_p)
8566 *total = sparc_costs->float_load;
8570 *total = sparc_costs->int_load;
8578 *total = sparc_costs->float_plusminus;
8580 *total = COSTS_N_INSNS (1);
8585 *total = sparc_costs->float_mul;
8586 else if (! TARGET_HARD_MUL)
8587 *total = COSTS_N_INSNS (25);
8593 if (sparc_costs->int_mul_bit_factor)
8597 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8599 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
8600 for (nbits = 0; value != 0; value &= value - 1)
8603 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8604 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8606 rtx x1 = XEXP (x, 1);
8607 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8608 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8610 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8612 for (; value2 != 0; value2 &= value2 - 1)
8620 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8621 bit_cost = COSTS_N_INSNS (bit_cost);
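/* The loops above count the bits set in the constant multiplier (each
   "value &= value - 1" step clears the lowest set bit), and BIT_COST then
   adds extra cost roughly in proportion to that count, scaled down by the
   processor's int_mul_bit_factor.  */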
8625 *total = sparc_costs->int_mulX + bit_cost;
8627 *total = sparc_costs->int_mul + bit_cost;
8634 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8644 *total = sparc_costs->float_div_df;
8646 *total = sparc_costs->float_div_sf;
8651 *total = sparc_costs->int_divX;
8653 *total = sparc_costs->int_div;
8660 *total = COSTS_N_INSNS (1);
8667 case UNSIGNED_FLOAT:
8671 case FLOAT_TRUNCATE:
8672 *total = sparc_costs->float_move;
8677 *total = sparc_costs->float_sqrt_df;
8679 *total = sparc_costs->float_sqrt_sf;
8684 *total = sparc_costs->float_cmp;
8686 *total = COSTS_N_INSNS (1);
8691 *total = sparc_costs->float_cmove;
8693 *total = sparc_costs->int_cmove;
8697 /* Handle the NAND vector patterns. */
8698 if (sparc_vector_mode_supported_p (GET_MODE (x))
8699 && GET_CODE (XEXP (x, 0)) == NOT
8700 && GET_CODE (XEXP (x, 1)) == NOT)
8702 *total = COSTS_N_INSNS (1);
8713 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
8714 This is achieved by means of a manual dynamic stack space allocation in
8715 the current frame. We make the assumption that SEQ doesn't contain any
8716 function calls, with the possible exception of calls to the PIC helper. */
8719 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8721 /* We must preserve the lowest 16 words for the register save area. */
8722 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
8723 /* We really need only 2 words of fresh stack space. */
8724 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
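/* The frame therefore grows temporarily by SIZE bytes: the 16 reserved
   words keep the register save area intact, and the two words just above
   them (at SPARC_STACK_BIAS + OFFSET) hold REG and REG2 while SEQ runs.  */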
8727 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
8728 SPARC_STACK_BIAS + offset));
8730 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
8731 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8733 emit_insn (gen_rtx_SET (VOIDmode,
8734 adjust_address (slot, word_mode, UNITS_PER_WORD),
8738 emit_insn (gen_rtx_SET (VOIDmode,
8740 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8741 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8742 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
8745 /* Output the assembler code for a thunk function. THUNK_DECL is the
8746 declaration for the thunk function itself, FUNCTION is the decl for
8747 the target function. DELTA is an immediate constant offset to be
8748 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8749 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8752 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8753 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8756 rtx this_rtx, insn, funexp;
8757 unsigned int int_arg_first;
8759 reload_completed = 1;
8760 epilogue_completed = 1;
8762 emit_note (NOTE_INSN_PROLOGUE_END);
8764 if (flag_delayed_branch)
8766 /* We will emit a regular sibcall below, so we need to instruct
8767 output_sibcall that we are in a leaf function. */
8768 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8770 /* This will cause final.c to invoke leaf_renumber_regs so we
8771 must behave as if we were in a not-yet-leafified function. */
8772 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8776 /* We will emit the sibcall manually below, so we will need to
8777 manually spill non-leaf registers. */
8778 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8780 /* We really are in a leaf function. */
8781 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8784 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8785 returns a structure, the structure return pointer is there instead. */
8786 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8787 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
8789 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
8791 /* Add DELTA. When possible use a plain add, otherwise load it into
8792 a register first. */
8795 rtx delta_rtx = GEN_INT (delta);
8797 if (! SPARC_SIMM13_P (delta))
8799 rtx scratch = gen_rtx_REG (Pmode, 1);
8800 emit_move_insn (scratch, delta_rtx);
8801 delta_rtx = scratch;
8804 /* THIS_RTX += DELTA. */
8805 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
8808 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
8811 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8812 rtx scratch = gen_rtx_REG (Pmode, 1);
8814 gcc_assert (vcall_offset < 0);
8816 /* SCRATCH = *THIS_RTX. */
8817 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
8819 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8820 may not have any available scratch register at this point. */
8821 if (SPARC_SIMM13_P (vcall_offset))
8823 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8824 else if (! fixed_regs[5]
8825 /* The below sequence is made up of at least 2 insns,
8826 while the default method may need only one. */
8827 && vcall_offset < -8192)
8829 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8830 emit_move_insn (scratch2, vcall_offset_rtx);
8831 vcall_offset_rtx = scratch2;
8835 rtx increment = GEN_INT (-4096);
8837 /* VCALL_OFFSET is a negative number whose typical range can be
8838 estimated as -32768..0 in 32-bit mode. In almost all cases
8839 it is therefore cheaper to emit multiple add insns than
8840 spilling and loading the constant into a register (at least
8842 while (! SPARC_SIMM13_P (vcall_offset))
8844 emit_insn (gen_add2_insn (scratch, increment));
8845 vcall_offset += 4096;
8847 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
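/* Worked example: for VCALL_OFFSET = -20000 the loop above emits four adds
   of -4096 (bringing the residue to -3616, which fits in a signed 13-bit
   immediate), and the remaining -3616 is folded into the load below.  */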
8850 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
8851 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8852 gen_rtx_PLUS (Pmode,
8854 vcall_offset_rtx)));
8856 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
8857 emit_insn (gen_add2_insn (this_rtx, scratch));
8860 /* Generate a tail call to the target function. */
8861 if (! TREE_USED (function))
8863 assemble_external (function);
8864 TREE_USED (function) = 1;
8866 funexp = XEXP (DECL_RTL (function), 0);
8868 if (flag_delayed_branch)
8870 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8871 insn = emit_call_insn (gen_sibcall (funexp));
8872 SIBLING_CALL_P (insn) = 1;
8876 /* The hoops we have to jump through in order to generate a sibcall
8877 without using delay slots... */
8878 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8882 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8883 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8885 /* Delay emitting the PIC helper function because it needs to
8886 change the section and we are emitting assembly code. */
8887 load_pic_register (true); /* clobbers %o7 */
8888 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8891 emit_and_preserve (seq, spill_reg, spill_reg2);
8893 else if (TARGET_ARCH32)
8895 emit_insn (gen_rtx_SET (VOIDmode,
8897 gen_rtx_HIGH (SImode, funexp)));
8898 emit_insn (gen_rtx_SET (VOIDmode,
8900 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8902 else /* TARGET_ARCH64 */
8904 switch (sparc_cmodel)
8908 /* The destination can serve as a temporary. */
8909 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8914 /* The destination cannot serve as a temporary. */
8915 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8917 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8920 emit_and_preserve (seq, spill_reg, 0);
8928 emit_jump_insn (gen_indirect_jump (scratch));
8933 /* Run just enough of rest_of_compilation to get the insns emitted.
8934 There's not really enough bulk here to make other passes such as
8935 instruction scheduling worth while. Note that use_thunk calls
8936 assemble_start_function and assemble_end_function. */
8937 insn = get_insns ();
8938 insn_locators_alloc ();
8939 shorten_branches (insn);
8940 final_start_function (insn, file, 1);
8941 final (insn, file, 1);
8942 final_end_function ();
8943 free_after_compilation (cfun);
8945 reload_completed = 0;
8946 epilogue_completed = 0;
8949 /* Return true if sparc_output_mi_thunk would be able to output the
8950 assembler code for the thunk function specified by the arguments
8951 it is passed, and false otherwise. */
8953 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
8954 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8955 HOST_WIDE_INT vcall_offset,
8956 const_tree function ATTRIBUTE_UNUSED)
8958 /* Bound the loop used in the default method above. */
8959 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8962 /* How to allocate a 'struct machine_function'. */
8964 static struct machine_function *
8965 sparc_init_machine_status (void)
8967 return GGC_CNEW (struct machine_function);
8970 /* Locate some local-dynamic symbol still in use by this function
8971 so that we can print its name in local-dynamic base patterns. */
8974 get_some_local_dynamic_name (void)
8978 if (cfun->machine->some_ld_name)
8979 return cfun->machine->some_ld_name;
8981 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8983 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8984 return cfun->machine->some_ld_name;
8990 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8995 && GET_CODE (x) == SYMBOL_REF
8996 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8998 cfun->machine->some_ld_name = XSTR (x, 0);
9005 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
9006 This is called from dwarf2out.c to emit call frame instructions
9007 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
9009 sparc_dwarf_handle_frame_unspec (const char *label,
9010 rtx pattern ATTRIBUTE_UNUSED,
9011 int index ATTRIBUTE_UNUSED)
9013 gcc_assert (index == UNSPECV_SAVEW);
9014 dwarf2out_window_save (label);

/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void
sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:  fputs ("\t.word\t%r_tls_dtpoff32(", file);  break;
    case 8:  fputs ("\t.xword\t%r_tls_dtpoff64(", file); break;
    default: gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs (")", file);
}
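
/* Illustrative example, not in the original source: with SIZE == 4 and X
   naming the symbol foo, the code above emits

        .word   %r_tls_dtpoff32(foo)                                        */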

/* Do whatever processing is required at the end of a file.  */

static void
sparc_file_end (void)
{
  /* If we haven't emitted the special PIC helper function, do so now.  */
  if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
    emit_pic_helper ();

  if (NEED_INDICATE_EXEC_STACK)
    file_end_indicate_exec_stack ();
}
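
/* Illustrative note, not in the original source: file_end_indicate_exec_stack
   emits the .note.GNU-stack marker section that linkers use to decide whether
   the program stack must be executable.  */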

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
sparc_mangle_type (const_tree type)
{
  if (!TARGET_64BIT
      && TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
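
/* Illustrative example, not in the original source: with the hook above in
   effect, 'void f (long double)' mangles as _Z1fg ('g' being the 128-bit
   float code) instead of the default _Z1fe.  */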

/* Expand code to perform an 8-bit or 16-bit compare and swap by doing
   a 32-bit compare and swap on the word containing the byte or half-word.  */

void
sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
{
  rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
  rtx addr = gen_reg_rtx (Pmode);
  rtx off = gen_reg_rtx (SImode);
  rtx oldv = gen_reg_rtx (SImode);
  rtx newv = gen_reg_rtx (SImode);
  rtx oldvalue = gen_reg_rtx (SImode);
  rtx newvalue = gen_reg_rtx (SImode);
  rtx res = gen_reg_rtx (SImode);
  rtx resv = gen_reg_rtx (SImode);
  rtx memsi, val, mask, end_label, loop_label, cc;

  emit_insn (gen_rtx_SET (VOIDmode, addr,
                          gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));

  if (Pmode != SImode)
    addr1 = gen_lowpart (SImode, addr1);
  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_AND (SImode, addr1, GEN_INT (3))));

  memsi = gen_rtx_MEM (SImode, addr);
  set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
  MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);

  val = force_reg (SImode, memsi);

  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_XOR (SImode, off,
                                       GEN_INT (GET_MODE (mem) == QImode
                                                ? 3 : 2))));

  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));

  if (GET_MODE (mem) == QImode)
    mask = force_reg (SImode, GEN_INT (0xff));
  else
    mask = force_reg (SImode, GEN_INT (0xffff));

  emit_insn (gen_rtx_SET (VOIDmode, mask,
                          gen_rtx_ASHIFT (SImode, mask, off)));

  emit_insn (gen_rtx_SET (VOIDmode, val,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       val)));

  oldval = gen_lowpart (SImode, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_ASHIFT (SImode, oldval, off)));

  newval = gen_lowpart_common (SImode, newval);
  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_ASHIFT (SImode, newval, off)));

  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_AND (SImode, oldv, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_AND (SImode, newv, mask)));

  end_label = gen_label_rtx ();
  loop_label = gen_label_rtx ();
  emit_label (loop_label);

  emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
                          gen_rtx_IOR (SImode, oldv, val)));

  emit_insn (gen_rtx_SET (VOIDmode, newvalue,
                          gen_rtx_IOR (SImode, newv, val)));

  emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));

  emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);

  emit_insn (gen_rtx_SET (VOIDmode, resv,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       res)));

  cc = gen_compare_reg_1 (NE, resv, val);
  emit_insn (gen_rtx_SET (VOIDmode, val, resv));

  /* Use cbranchcc4 to separate the compare and branch!  */
  emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
                                  cc, const0_rtx, loop_label));

  emit_label (end_label);

  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_AND (SImode, res, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_LSHIFTRT (SImode, res, off)));

  emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
}
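
/* Illustrative sketch only, not part of GCC: roughly the C equivalent of
   the RTL sequence emitted above for the QImode case, assuming a big-endian
   32-bit word and GCC's __sync_val_compare_and_swap builtin.  The helper
   name cas8_via_cas32 is hypothetical.  */

static unsigned char
cas8_via_cas32 (unsigned char *p, unsigned char oldval, unsigned char newval)
{
  unsigned int *wp = (unsigned int *) ((unsigned long) p & ~3UL);
  unsigned int shift = (3 - ((unsigned long) p & 3)) * 8;  /* big-endian */
  unsigned int mask = 0xffU << shift;
  unsigned int cur = *wp & ~mask;   /* the bytes we must not disturb */
  unsigned int prev;

  for (;;)
    {
      unsigned int old_w = cur | ((unsigned int) oldval << shift);
      unsigned int new_w = cur | ((unsigned int) newval << shift);

      prev = __sync_val_compare_and_swap (wp, old_w, new_w);
      if (prev == old_w)
        break;                      /* swap succeeded */
      if ((prev & ~mask) == cur)
        break;                      /* our byte differed: failure */
      cur = prev & ~mask;           /* other bytes changed: retry */
    }

  /* Like the expander, return the byte previously found in memory.  */
  return (unsigned char) ((prev & mask) >> shift);
}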

/* Implement TARGET_FRAME_POINTER_REQUIRED.  */
static bool
sparc_frame_pointer_required (void)
{
  return !(leaf_function_p () && only_leaf_regs_used ());
}
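
/* Illustrative note, not in the original source: a SPARC leaf function that
   uses only the registers permitted in leaf routines can run in its caller's
   register window, so it needs neither a stack frame nor a frame pointer.  */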

/* The way this is structured, we can't eliminate SFP (the soft frame
   pointer) in favor of SP if the frame pointer is required: we want to use
   the SFP->HFP (hard frame pointer) elimination in that case.  But the test
   in update_eliminables doesn't know we are assuming below that we only do
   the former elimination.  */

static bool
sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == HARD_FRAME_POINTER_REGNUM
          || !targetm.frame_pointer_required ());
}

#include "gt-sparc.h"