/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "gimple.h"
#include "langhooks.h"
#include "params.h"
#include "df.h"
/* Processor costs */
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};
static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};
static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
const struct processor_costs *sparc_costs = &cypress_costs;
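
/* Note on reading these tables: each latency is wrapped in
   COSTS_N_INSNS, GCC's "cost of N fast instructions" unit, so e.g.
   niagara's COSTS_N_INSNS (83) for fdivd tells the rtx-cost and
   scheduling machinery that a double divide is worth roughly 83 simple
   insns.  sparc_override_options below points sparc_costs at the table
   matching -mcpu/-mtune, and sparc_rtx_costs (installed as
   TARGET_RTX_COSTS further down) consumes it.  */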
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way how to find out if
   somebody does not branch between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;
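
/* Rough illustration (not exact bookkeeping): on top of APPARENT_FSIZE's
   locals, ACTUAL_FSIZE for a normal 32-bit frame also covers the 16-word
   register window save area and the outgoing argument slots, so the two
   values can differ substantially even for small functions.  */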
/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) alias_set_type sparc_sr_alias_set;

/* The alias set for the structure return value.  */
static GTY(()) alias_set_type struct_value_alias_set;
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};
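
/* Reading the remap table above: entries are indexed by hard register
   number and give the register a leaf function should use instead, -1
   meaning "no remapping allowed".  For example, incoming arguments
   %i0-%i5 and the return address register %i7 (hard regs 24-29 and 31)
   are remapped onto %o0-%o5 and %o7 (8-13 and 15), since a leaf
   function executes no `save' and thus works in its caller's window;
   %i6, the frame pointer, is deliberately left unmapped.  */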
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};
struct GTY(()) machine_function
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  bool leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static bool sparc_handle_option (size_t, const char *, int);
static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void emit_pic_helper (void);
static void load_pic_register (bool);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef OBJECT_FORMAT_ELF
static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
#endif
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, tree, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *, bool);
static bool sparc_promote_prototypes (const_tree);
static rtx sparc_struct_value_rtx (tree, int);
static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
						      int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
				     enum machine_mode, const_tree, bool);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
				    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch	name,		tune	arch */
  { (char *)0,	"default",	1,	1 },
  { (char *)0,	"-mcpu=",	1,	1 },
  { (char *)0,	"-mtune=",	1,	0 },
  { 0, 0, 0, 0 }
};
/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Whether an FPU option was specified.  */
static bool fpu_option_set = false;
/* Initialize the GCC target structure.  */

/* The sparc default is to use .half rather than .short for aligned
   HI objects.  Use .word instead of .long on non-ELF systems.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin
#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec
#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif
#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option
#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;
/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mfpu:
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}
/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { TARGET_CPU_niagara, "niagara" },
    { TARGET_CPU_niagara2, "niagara2" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
       The Fujitsu MB86934 is the recent sparclite chip, with an fpu.  */
    { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
    /* Although insns using %y are deprecated, it is a clear win on current
       ultrasparcs.  */
						    |MASK_DEPRECATED_V8_INSNS},
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T1 */
    { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    { "niagara2", PROCESSOR_NIAGARA2, MASK_ISA, MASK_V9},
    { 0, (enum processor_type) 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;
#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64bit archs to use 128 bit long double */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }
  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }
  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  gcc_assert (def->name);
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string != NULL)
	{
	  for (cpu = &cpu_table[0]; cpu->name; ++cpu)
	    if (! strcmp (sel->string, cpu->name))
	      {
		if (sel->set_tune_p)
		  sparc_cpu = cpu->processor;

		if (sel->set_arch_p)
		  {
		    target_flags &= ~cpu->disable;
		    target_flags |= cpu->enable;
		  }
		break;
	      }

	  if (! cpu->name)
	    error ("bad value (%s) for %s switch", sel->string, sel->name);
	}
    }
  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA
	  || sparc_cpu == PROCESSOR_NIAGARA2))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_64BIT)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire unique alias sets for our private stuff.  */
  sparc_sr_alias_set = new_alias_set ();
  struct_value_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;
  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    };
#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
    set_param_value ("simultaneous-prefetches",
		     ((sparc_cpu == PROCESSOR_ULTRASPARC
		       || sparc_cpu == PROCESSOR_NIAGARA
		       || sparc_cpu == PROCESSOR_NIAGARA2)
		      ? 2
		      : (sparc_cpu == PROCESSOR_ULTRASPARC3
			 ? 8 : 3)));
  if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
    set_param_value ("l1-cache-line-size",
		     ((sparc_cpu == PROCESSOR_ULTRASPARC
		       || sparc_cpu == PROCESSOR_ULTRASPARC3
		       || sparc_cpu == PROCESSOR_NIAGARA
		       || sparc_cpu == PROCESSOR_NIAGARA2)
		      ? 64 : 32));
}
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}
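
/* These six codes are exactly the ones the V9 branch-on-register and
   move-on-register instructions implement (brz, brlez, brlz, brnz,
   brgz, brgez and the movr counterparts); unsigned codes like GEU or
   LTU make no sense when the comparison is against the constant zero,
   so they are deliberately absent.  */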
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
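
/* Worked example for the three predicates above: 1.5f has the SFmode
   image 0x3fc00000, which is too big for a signed 13-bit immediate but
   has its low 10 bits clear, so only fp_sethi_p accepts it (a single
   sethi).  An image like 0x00000123 fits SIMM13 and satisfies fp_mov_p,
   while 0x3fc00123 fails both tests and is left to fp_high_losum_p's
   two-insn sethi+or sequence.  */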
/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != HIGH
      && sparc_tls_referenced_p (operands [1]))
    {
      rtx sym = operands[1];
      rtx addend = NULL;

      if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
	{
	  addend = XEXP (XEXP (sym, 0), 1);
	  sym = XEXP (XEXP (sym, 0), 0);
	}

      gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));

      sym = legitimize_tls_address (sym);
      if (addend)
	{
	  sym = gen_rtx_PLUS (mode, sym, addend);
	  sym = force_operand (sym, operands[0]);
	}
      operands[1] = sym;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = legitimize_pic_address (operands[1], mode, 0);

      /* VxWorks does not impose a fixed gap between segments; the run-time
	 gap can be different from the object-file gap.  We therefore can't
	 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
	 are absolutely sure that X is in the same segment as the GOT.
	 Unfortunately, the flexibility of linker scripts means that we
	 can't be sure of that in general, so assume that _G_O_T_-relative
	 accesses are never valid on VxWorks.  */
      if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
	{
	  if (mode == SImode)
	    {
	      emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }

	  if (mode == DImode)
	    {
	      gcc_assert (TARGET_ARCH64);
	      emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1] = legitimize_pic_address (operands[1],
						mode,
						(reload_in_progress ?
						 operands[0] : NULL_RTX));
	  return false;
	}
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
	 not storing directly into memory.  So fix this up to avoid
	 crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && const_zero_operand (operands[1], mode))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && (reload_completed || reload_in_progress))))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
		  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      GEN_INT (INTVAL (op1)
				       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
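
/* Example of the CONST_INT path above: loading 0x12345678 produces
     (set temp (const_int 0x12345400))      ; INTVAL & ~0x3ff, i.e. sethi %hi()
     (set op0 (ior temp (const_int 0x278))) ; INTVAL & 0x3ff, i.e. or %lo()
   as two explicit SETs precisely so CSE can reuse the intermediate.  */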
/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

static void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp1
	 or	%temp1, %lo(symbol), %reg  */
      if (temp)
	temp1 = temp;  /* op0 is allowed.  */
      else
	temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      if (temp)
	{
	  temp1 = op0;
	  temp2 = op0;
	  temp3 = temp;  /* op0 is allowed.  */
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
			      gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 sllx	%temp3, 32, %temp4
	 or	%temp4, %temp2, %temp5
	 or	%temp5, %lo(symbol), %reg  */
      if (temp)
	{
	  /* It is possible that one of the registers we got for operands[2]
	     might coincide with that of operands[0] (which is why we made
	     it TImode).  Pick the other one to use as our scratch.  */
	  if (rtx_equal_p (temp, op0))
	    {
	      gcc_assert (ti_temp);
	      temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
	    }
	  temp1 = op0;
	  temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	  temp3 = op0;
	  temp4 = op0;
	  temp5 = op0;
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	  temp4 = gen_reg_rtx (DImode);
	  temp5 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
			      gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			add	%temp1, EMBMEDANY_BASE_REG, %temp2
			or	%temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  if (temp)
	    {
	      temp1 = temp;  /* op0 is allowed.  */
	      temp2 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (temp2, temp1));
	  emit_insn (gen_embmedany_losum (op0, temp2, op1));
	}
      else
	{
	  /* Text segment:	sethi	%uhi(symbol), %temp1
				sethi	%hi(symbol), %temp2
				or	%temp1, %ulo(symbol), %temp3
				sllx	%temp3, 32, %temp4
				or	%temp4, %temp2, %temp5
				or	%temp5, %lo(symbol), %reg  */
	  if (temp)
	    {
	      /* It is possible that one of the registers we got for operands[2]
		 might coincide with that of operands[0] (which is why we made
		 it TImode).  Pick the other one to use as our scratch.  */
	      if (rtx_equal_p (temp, op0))
		{
		  gcc_assert (ti_temp);
		  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
		}
	      temp1 = op0;
	      temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	      temp3 = op0;
	      temp4 = op0;
	      temp5 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	      temp3 = gen_reg_rtx (DImode);
	      temp4 = gen_reg_rtx (DImode);
	      temp5 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_textuhi (temp1, op1));
	  emit_insn (gen_embmedany_texthi  (temp2, op1));
	  emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, temp4,
				  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, temp5,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	  emit_insn (gen_embmedany_textlo (op0, temp5, op1));
	}
      break;

    default:
      gcc_unreachable ();
    }
}
#if HOST_BITS_PER_WIDE_INT == 32
void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH, they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits, and matches
   a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}
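
/* For instance, gen_safe_HIGH64 (t, 0x12345678) builds
   (set t (const_int 0x12345400)); masking away the low 10 bits makes
   the note describe exactly the value the matching sethi leaves in the
   register, so CSE never sees an over-precise REG_EQUAL claim.  */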
/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-(HOST_WIDE_INT)0x400
						   | (low_bits & 0x3ff)))));
	}
    }
}
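
/* Worked example of the is_neg path above: for the 64-bit constant
   0xffffffff00000fff, low_bits is 0x00000fff, so high_bits becomes
   ~low_bits & 0xffffffff = 0xfffff000 and is loaded with one sethi.
   The low 10 bits are all ones (0x3ff), so the one's-complement
   shortcut fires and a single NOT of temp yields the final value.  */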
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT high_bits,
			       unsigned HOST_WIDE_INT low_immediate,
			       int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
			  gen_rtx_ASHIFT (DImode, temp2,
					  GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
			    gen_safe_OR64 (op0, low_immediate)));
}
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
					     unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
				unsigned HOST_WIDE_INT high_bits,
				unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				sub_temp,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp3,
				  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp3)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	}
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
	 painful.  However we do still make an attempt to
	 avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low1)));
	  sub_temp = op0;
	  to_shift = 12;
	}
      else
	{
	  to_shift += 12;
	}
      if (low2 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low2)));
	  sub_temp = op0;
	  to_shift = 8;
	}
      else
	{
	  to_shift += 8;
	}
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (to_shift))));
      if (low3 != const0_rtx)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}
/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
			unsigned HOST_WIDE_INT low_bits,
			int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
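
/* Worked example: for the constant 0x0000000ff0000000 the scans above
   find lowest_bit_set = 28 (in low_bits) and highest_bit_set = 35 (in
   high_bits), and every bit in between is set, so the callers learn
   this is one contiguous 8-bit field - exactly the shape the short
   mov/sllx and sethi/sllx sequences below are built to exploit.  */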
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
		   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
							unsigned HOST_WIDE_INT,
							int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
			  unsigned HOST_WIDE_INT low_bits,
			  int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
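
/* Example: with high_bits = 0, low_bits = 0xff000000, lowest_bit_set = 24
   and shift = 10, this returns (0xff000000 >> 24) << 10 = 0x3fc00.  That
   value satisfies SPARC_SETHI_P, so the caller can materialize it with
   one sethi and shift it back up by lowest_bit_set - 10 positions.  */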
/* Here we are sure to be arch64 and this is an integer constant
   being loaded into a register.  Emit the most efficient
   insn sequence possible.  Detection of all the 1-insn cases
   has been done already.  */
void
sparc_emit_set_const64 (rtx op0, rtx op1)
{
  unsigned HOST_WIDE_INT high_bits, low_bits;
  int lowest_bit_set, highest_bit_set;
  int all_bits_between_are_set;
  rtx temp = 0;

  /* Sanity check that we know what we are working with.  */
  gcc_assert (TARGET_ARCH64
	      && (GET_CODE (op0) == SUBREG
		  || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));

  if (reload_in_progress || reload_completed)
    temp = op0;

  if (GET_CODE (op1) != CONST_INT)
    {
      sparc_emit_set_symbolic_const64 (op0, op1, temp);
      return;
    }

  if (! temp)
    temp = gen_reg_rtx (DImode);

  high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
  low_bits = (INTVAL (op1) & 0xffffffff);

  /* low_bits	bits 0  --> 31
     high_bits	bits 32 --> 63  */

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  /* First try for a 2-insn sequence.  */

  /* These situations are preferred because the optimizer can
   * do more things with them:
   * 1) mov	-1, %reg
   *    sllx	%reg, shift, %reg
   * 2) mov	-1, %reg
   *    srlx	%reg, shift, %reg
   * 3) mov	some_small_const, %reg
   *    sllx	%reg, shift, %reg
   */
  if (((highest_bit_set == 63
	|| lowest_bit_set == 0)
       && all_bits_between_are_set != 0)
      || ((highest_bit_set - lowest_bit_set) < 12))
    {
      HOST_WIDE_INT the_const = -1;
      int shift = lowest_bit_set;

      if ((highest_bit_set != 63
	   && lowest_bit_set != 0)
	  || all_bits_between_are_set == 0)
	{
	  the_const =
	    create_simple_focus_bits (high_bits, low_bits,
				      lowest_bit_set, 0);
	}
      else if (lowest_bit_set == 0)
	shift = -(63 - highest_bit_set);

      gcc_assert (SPARC_SIMM13_P (the_const));
      gcc_assert (shift != 0);

      emit_insn (gen_safe_SET64 (temp, the_const));
      if (shift > 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_ASHIFT (DImode,
						temp,
						GEN_INT (shift))));
      else if (shift < 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_LSHIFTRT (DImode,
						  temp,
						  GEN_INT (-shift))));
      return;
    }

  /* Now a range of 22 or less bits set somewhere.
   * 1) sethi	%hi(focus_bits), %reg
   *    sllx	%reg, shift, %reg
   * 2) sethi	%hi(focus_bits), %reg
   *    srlx	%reg, shift, %reg
   */
  if ((highest_bit_set - lowest_bit_set) < 21)
    {
      unsigned HOST_WIDE_INT focus_bits =
	create_simple_focus_bits (high_bits, low_bits,
				  lowest_bit_set, 10);

      gcc_assert (SPARC_SETHI_P (focus_bits));
      gcc_assert (lowest_bit_set != 10);

      emit_insn (gen_safe_HIGH64 (temp, focus_bits));

      /* If lowest_bit_set == 10 then a sethi alone could have done it.  */
      if (lowest_bit_set < 10)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_LSHIFTRT (DImode, temp,
						  GEN_INT (10 - lowest_bit_set))));
      else if (lowest_bit_set > 10)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_ASHIFT (DImode, temp,
						GEN_INT (lowest_bit_set - 10))));
      return;
    }

  /* 1) sethi	%hi(low_bits), %reg
   *    or	%reg, %lo(low_bits), %reg
   * 2) sethi	%hi(~low_bits), %reg
   *	xor	%reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
   */
  if (high_bits == 0
      || high_bits == 0xffffffff)
    {
      sparc_emit_set_const64_quick1 (op0, temp, low_bits,
				     (high_bits == 0xffffffff));
      return;
    }

  /* Now, try 3-insn sequences.  */

  /* 1) sethi	%hi(high_bits), %reg
   *    or	%reg, %lo(high_bits), %reg
   *    sllx	%reg, 32, %reg
   */
  if (low_bits == 0)
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
      return;
    }

  /* We may be able to do something quick
     when the constant is negated, so try that.  */
  if (const64_is_2insns ((~high_bits) & 0xffffffff,
			 (~low_bits) & 0xfffffc00))
    {
      /* NOTE: The trailing bits get XOR'd so we need the
	 non-negated bits, not the negated ones.  */
      unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;

      if ((((~high_bits) & 0xffffffff) == 0
	   && ((~low_bits) & 0x80000000) == 0)
	  || (((~high_bits) & 0xffffffff) == 0xffffffff
	      && ((~low_bits) & 0x80000000) != 0))
	{
	  unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);

	  if ((SPARC_SETHI_P (fast_int)
	       && (~high_bits & 0xffffffff) == 0)
	      || SPARC_SIMM13_P (fast_int))
	    emit_insn (gen_safe_SET64 (temp, fast_int));
	  else
	    sparc_emit_set_const64 (temp, GEN_INT (fast_int));
	}
      else
	{
	  rtx negated_const;
	  negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
				   (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
	  sparc_emit_set_const64 (temp, negated_const);
	}

      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if (trailing_bits == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode,
				  op0,
				  gen_safe_XOR64 (temp,
						  (-0x400 | trailing_bits))));
	}
      return;
    }

  /* 1) sethi	%hi(xxx), %reg
   *    or	%reg, %lo(xxx), %reg
   *	sllx	%reg, yyy, %reg
   *
   * ??? This is just a generalized version of the low_bits==0
   * thing above, FIXME...
   */
  if ((highest_bit_set - lowest_bit_set) < 32)
    {
      unsigned HOST_WIDE_INT focus_bits =
	create_simple_focus_bits (high_bits, low_bits,
				  lowest_bit_set, 0);

      /* We can't get here in this state.  */
      gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);

      /* So what we know is that the set bits straddle the
	 middle of the 64-bit word.  */
      sparc_emit_set_const64_quick2 (op0, temp,
				     focus_bits, 0,
				     lowest_bit_set);
      return;
    }

  /* 1) sethi	%hi(high_bits), %reg
   *    or	%reg, %lo(high_bits), %reg
   *    sllx	%reg, 32, %reg
   *	or	%reg, low_bits, %reg
   */
  if (SPARC_SIMM13_P(low_bits)
      && ((int)low_bits > 0))
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
      return;
    }

  /* The easiest way when all else fails, is full decomposition.  */
#if 0
  printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
	  high_bits, low_bits, ~high_bits, ~low_bits);
#endif
  sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
}
#endif /* HOST_BITS_PER_WIDE_INT == 32 */
/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
   return the mode to be used for the comparison.  For floating-point,
   CCFP[E]mode is used.  CC_NOOVmode should be used when the first operand
   is a PLUS, MINUS, NEG, or ASHIFT.  CCmode should be used when no special
   processing is needed.  */

enum machine_mode
select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      switch (op)
	{
	case EQ:
	case NE:
	case UNORDERED:
	case ORDERED:
	case UNLT:
	case UNLE:
	case UNGT:
	case UNGE:
	case UNEQ:
	case LTGT:
	  return CCFPmode;

	case LT:
	case LE:
	case GT:
	case GE:
	  return CCFPEmode;

	default:
	  gcc_unreachable ();
	}
    }
  else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
	   || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
    {
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
	return CCX_NOOVmode;
      else
	return CC_NOOVmode;
    }
  else
    {
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
	return CCXmode;
      else
	return CCmode;
    }
}
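
/* Example: a signed comparison of (plus:SI a b) against zero yields
   CC_NOOVmode here, directing later passes to the overflow-insensitive
   condition codes that an addcc sets; a plain DImode register compare
   under TARGET_ARCH64 gets CCXmode instead.  */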
2006 /* Emit the compare insn and return the CC reg for a CODE comparison
2007 with operands X and Y. */
2010 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2012 enum machine_mode mode;
2015 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2018 mode = SELECT_CC_MODE (code, x, y);
2020 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2021 fcc regs (cse can't tell they're really call clobbered regs and will
2022 remove a duplicate comparison even if there is an intervening function
2023 call - it will then try to reload the cc reg via an int reg which is why
2024 we need the movcc patterns). It is possible to provide the movcc
2025 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2026 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2027 to tell cse that CCFPE mode registers (even pseudos) are call
2030 /* ??? This is an experiment. Rather than making changes to cse which may
2031 or may not be easy/clean, we do our own cse. This is possible because
2032 we will generate hard registers. Cse knows they're call clobbered (it
2033 doesn't know the same thing about pseudos). If we guess wrong, no big
2034 deal, but if we win, great! */
2036 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2037 #if 1 /* experiment */
2040 /* We cycle through the registers to ensure they're all exercised. */
2041 static int next_fcc_reg = 0;
2042 /* Previous x,y for each fcc reg. */
2043 static rtx prev_args[4][2];
2045 /* Scan prev_args for x,y. */
2046 for (reg = 0; reg < 4; reg++)
2047 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2052 prev_args[reg][0] = x;
2053 prev_args[reg][1] = y;
2054 next_fcc_reg = (next_fcc_reg + 1) & 3;
2056 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2059 cc_reg = gen_reg_rtx (mode);
2060 #endif /* ! experiment */
2061 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2062 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2064 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2066 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD.  If we do, this
2067 will only result in an unrecognizable insn, so there is no point in asserting.  */
2068 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2074 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2077 gen_compare_reg (rtx cmp)
2079 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2082 /* This function is used for v9 only.
2083 DEST is the target of the Scc insn.
2084 CODE is the code for an Scc's comparison.
2085 X and Y are the values we compare.
2087 This function is needed to turn
2090 (gt (reg:CCX 100 %icc)
2094 (gt:DI (reg:CCX 100 %icc)
2097 I.e. the instruction recognizer needs to see the mode of the comparison to
2098 find the right instruction. We could use "gt:DI" right in the
2099 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2102 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2105 && (GET_MODE (x) == DImode
2106 || GET_MODE (dest) == DImode))
2109 /* Try to use the movrCC insns. */
2111 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2113 && v9_regcmp_p (compare_code))
2118 /* Special case for op0 != 0.  This can be done with one instruction if dest == op0.  */
2121 if (compare_code == NE
2122 && GET_MODE (dest) == DImode
2123 && rtx_equal_p (op0, dest))
2125 emit_insn (gen_rtx_SET (VOIDmode, dest,
2126 gen_rtx_IF_THEN_ELSE (DImode,
2127 gen_rtx_fmt_ee (compare_code, DImode,
2134 if (reg_overlap_mentioned_p (dest, op0))
2136 /* Handle the case where dest == x.
2137 We "early clobber" the result. */
2138 op0 = gen_reg_rtx (GET_MODE (x));
2139 emit_move_insn (op0, x);
2142 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2143 if (GET_MODE (op0) != DImode)
2145 temp = gen_reg_rtx (DImode);
2146 convert_move (temp, op0, 0);
2150 emit_insn (gen_rtx_SET (VOIDmode, dest,
2151 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2152 gen_rtx_fmt_ee (compare_code, DImode,
2160 x = gen_compare_reg_1 (compare_code, x, y);
2163 gcc_assert (GET_MODE (x) != CC_NOOVmode
2164 && GET_MODE (x) != CCX_NOOVmode);
2166 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2167 emit_insn (gen_rtx_SET (VOIDmode, dest,
2168 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2169 gen_rtx_fmt_ee (compare_code,
2170 GET_MODE (x), x, y),
2171 const1_rtx, dest)));
2177 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2178 without jumps using the addx/subx instructions. */
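/* A sketch of the addx/subx trick, assuming standard SPARC carry semantics
   (illustrative; the exact sequences are produced by the patterns used
   below):

       sltu:  subcc  %o1, %o2, %g0    ! carry set iff %o1 < %o2 unsigned
              addx   %g0, 0, %o0      ! %o0 = 0 + 0 + carry

       sgeu:  subcc  %o1, %o2, %g0
              subx   %g0, -1, %o0     ! %o0 = 0 - (-1) - carry = 1 - carry

   so the boolean result lands in a register without any branch.  */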
2181 emit_scc_insn (rtx operands[])
2188 /* The quad-word fp compare library routines all return nonzero to indicate
2189 true, which is different from the equivalent libgcc routines, so we must
2190 handle them specially here. */
2191 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2193 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2194 GET_CODE (operands[1]));
2195 operands[2] = XEXP (operands[1], 0);
2196 operands[3] = XEXP (operands[1], 1);
2199 code = GET_CODE (operands[1]);
2203 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2204 more applications). The exception to this is "reg != 0" which can
2205 be done in one instruction on v9 (so we do it). */
2208 if (GET_MODE (x) == SImode)
2210 rtx pat = gen_seqsi_special (operands[0], x, y);
2214 else if (GET_MODE (x) == DImode)
2216 rtx pat = gen_seqdi_special (operands[0], x, y);
2224 if (GET_MODE (x) == SImode)
2226 rtx pat = gen_snesi_special (operands[0], x, y);
2230 else if (GET_MODE (x) == DImode)
2232 rtx pat = gen_snedi_special (operands[0], x, y);
2238 /* For the rest, on v9 we can use conditional moves. */
2242 if (gen_v9_scc (operands[0], code, x, y))
2246 /* We can do LTU and GEU using the addx/subx instructions too. And
2247 for GTU/LEU, if both operands are registers, swap them and fall
2248 back to the easy case. */
2249 if (code == GTU || code == LEU)
2251 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2252 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2257 code = swap_condition (code);
2261 if (code == LTU || code == GEU)
2263 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2264 gen_rtx_fmt_ee (code, SImode,
2265 gen_compare_reg_1 (code, x, y),
2270 /* Nope, do branches. */
2274 /* Emit a conditional jump insn for the v9 architecture using comparison code
2275 CODE and jump target LABEL.
2276 This function exists to take advantage of the v9 brxx insns. */
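/* Illustrative sketch: instead of the condition-code sequence

       cmp   %o0, 0
       bne   label

   the v9 brxx family compares a register against zero directly, e.g.

       brnz  %o0, label

   which is why this helper bypasses the CC register entirely.  */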
2279 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2281 emit_jump_insn (gen_rtx_SET (VOIDmode,
2283 gen_rtx_IF_THEN_ELSE (VOIDmode,
2284 gen_rtx_fmt_ee (code, GET_MODE (op0),
2286 gen_rtx_LABEL_REF (VOIDmode, label),
2291 emit_conditional_branch_insn (rtx operands[])
2293 /* The quad-word fp compare library routines all return nonzero to indicate
2294 true, which is different from the equivalent libgcc routines, so we must
2295 handle them specially here. */
2296 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2298 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2299 GET_CODE (operands[0]));
2300 operands[1] = XEXP (operands[0], 0);
2301 operands[2] = XEXP (operands[0], 1);
2304 if (TARGET_ARCH64 && operands[2] == const0_rtx
2305 && GET_CODE (operands[1]) == REG
2306 && GET_MODE (operands[1]) == DImode)
2308 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2312 operands[1] = gen_compare_reg (operands[0]);
2313 operands[2] = const0_rtx;
2314 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2315 operands[1], operands[2]);
2316 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2321 /* Generate a DFmode part of a hard TFmode register.
2322 REG is the TFmode hard register, LOW is 1 for the
2323 low 64 bits of the register and 0 otherwise.  */
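/* For example (an informal reading of the arithmetic below): on this
   big-endian target a TFmode value in %f0 keeps its high-order DFmode half
   in %f0 and its low-order half in %f2, hence the step of 2 for FP
   registers; 64-bit integer registers holding a TFmode pair step by 1.  */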
2326 gen_df_reg (rtx reg, int low)
2328 int regno = REGNO (reg);
2330 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2331 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2332 return gen_rtx_REG (DFmode, regno);
2335 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2336 Unlike normal calls, TFmode operands are passed by reference. It is
2337 assumed that no more than 3 operands are required. */
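/* Informally, a binary operation therefore becomes a call shaped like

       _Qp_xxx (&result, &op1, &op2)

   with every TFmode value travelling as a pointer (the actual routine name
   is supplied by the callers; _Qp_xxx is only an illustrative stand-in).  */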
2340 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2342 rtx ret_slot = NULL, arg[3], func_sym;
2345 /* We only expect to be called for conversions, unary, and binary ops. */
2346 gcc_assert (nargs == 2 || nargs == 3);
2348 for (i = 0; i < nargs; ++i)
2350 rtx this_arg = operands[i];
2353 /* TFmode arguments and return values are passed by reference. */
2354 if (GET_MODE (this_arg) == TFmode)
2356 int force_stack_temp;
2358 force_stack_temp = 0;
2359 if (TARGET_BUGGY_QP_LIB && i == 0)
2360 force_stack_temp = 1;
2362 if (GET_CODE (this_arg) == MEM
2363 && ! force_stack_temp)
2364 this_arg = XEXP (this_arg, 0);
2365 else if (CONSTANT_P (this_arg)
2366 && ! force_stack_temp)
2368 this_slot = force_const_mem (TFmode, this_arg);
2369 this_arg = XEXP (this_slot, 0);
2373 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2375 /* Operand 0 is the return value. We'll copy it out later. */
2377 emit_move_insn (this_slot, this_arg);
2379 ret_slot = this_slot;
2381 this_arg = XEXP (this_slot, 0);
2388 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2390 if (GET_MODE (operands[0]) == TFmode)
2393 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2394 arg[0], GET_MODE (arg[0]),
2395 arg[1], GET_MODE (arg[1]));
2397 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2398 arg[0], GET_MODE (arg[0]),
2399 arg[1], GET_MODE (arg[1]),
2400 arg[2], GET_MODE (arg[2]));
2403 emit_move_insn (operands[0], ret_slot);
2409 gcc_assert (nargs == 2);
2411 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2412 GET_MODE (operands[0]), 1,
2413 arg[1], GET_MODE (arg[1]));
2415 if (ret != operands[0])
2416 emit_move_insn (operands[0], ret);
2420 /* Expand soft-float TFmode calls to SPARC ABI routines.  */
2423 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2445 emit_soft_tfmode_libcall (func, 3, operands);
2449 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2453 gcc_assert (code == SQRT);
2456 emit_soft_tfmode_libcall (func, 2, operands);
2460 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2467 switch (GET_MODE (operands[1]))
2480 case FLOAT_TRUNCATE:
2481 switch (GET_MODE (operands[0]))
2495 switch (GET_MODE (operands[1]))
2500 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2510 case UNSIGNED_FLOAT:
2511 switch (GET_MODE (operands[1]))
2516 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2527 switch (GET_MODE (operands[0]))
2541 switch (GET_MODE (operands[0]))
2558 emit_soft_tfmode_libcall (func, 2, operands);
2561 /* Expand a hard-float TFmode operation.  All arguments must be in registers.  */
2565 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2569 if (GET_RTX_CLASS (code) == RTX_UNARY)
2571 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2572 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2576 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2577 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2578 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2579 operands[1], operands[2]);
2582 if (register_operand (operands[0], VOIDmode))
2585 dest = gen_reg_rtx (GET_MODE (operands[0]));
2587 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2589 if (dest != operands[0])
2590 emit_move_insn (operands[0], dest);
2594 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2596 if (TARGET_HARD_QUAD)
2597 emit_hard_tfmode_operation (code, operands);
2599 emit_soft_tfmode_binop (code, operands);
2603 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2605 if (TARGET_HARD_QUAD)
2606 emit_hard_tfmode_operation (code, operands);
2608 emit_soft_tfmode_unop (code, operands);
2612 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2614 if (TARGET_HARD_QUAD)
2615 emit_hard_tfmode_operation (code, operands);
2617 emit_soft_tfmode_cvt (code, operands);
2620 /* Return nonzero if a branch/jump/call instruction will be emitting a
2621 nop into its delay slot. */
2624 empty_delay_slot (rtx insn)
2628 /* If no previous instruction (should not happen), return true. */
2629 if (PREV_INSN (insn) == NULL)
2632 seq = NEXT_INSN (PREV_INSN (insn));
2633 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2639 /* Return nonzero if TRIAL can go into the call delay slot. */
2642 tls_call_delay (rtx trial)
2647 /* Binutils allows
     call __tls_get_addr, %tgd_call (foo)
2648 add %l7, %o0, %o0, %tgd_add (foo)
2649 while Sun as/ld does not. */
2650 if (TARGET_GNU_TLS || !TARGET_TLS)
2653 pat = PATTERN (trial);
2655 /* We must reject tgd_add{32|64}, i.e.
2656 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2657 and tldm_add{32|64}, i.e.
2658 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM))) for Sun as/ld.  */
2660 if (GET_CODE (pat) == SET
2661 && GET_CODE (SET_SRC (pat)) == PLUS)
2663 rtx unspec = XEXP (SET_SRC (pat), 1);
2665 if (GET_CODE (unspec) == UNSPEC
2666 && (XINT (unspec, 1) == UNSPEC_TLSGD
2667 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2674 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2675 instruction. RETURN_P is true if the v9 variant 'return' is to be
2676 considered in the test too.
2678 TRIAL must be a SET whose destination is a REG appropriate for the
2679 'restore' instruction or, if RETURN_P is true, for the 'return' instruction.  */
2683 eligible_for_restore_insn (rtx trial, bool return_p)
2685 rtx pat = PATTERN (trial);
2686 rtx src = SET_SRC (pat);
2688 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2689 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2690 && arith_operand (src, GET_MODE (src)))
2693 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2695 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2698 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2699 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2700 && arith_double_operand (src, GET_MODE (src)))
2701 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2703 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2704 else if (! TARGET_FPU && register_operand (src, SFmode))
2707 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2708 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2711 /* If we have the 'return' instruction, anything that does not use
2712 local or output registers and can go into a delay slot wins. */
2713 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2714 && (get_attr_in_uncond_branch_delay (trial)
2715 == IN_UNCOND_BRANCH_DELAY_TRUE))
2718 /* The 'restore src1,src2,dest' pattern for SImode. */
2719 else if (GET_CODE (src) == PLUS
2720 && register_operand (XEXP (src, 0), SImode)
2721 && arith_operand (XEXP (src, 1), SImode))
2724 /* The 'restore src1,src2,dest' pattern for DImode. */
2725 else if (GET_CODE (src) == PLUS
2726 && register_operand (XEXP (src, 0), DImode)
2727 && arith_double_operand (XEXP (src, 1), DImode))
2730 /* The 'restore src1,%lo(src2),dest' pattern. */
2731 else if (GET_CODE (src) == LO_SUM
2732 && ! TARGET_CM_MEDMID
2733 && ((register_operand (XEXP (src, 0), SImode)
2734 && immediate_operand (XEXP (src, 1), SImode))
2736 && register_operand (XEXP (src, 0), DImode)
2737 && immediate_operand (XEXP (src, 1), DImode))))
2740 /* The 'restore src,src,dest' pattern. */
2741 else if (GET_CODE (src) == ASHIFT
2742 && (register_operand (XEXP (src, 0), SImode)
2743 || register_operand (XEXP (src, 0), DImode))
2744 && XEXP (src, 1) == const1_rtx)
2750 /* Return nonzero if TRIAL can go into the function return's delay slot.  */
2754 eligible_for_return_delay (rtx trial)
2758 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2761 if (get_attr_length (trial) != 1)
2764 /* If there are any call-saved registers, we should check that TRIAL
2765 does not reference them.  For now just make it easy.  */
2769 /* If the function uses __builtin_eh_return, the eh_return machinery
2770 occupies the delay slot. */
2771 if (crtl->calls_eh_return)
2774 /* In the case of a true leaf function, anything can go into the slot. */
2775 if (sparc_leaf_function_p)
2776 return get_attr_in_uncond_branch_delay (trial)
2777 == IN_UNCOND_BRANCH_DELAY_TRUE;
2779 pat = PATTERN (trial);
2781 /* Otherwise, only operations which can be done in tandem with
2782 a `restore' or `return' insn can go into the delay slot. */
2783 if (GET_CODE (SET_DEST (pat)) != REG
2784 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2787 /* If this instruction sets up a floating-point register and we have a return
2788 instruction, it can probably go in.  But restore will not work with FP_REGS.  */
2790 if (REGNO (SET_DEST (pat)) >= 32)
2792 && ! epilogue_renumber (&pat, 1)
2793 && (get_attr_in_uncond_branch_delay (trial)
2794 == IN_UNCOND_BRANCH_DELAY_TRUE));
2796 return eligible_for_restore_insn (trial, true);
2799 /* Return nonzero if TRIAL can go into the sibling call's delay slot.  */
2803 eligible_for_sibcall_delay (rtx trial)
2807 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2810 if (get_attr_length (trial) != 1)
2813 pat = PATTERN (trial);
2815 if (sparc_leaf_function_p)
2817 /* If the tail call is done using the call instruction,
2818 we have to restore %o7 in the delay slot. */
2819 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2822 /* %g1 is used to build the function address.  */
2823 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2829 /* Otherwise, only operations which can be done in tandem with
2830 a `restore' insn can go into the delay slot. */
2831 if (GET_CODE (SET_DEST (pat)) != REG
2832 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2833 || REGNO (SET_DEST (pat)) >= 32)
2836 /* If it mentions %o7, it can't go in, because sibcall will clobber it before the restore.  */
2838 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2841 return eligible_for_restore_insn (trial, false);
2845 short_branch (int uid1, int uid2)
2847 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2849 /* Leave a few words of "slop". */
2850 if (delta >= -1023 && delta <= 1022)
2856 /* Return nonzero if REG is not used after INSN.
2857 We assume REG is a reload reg, and therefore does
2858 not live past labels or calls or jumps.  */
2860 reg_unused_after (rtx reg, rtx insn)
2862 enum rtx_code code, prev_code = UNKNOWN;
2864 while ((insn = NEXT_INSN (insn)))
2866 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2869 code = GET_CODE (insn);
2870 if (GET_CODE (insn) == CODE_LABEL)
2875 rtx set = single_set (insn);
2876 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2879 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2881 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2889 /* Determine if it's legal to put X into the constant pool. This
2890 is not possible if X contains the address of a symbol that is
2891 not constant (TLS) or not known at final link time (PIC). */
2894 sparc_cannot_force_const_mem (rtx x)
2896 switch (GET_CODE (x))
2901 /* Accept all non-symbolic constants. */
2905 /* Labels are OK iff we are non-PIC. */
2906 return flag_pic != 0;
2909 /* 'Naked' TLS symbol references are never OK,
2910 non-TLS symbols are OK iff we are non-PIC. */
2911 if (SYMBOL_REF_TLS_MODEL (x))
2914 return flag_pic != 0;
2917 return sparc_cannot_force_const_mem (XEXP (x, 0));
2920 return sparc_cannot_force_const_mem (XEXP (x, 0))
2921 || sparc_cannot_force_const_mem (XEXP (x, 1));
2930 static GTY(()) char pic_helper_symbol_name[256];
2931 static GTY(()) rtx pic_helper_symbol;
2932 static GTY(()) bool pic_helper_emitted_p = false;
2933 static GTY(()) rtx global_offset_table;
2935 /* Ensure that we are not using patterns that are not OK with PIC. */
2943 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2944 && (GET_CODE (recog_data.operand[i]) != CONST
2945 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2946 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2947 == global_offset_table)
2948 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2956 /* Return true if X is an address which needs a temporary register when
2957 reloaded while generating PIC code. */
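/* For instance (illustrative, assuming SMALL_INT accepts the usual signed
   13-bit range -4096..4095): an address such as

       (const (plus (symbol_ref "x") (const_int 8192)))

   needs a scratch register, whereas an offset of 64 would not.  */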
2960 pic_address_needs_scratch (rtx x)
2962 /* An address which is a symbolic operand plus a non-SMALL_INT offset needs a temp reg.  */
2963 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2964 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2965 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2966 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2972 /* Determine if a given RTX is a valid constant. We already know this
2973 satisfies CONSTANT_P. */
2976 legitimate_constant_p (rtx x)
2980 switch (GET_CODE (x))
2983 /* TLS symbols are not constant. */
2984 if (SYMBOL_REF_TLS_MODEL (x))
2989 inner = XEXP (x, 0);
2991 /* Offsets of TLS symbols are never valid.
2992 Discourage CSE from creating them. */
2993 if (GET_CODE (inner) == PLUS
2994 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
2999 if (GET_MODE (x) == VOIDmode)
3002 /* Floating point constants are generally not ok.
3003 The only exception is 0.0 in VIS. */
3005 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
3006 && const_zero_operand (x, GET_MODE (x)))
3012 /* Vector constants are generally not ok.
3013 The only exception is 0 in VIS. */
3015 && const_zero_operand (x, GET_MODE (x)))
3027 /* Determine if a given RTX is a valid constant address. */
3030 constant_address_p (rtx x)
3032 switch (GET_CODE (x))
3040 if (flag_pic && pic_address_needs_scratch (x))
3042 return legitimate_constant_p (x);
3045 return !flag_pic && legitimate_constant_p (x);
3052 /* Nonzero if the constant value X is a legitimate general operand
3053 when generating PIC code. It is given that flag_pic is on and
3054 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3057 legitimate_pic_operand_p (rtx x)
3059 if (pic_address_needs_scratch (x))
3061 if (SPARC_SYMBOL_REF_TLS_P (x)
3062 || (GET_CODE (x) == CONST
3063 && GET_CODE (XEXP (x, 0)) == PLUS
3064 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
3069 /* Return nonzero if ADDR is a valid memory address.
3070 STRICT specifies whether strict register checking applies. */
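/* As an informal summary of the code below, the accepted shapes are:

       reg                register indirect
       reg + reg          register indexed
       reg + simm13       register plus 13-bit signed immediate
       lo_sum (reg, imm)  low part of a sethi/or-style pair

   each subject to the mode- and target-specific restrictions tested here.  */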
3073 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3075 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3077 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3079 else if (GET_CODE (addr) == PLUS)
3081 rs1 = XEXP (addr, 0);
3082 rs2 = XEXP (addr, 1);
3084 /* Canonicalize: REG comes first; if there are no REGs,
3085 LO_SUM comes first.  */
3087 && GET_CODE (rs1) != SUBREG
3089 || GET_CODE (rs2) == SUBREG
3090 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3092 rs1 = XEXP (addr, 1);
3093 rs2 = XEXP (addr, 0);
3097 && rs1 == pic_offset_table_rtx
3099 && GET_CODE (rs2) != SUBREG
3100 && GET_CODE (rs2) != LO_SUM
3101 && GET_CODE (rs2) != MEM
3102 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
3103 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3104 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3106 || GET_CODE (rs1) == SUBREG)
3107 && RTX_OK_FOR_OFFSET_P (rs2)))
3112 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3113 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3115 /* We prohibit REG + REG for TFmode when there are no quad move insns
3116 and we consequently need to split. We do this because REG+REG
3117 is not an offsettable address. If we get the situation in reload
3118 where source and destination of a movtf pattern are both MEMs with
3119 REG+REG address, then only one of them gets converted to an
3120 offsettable address. */
3122 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3125 /* We prohibit REG + REG on ARCH32 if not optimizing for
3126 DFmode/DImode because then mem_min_alignment is likely to be zero
3127 after reload and the forced split would lack a matching splitter pattern.  */
3129 if (TARGET_ARCH32 && !optimize
3130 && (mode == DFmode || mode == DImode))
3133 else if (USE_AS_OFFSETABLE_LO10
3134 && GET_CODE (rs1) == LO_SUM
3136 && ! TARGET_CM_MEDMID
3137 && RTX_OK_FOR_OLO10_P (rs2))
3140 imm1 = XEXP (rs1, 1);
3141 rs1 = XEXP (rs1, 0);
3142 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3146 else if (GET_CODE (addr) == LO_SUM)
3148 rs1 = XEXP (addr, 0);
3149 imm1 = XEXP (addr, 1);
3151 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3154 /* We can't allow TFmode in 32-bit mode, because an offset greater
3155 than the alignment (8) may cause the LO_SUM to overflow. */
3156 if (mode == TFmode && TARGET_ARCH32)
3159 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3164 if (GET_CODE (rs1) == SUBREG)
3165 rs1 = SUBREG_REG (rs1);
3171 if (GET_CODE (rs2) == SUBREG)
3172 rs2 = SUBREG_REG (rs2);
3179 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3180 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3185 if ((REGNO (rs1) >= 32
3186 && REGNO (rs1) != FRAME_POINTER_REGNUM
3187 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3189 && (REGNO (rs2) >= 32
3190 && REGNO (rs2) != FRAME_POINTER_REGNUM
3191 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3197 /* Construct the SYMBOL_REF for the __tls_get_addr function.  */
3199 static GTY(()) rtx sparc_tls_symbol;
3202 sparc_tls_get_addr (void)
3204 if (!sparc_tls_symbol)
3205 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3207 return sparc_tls_symbol;
3211 sparc_tls_got (void)
3216 crtl->uses_pic_offset_table = 1;
3217 return pic_offset_table_rtx;
3220 if (!global_offset_table)
3221 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3222 temp = gen_reg_rtx (Pmode);
3223 emit_move_insn (temp, global_offset_table);
3227 /* Return 1 if *X is a thread-local symbol. */
3230 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3232 return SPARC_SYMBOL_REF_TLS_P (*x);
3235 /* Return 1 if X contains a thread-local symbol. */
3238 sparc_tls_referenced_p (rtx x)
3240 if (!TARGET_HAVE_TLS)
3243 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3246 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3247 this (thread-local) address. */
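/* As a rough guide (illustrative; the relocations are actually produced by
   the named patterns below), the 32-bit global-dynamic case materializes
   the canonical sequence

       sethi  %tgd_hi22(sym), %o1
       add    %o1, %tgd_lo10(sym), %o1
       add    %l7, %o1, %o0, %tgd_add(sym)
       call   __tls_get_addr, %tgd_call(sym)
        nop

   while the other models shorten this according to how much the linker and
   runtime already know about the symbol.  */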
3250 legitimize_tls_address (rtx addr)
3252 rtx temp1, temp2, temp3, ret, o0, got, insn;
3254 gcc_assert (can_create_pseudo_p ());
3256 if (GET_CODE (addr) == SYMBOL_REF)
3257 switch (SYMBOL_REF_TLS_MODEL (addr))
3259 case TLS_MODEL_GLOBAL_DYNAMIC:
3261 temp1 = gen_reg_rtx (SImode);
3262 temp2 = gen_reg_rtx (SImode);
3263 ret = gen_reg_rtx (Pmode);
3264 o0 = gen_rtx_REG (Pmode, 8);
3265 got = sparc_tls_got ();
3266 emit_insn (gen_tgd_hi22 (temp1, addr));
3267 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3270 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3271 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3276 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3277 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3280 CALL_INSN_FUNCTION_USAGE (insn)
3281 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3282 CALL_INSN_FUNCTION_USAGE (insn));
3283 insn = get_insns ();
3285 emit_libcall_block (insn, ret, o0, addr);
3288 case TLS_MODEL_LOCAL_DYNAMIC:
3290 temp1 = gen_reg_rtx (SImode);
3291 temp2 = gen_reg_rtx (SImode);
3292 temp3 = gen_reg_rtx (Pmode);
3293 ret = gen_reg_rtx (Pmode);
3294 o0 = gen_rtx_REG (Pmode, 8);
3295 got = sparc_tls_got ();
3296 emit_insn (gen_tldm_hi22 (temp1));
3297 emit_insn (gen_tldm_lo10 (temp2, temp1));
3300 emit_insn (gen_tldm_add32 (o0, got, temp2));
3301 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3306 emit_insn (gen_tldm_add64 (o0, got, temp2));
3307 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3310 CALL_INSN_FUNCTION_USAGE (insn)
3311 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3312 CALL_INSN_FUNCTION_USAGE (insn));
3313 insn = get_insns ();
3315 emit_libcall_block (insn, temp3, o0,
3316 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3317 UNSPEC_TLSLD_BASE));
3318 temp1 = gen_reg_rtx (SImode);
3319 temp2 = gen_reg_rtx (SImode);
3320 emit_insn (gen_tldo_hix22 (temp1, addr));
3321 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3323 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3325 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3328 case TLS_MODEL_INITIAL_EXEC:
3329 temp1 = gen_reg_rtx (SImode);
3330 temp2 = gen_reg_rtx (SImode);
3331 temp3 = gen_reg_rtx (Pmode);
3332 got = sparc_tls_got ();
3333 emit_insn (gen_tie_hi22 (temp1, addr));
3334 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3336 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3338 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3341 ret = gen_reg_rtx (Pmode);
3343 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3346 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3350 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3353 case TLS_MODEL_LOCAL_EXEC:
3354 temp1 = gen_reg_rtx (Pmode);
3355 temp2 = gen_reg_rtx (Pmode);
3358 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3359 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3363 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3364 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3366 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3374 gcc_unreachable (); /* for now ... */
3380 /* Legitimize PIC addresses. If the address is already position-independent,
3381 we return ORIG. Newly generated position-independent addresses go into a
3382 reg.  This is REG if nonzero, otherwise we allocate register(s) as necessary.  */
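/* Sketch of the symbolic case handled first below (illustrative only): a
   reference to `sym' in 32-bit PIC code becomes roughly

       sethi  %hi(sym), %tmp
       or     %tmp, %lo(sym), %tmp
       ld     [%pic_reg + %tmp], %reg

   i.e. a GOT load through the PIC register, with the raw symbol wrapped in
   an UNSPEC as explained below.  */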
3386 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3389 if (GET_CODE (orig) == SYMBOL_REF
3390 /* See the comment in sparc_expand_move. */
3391 || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
3393 rtx pic_ref, address;
3398 gcc_assert (! reload_in_progress && ! reload_completed);
3399 reg = gen_reg_rtx (Pmode);
3404 /* If not during reload, allocate another temp reg here for loading
3405 in the address, so that these instructions can be optimized properly.  */
3407 rtx temp_reg = ((reload_in_progress || reload_completed)
3408 ? reg : gen_reg_rtx (Pmode));
3410 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3411 won't get confused into thinking that these two instructions
3412 are loading in the true address of the symbol. If in the
3413 future a PIC rtx exists, that should be used instead. */
3416 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3417 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3421 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3422 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3429 pic_ref = gen_const_mem (Pmode,
3430 gen_rtx_PLUS (Pmode,
3431 pic_offset_table_rtx, address));
3432 crtl->uses_pic_offset_table = 1;
3433 insn = emit_move_insn (reg, pic_ref);
3434 /* Put a REG_EQUAL note on this insn, so that it can be optimized by loop.  */
3436 set_unique_reg_note (insn, REG_EQUAL, orig);
3439 else if (GET_CODE (orig) == CONST)
3443 if (GET_CODE (XEXP (orig, 0)) == PLUS
3444 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3449 gcc_assert (! reload_in_progress && ! reload_completed);
3450 reg = gen_reg_rtx (Pmode);
3453 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3454 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3455 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3456 base == reg ? 0 : reg);
3458 if (GET_CODE (offset) == CONST_INT)
3460 if (SMALL_INT (offset))
3461 return plus_constant (base, INTVAL (offset));
3462 else if (! reload_in_progress && ! reload_completed)
3463 offset = force_reg (Pmode, offset);
3465 /* If we reach here, then something is seriously wrong. */
3468 return gen_rtx_PLUS (Pmode, base, offset);
3470 else if (GET_CODE (orig) == LABEL_REF)
3471 /* ??? Why do we do this? */
3472 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3473 the register is live instead, in case it is eliminated. */
3474 crtl->uses_pic_offset_table = 1;
3479 /* Try machine-dependent ways of modifying an illegitimate address X
3480 to be legitimate. If we find one, return the new, valid address.
3482 OLDX is the address as it was before break_out_memory_refs was called.
3483 In some cases it is useful to look at this to decide what needs to be done.
3485 MODE is the mode of the operand pointed to by X.
3487 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
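/* For example (informal): (plus (reg) (const_int 0x12345)) cannot be
   encoded as a displacement, so the constant is copied into a register and
   the address becomes REG+REG; likewise REG+(X*Y) first forces the product
   into a register.  */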
3490 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3491 enum machine_mode mode)
3495 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3496 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3497 force_operand (XEXP (x, 0), NULL_RTX));
3498 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3499 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3500 force_operand (XEXP (x, 1), NULL_RTX));
3501 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3502 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3504 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3505 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3506 force_operand (XEXP (x, 1), NULL_RTX));
3508 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3511 if (SPARC_SYMBOL_REF_TLS_P (x))
3512 x = legitimize_tls_address (x);
3514 x = legitimize_pic_address (x, mode, 0);
3515 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3516 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3517 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3518 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3519 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3520 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3521 else if (GET_CODE (x) == SYMBOL_REF
3522 || GET_CODE (x) == CONST
3523 || GET_CODE (x) == LABEL_REF)
3524 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3528 /* Emit the special PIC helper function. */
3531 emit_pic_helper (void)
3533 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3536 switch_to_section (text_section);
3538 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3540 ASM_OUTPUT_ALIGN (asm_out_file, align);
3541 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3542 if (flag_delayed_branch)
3543 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3544 pic_name, pic_name);
3546 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3547 pic_name, pic_name);
3549 pic_helper_emitted_p = true;
3552 /* Emit code to load the PIC register. */
3555 load_pic_register (bool delay_pic_helper)
3557 int orig_flag_pic = flag_pic;
3559 if (TARGET_VXWORKS_RTP)
3561 emit_insn (gen_vxworks_load_got ());
3562 emit_use (pic_offset_table_rtx);
3566 /* If we haven't initialized the special PIC symbols, do so now. */
3567 if (!pic_helper_symbol_name[0])
3569 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3570 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3571 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3574 /* If we haven't emitted the special PIC helper function, do so now unless
3575 we are requested to delay it. */
3576 if (!delay_pic_helper && !pic_helper_emitted_p)
3581 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3582 pic_helper_symbol));
3584 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3585 pic_helper_symbol));
3586 flag_pic = orig_flag_pic;
3588 /* Need to emit this whether or not we obey regdecls,
3589 since setjmp/longjmp can cause life info to screw up.
3590 ??? In the case where we don't obey regdecls, this is not sufficient
3591 since we may not fall out the bottom. */
3592 emit_use (pic_offset_table_rtx);
3595 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3596 address of the call target. */
3599 sparc_emit_call_insn (rtx pat, rtx addr)
3603 insn = emit_call_insn (pat);
3605 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3606 if (TARGET_VXWORKS_RTP
3608 && GET_CODE (addr) == SYMBOL_REF
3609 && (SYMBOL_REF_DECL (addr)
3610 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3611 : !SYMBOL_REF_LOCAL_P (addr)))
3613 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3614 crtl->uses_pic_offset_table = 1;
3618 /* Return 1 if RTX is a MEM which is known to be aligned to at
3619 least a DESIRED byte boundary. */
3622 mem_min_alignment (rtx mem, int desired)
3624 rtx addr, base, offset;
3626 /* If it's not a MEM we can't accept it. */
3627 if (GET_CODE (mem) != MEM)
3631 if (!TARGET_UNALIGNED_DOUBLES
3632 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3635 /* ??? The rest of the function predates MEM_ALIGN so
3636 there is probably a bit of redundancy. */
3637 addr = XEXP (mem, 0);
3638 base = offset = NULL_RTX;
3639 if (GET_CODE (addr) == PLUS)
3641 if (GET_CODE (XEXP (addr, 0)) == REG)
3643 base = XEXP (addr, 0);
3645 /* What we are saying here is that if the base
3646 REG is aligned properly, the compiler will make
3647 sure any REG-based index upon it will be so as well.  */
3649 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3650 offset = XEXP (addr, 1);
3652 offset = const0_rtx;
3655 else if (GET_CODE (addr) == REG)
3658 offset = const0_rtx;
3661 if (base != NULL_RTX)
3663 int regno = REGNO (base);
3665 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3667 /* Check if the compiler has recorded some information
3668 about the alignment of the base REG. If reload has
3669 completed, we already matched with proper alignments.
3670 If not running global_alloc, reload might give us
3671 an unaligned pointer to the local stack, though.  */
3673 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3674 || (optimize && reload_completed))
3675 && (INTVAL (offset) & (desired - 1)) == 0)
3680 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3684 else if (! TARGET_UNALIGNED_DOUBLES
3685 || CONSTANT_P (addr)
3686 || GET_CODE (addr) == LO_SUM)
3688 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3689 is true, in which case we can only assume that an access is aligned if
3690 it is to a constant address, or the address involves a LO_SUM. */
3694 /* An obviously unaligned address. */
3699 /* Vectors to keep interesting information about registers where it can easily
3700 be found.  We used to use the actual mode value as the bit number, but there
3701 are more than 32 modes now. Instead we use two tables: one indexed by
3702 hard register number, and one indexed by mode. */
3704 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3705 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3706 mapped into one sparc_mode_class mode. */
3708 enum sparc_mode_class {
3709 S_MODE, D_MODE, T_MODE, O_MODE,
3710 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3714 /* Modes for single-word and smaller quantities. */
3715 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3717 /* Modes for double-word and smaller quantities. */
3718 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3720 /* Modes for quad-word and smaller quantities. */
3721 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3723 /* Modes for 8-word and smaller quantities. */
3724 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3726 /* Modes for single-float quantities. We must allow any single word or
3727 smaller quantity. This is because the fix/float conversion instructions
3728 take integer inputs/outputs from the float registers. */
3729 #define SF_MODES (S_MODES)
3731 /* Modes for double-float and smaller quantities. */
3732 #define DF_MODES (S_MODES | D_MODES)
3734 /* Modes for double-float only quantities. */
3735 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3737 /* Modes for quad-float only quantities. */
3738 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3740 /* Modes for quad-float and smaller quantities. */
3741 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3743 /* Modes for quad-float and double-float quantities. */
3744 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3746 /* Modes for quad-float pair only quantities. */
3747 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3749 /* Modes for quad-float pairs and smaller quantities. */
3750 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3752 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3754 /* Modes for condition codes. */
3755 #define CC_MODES (1 << (int) CC_MODE)
3756 #define CCFP_MODES (1 << (int) CCFP_MODE)
3758 /* Value is 1 if register/mode pair is acceptable on sparc.
3759 The funny mixture of D and T modes is because integer operations
3760 do not specially operate on tetra quantities, so non-quad-aligned
3761 registers can hold quadword quantities (except %o4 and %i4 because
3762 they cross fixed registers). */
3764 /* This points to either the 32 bit or the 64 bit version. */
3765 const int *hard_regno_mode_classes;
3767 static const int hard_32bit_mode_classes[] = {
3768 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3769 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3770 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3771 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3773 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3774 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3775 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3776 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3778 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3779 and none can hold SFmode/SImode values. */
3780 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3781 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3782 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3783 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3786 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3792 static const int hard_64bit_mode_classes[] = {
3793 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3794 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3795 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3796 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3798 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3799 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3800 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3801 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3803 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3804 and none can hold SFmode/SImode values. */
3805 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3806 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3807 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3808 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3811 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3817 int sparc_mode_class [NUM_MACHINE_MODES];
3819 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3822 sparc_init_modes (void)
3826 for (i = 0; i < NUM_MACHINE_MODES; i++)
3828 switch (GET_MODE_CLASS (i))
3831 case MODE_PARTIAL_INT:
3832 case MODE_COMPLEX_INT:
3833 if (GET_MODE_SIZE (i) <= 4)
3834 sparc_mode_class[i] = 1 << (int) S_MODE;
3835 else if (GET_MODE_SIZE (i) == 8)
3836 sparc_mode_class[i] = 1 << (int) D_MODE;
3837 else if (GET_MODE_SIZE (i) == 16)
3838 sparc_mode_class[i] = 1 << (int) T_MODE;
3839 else if (GET_MODE_SIZE (i) == 32)
3840 sparc_mode_class[i] = 1 << (int) O_MODE;
3842 sparc_mode_class[i] = 0;
3844 case MODE_VECTOR_INT:
3845 if (GET_MODE_SIZE (i) <= 4)
3846 sparc_mode_class[i] = 1 << (int)SF_MODE;
3847 else if (GET_MODE_SIZE (i) == 8)
3848 sparc_mode_class[i] = 1 << (int)DF_MODE;
3851 case MODE_COMPLEX_FLOAT:
3852 if (GET_MODE_SIZE (i) <= 4)
3853 sparc_mode_class[i] = 1 << (int) SF_MODE;
3854 else if (GET_MODE_SIZE (i) == 8)
3855 sparc_mode_class[i] = 1 << (int) DF_MODE;
3856 else if (GET_MODE_SIZE (i) == 16)
3857 sparc_mode_class[i] = 1 << (int) TF_MODE;
3858 else if (GET_MODE_SIZE (i) == 32)
3859 sparc_mode_class[i] = 1 << (int) OF_MODE;
3861 sparc_mode_class[i] = 0;
3864 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3865 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3867 sparc_mode_class[i] = 1 << (int) CC_MODE;
3870 sparc_mode_class[i] = 0;
3876 hard_regno_mode_classes = hard_64bit_mode_classes;
3878 hard_regno_mode_classes = hard_32bit_mode_classes;
3880 /* Initialize the array used by REGNO_REG_CLASS. */
3881 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3883 if (i < 16 && TARGET_V8PLUS)
3884 sparc_regno_reg_class[i] = I64_REGS;
3885 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3886 sparc_regno_reg_class[i] = GENERAL_REGS;
3888 sparc_regno_reg_class[i] = FP_REGS;
3890 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3892 sparc_regno_reg_class[i] = FPCC_REGS;
3894 sparc_regno_reg_class[i] = NO_REGS;
3898 /* Compute the frame size required by the function. This function is called
3899 during the reload pass and also by sparc_expand_prologue. */
3902 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3904 int outgoing_args_size = (crtl->outgoing_args_size
3905 + REG_PARM_STACK_SPACE (current_function_decl));
3906 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3911 for (i = 0; i < 8; i++)
3912 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3917 for (i = 0; i < 8; i += 2)
3918 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3919 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3923 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3924 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3925 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3928 /* Set up values for use in prologue and epilogue. */
3929 num_gfregs = n_regs;
3934 && crtl->outgoing_args_size == 0)
3935 actual_fsize = apparent_fsize = 0;
3938 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
3939 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3940 apparent_fsize += n_regs * 4;
3941 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3944 /* Make sure nothing can clobber our register windows.
3945 If a SAVE must be done, or there is a stack-local variable,
3946 the register window area must be allocated. */
3947 if (! leaf_function_p || size > 0)
3948 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3950 return SPARC_STACK_ALIGN (actual_fsize);
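/* A small worked example of the rounding idiom used above: (x + 7) & -8
   rounds x up to the next multiple of 8, so e.g. 41 bytes of locals
   contribute 48 bytes, and the outgoing-args area is rounded the same way
   before being added into the actual frame size.  */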
3953 /* Output any necessary .register pseudo-ops. */
3956 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3958 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3964 /* Check if %g[2367] were used without
3965 .register being printed for them already. */
3966 for (i = 2; i < 8; i++)
3968 if (df_regs_ever_live_p (i)
3969 && ! sparc_hard_reg_printed [i])
3971 sparc_hard_reg_printed [i] = 1;
3972 /* %g7 is used as the TLS base register; use #ignore
3973 for it instead of #scratch. */
3974 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3975 i == 7 ? "ignore" : "scratch");
3982 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3983 as needed. LOW should be double-word aligned for 32-bit registers.
3984 Return the new OFFSET. */
3987 #define SORR_RESTORE 1
3990 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3995 if (TARGET_ARCH64 && high <= 32)
3997 for (i = low; i < high; i++)
3999 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4001 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
4002 set_mem_alias_set (mem, sparc_sr_alias_set);
4003 if (action == SORR_SAVE)
4005 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4006 RTX_FRAME_RELATED_P (insn) = 1;
4008 else /* action == SORR_RESTORE */
4009 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4016 for (i = low; i < high; i += 2)
4018 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
4019 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
4020 enum machine_mode mode;
4025 mode = i < 32 ? DImode : DFmode;
4030 mode = i < 32 ? SImode : SFmode;
4035 mode = i < 32 ? SImode : SFmode;
4042 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
4043 set_mem_alias_set (mem, sparc_sr_alias_set);
4044 if (action == SORR_SAVE)
4046 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4047 RTX_FRAME_RELATED_P (insn) = 1;
4049 else /* action == SORR_RESTORE */
4050 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4052 /* Always preserve double-word alignment. */
4053 offset = (offset + 7) & -8;
4060 /* Emit code to save call-saved registers. */
4063 emit_save_or_restore_regs (int action)
4065 HOST_WIDE_INT offset;
4068 offset = frame_base_offset - apparent_fsize;
4070 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
4072 /* ??? This might be optimized a little as %g1 might already have a
4073 value close enough that a single add insn will do. */
4074 /* ??? Although, all of this is probably only a temporary fix
4075 because if %g1 can hold a function result, then
4076 sparc_expand_epilogue will lose (the result will be clobbered).  */
4078 base = gen_rtx_REG (Pmode, 1);
4079 emit_move_insn (base, GEN_INT (offset));
4080 emit_insn (gen_rtx_SET (VOIDmode,
4082 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4086 base = frame_base_reg;
4088 offset = save_or_restore_regs (0, 8, base, offset, action);
4089 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
4092 /* Generate a save_register_window insn. */
4095 gen_save_register_window (rtx increment)
4098 return gen_save_register_windowdi (increment);
4100 return gen_save_register_windowsi (increment);
4103 /* Generate an increment for the stack pointer. */
4106 gen_stack_pointer_inc (rtx increment)
4108 return gen_rtx_SET (VOIDmode,
4110 gen_rtx_PLUS (Pmode,
4115 /* Generate a decrement for the stack pointer. */
4118 gen_stack_pointer_dec (rtx decrement)
4120 return gen_rtx_SET (VOIDmode,
4122 gen_rtx_MINUS (Pmode,
4127 /* Expand the function prologue. The prologue is responsible for reserving
4128 storage for the frame, saving the call-saved registers and loading the
4129 PIC register if needed. */
4132 sparc_expand_prologue (void)
4137 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4138 on the final value of the flag means deferring the prologue/epilogue
4139 expansion until just before the second scheduling pass, which is too
4140 late to emit multiple epilogues or return insns.
4142 Of course we are making the assumption that the value of the flag
4143 will not change between now and its final value. Of the three parts
4144 of the formula, only the last one can reasonably vary. Let's take a
4145 closer look, after assuming that the first two are set to true
4146 (otherwise the last value is effectively silenced).
4148 If only_leaf_regs_used returns false, the global predicate will also
4149 be false so the actual frame size calculated below will be positive.
4150 As a consequence, the save_register_window insn will be emitted in
4151 the instruction stream; now this insn explicitly references %fp
4152 which is not a leaf register so only_leaf_regs_used will always
4153 return false subsequently.
4155 If only_leaf_regs_used returns true, we hope that the subsequent
4156 optimization passes won't cause non-leaf registers to pop up. For
4157 example, the regrename pass has special provisions to not rename to
4158 non-leaf registers in a leaf function. */
4159 sparc_leaf_function_p
4160 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4162 /* Need to use actual_fsize, since we are also allocating
4163 space for our callee (and our own register save area). */
4165 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4167 /* Advertise that the data calculated just above are now valid. */
4168 sparc_prologue_data_valid_p = true;
4170 if (sparc_leaf_function_p)
4172 frame_base_reg = stack_pointer_rtx;
4173 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4177 frame_base_reg = hard_frame_pointer_rtx;
4178 frame_base_offset = SPARC_STACK_BIAS;
4181 if (actual_fsize == 0)
4183 else if (sparc_leaf_function_p)
4185 if (actual_fsize <= 4096)
4186 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4187 else if (actual_fsize <= 8192)
4189 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4190 /* %sp is still the CFA register. */
4191 RTX_FRAME_RELATED_P (insn) = 1;
4193 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4197 rtx reg = gen_rtx_REG (Pmode, 1);
4198 emit_move_insn (reg, GEN_INT (-actual_fsize));
4199 insn = emit_insn (gen_stack_pointer_inc (reg));
4200 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4201 gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4204 RTX_FRAME_RELATED_P (insn) = 1;
4208 if (actual_fsize <= 4096)
4209 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4210 else if (actual_fsize <= 8192)
4212 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4213 /* %sp is not the CFA register anymore. */
4214 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4218 rtx reg = gen_rtx_REG (Pmode, 1);
4219 emit_move_insn (reg, GEN_INT (-actual_fsize));
4220 insn = emit_insn (gen_save_register_window (reg));
4223 RTX_FRAME_RELATED_P (insn) = 1;
4224 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
4225 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4229 emit_save_or_restore_regs (SORR_SAVE);
4231 /* Load the PIC register if needed. */
4232 if (flag_pic && crtl->uses_pic_offset_table)
4233 load_pic_register (false);
4236 /* This function generates the assembly code for function entry, which boils
4237 down to emitting the necessary .register directives. */
4240 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4242 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4243 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4245 sparc_output_scratch_registers (file);
4248 /* Expand the function epilogue, either normal or part of a sibcall.
4249 We emit all the instructions except the return or the call. */
4252 sparc_expand_epilogue (void)
4255 emit_save_or_restore_regs (SORR_RESTORE);
4257 if (actual_fsize == 0)
4259 else if (sparc_leaf_function_p)
4261 if (actual_fsize <= 4096)
4262 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4263 else if (actual_fsize <= 8192)
4265 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4266 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4270 rtx reg = gen_rtx_REG (Pmode, 1);
4271 emit_move_insn (reg, GEN_INT (-actual_fsize));
4272 emit_insn (gen_stack_pointer_dec (reg));
4277 /* Return true if it is appropriate to emit `return' instructions in the
4278 body of a function. */
4281 sparc_can_use_return_insn_p (void)
4283 return sparc_prologue_data_valid_p
4284 && (actual_fsize == 0 || !sparc_leaf_function_p);
4287 /* This function generates the assembly code for function exit. */
4290 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4292 /* If code does not drop into the epilogue, we still have to output
4293 a dummy nop for the sake of sane backtraces. Otherwise, if the
4294 last two instructions of a function were "call foo; dslot;" this
4295 can make the return PC of foo (i.e. address of call instruction
4296 plus 8) point to the first instruction in the next function. */
4298 rtx insn, last_real_insn;
4300 insn = get_last_insn ();
4302 last_real_insn = prev_real_insn (insn);
4304 && GET_CODE (last_real_insn) == INSN
4305 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4306 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4308 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4309 fputs("\tnop\n", file);
4311 sparc_output_deferred_case_vectors ();
4314 /* Output a 'restore' instruction. */
4317 output_restore (rtx pat)
4323 fputs ("\t restore\n", asm_out_file);
4327 gcc_assert (GET_CODE (pat) == SET);
4329 operands[0] = SET_DEST (pat);
4330 pat = SET_SRC (pat);
4332 switch (GET_CODE (pat))
4335 operands[1] = XEXP (pat, 0);
4336 operands[2] = XEXP (pat, 1);
4337 output_asm_insn (" restore %r1, %2, %Y0", operands);
4340 operands[1] = XEXP (pat, 0);
4341 operands[2] = XEXP (pat, 1);
4342 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4345 operands[1] = XEXP (pat, 0);
4346 gcc_assert (XEXP (pat, 1) == const1_rtx);
4347 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4351 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4356 /* Output a return. */
4359 output_return (rtx insn)
4361 if (sparc_leaf_function_p)
4363 /* This is a leaf function so we don't have to bother restoring the
4364 register window, which frees us from dealing with the convoluted
4365 semantics of restore/return. We simply output the jump to the
4366 return address and the insn in the delay slot (if any). */
4368 gcc_assert (! crtl->calls_eh_return);
4370 return "jmp\t%%o7+%)%#";
4374 /* This is a regular function so we have to restore the register window.
4375 We may have a pending insn for the delay slot, which will be either
4376 combined with the 'restore' instruction or put in the delay slot of
4377 the 'return' instruction. */
4379 if (crtl->calls_eh_return)
4381 /* If the function uses __builtin_eh_return, the eh_return
4382 machinery occupies the delay slot. */
4383 gcc_assert (! final_sequence);
4385 if (! flag_delayed_branch)
4386 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4389 fputs ("\treturn\t%i7+8\n", asm_out_file);
4391 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4393 if (flag_delayed_branch)
4394 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4396 fputs ("\t nop\n", asm_out_file);
4398 else if (final_sequence)
4402 delay = NEXT_INSN (insn);
4405 pat = PATTERN (delay);
4407 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4409 epilogue_renumber (&pat, 0);
4410 return "return\t%%i7+%)%#";
4414 output_asm_insn ("jmp\t%%i7+%)", NULL);
4415 output_restore (pat);
4416 PATTERN (delay) = gen_blockage ();
4417 INSN_CODE (delay) = -1;
4422 /* The delay slot is empty. */
4424 return "return\t%%i7+%)\n\t nop";
4425 else if (flag_delayed_branch)
4426 return "jmp\t%%i7+%)\n\t restore";
4428 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4435 /* Output a sibling call. */
4438 output_sibcall (rtx insn, rtx call_operand)
4442 gcc_assert (flag_delayed_branch);
4444 operands[0] = call_operand;
4446 if (sparc_leaf_function_p)
4448 /* This is a leaf function so we don't have to bother restoring the
4449 register window. We simply output the jump to the function and
4450 the insn in the delay slot (if any). */
4452 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4454 if (final_sequence)
4455 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4456 operands);
4457 else
4458 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4459 it into branch if possible. */
4460 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4461 operands);
4465 /* This is a regular function so we have to restore the register window.
4466 We may have a pending insn for the delay slot, which will be combined
4467 with the 'restore' instruction. */
4469 output_asm_insn ("call\t%a0, 0", operands);
4471 if (final_sequence)
4473 rtx delay = NEXT_INSN (insn);
4474 gcc_assert (delay);
4476 output_restore (PATTERN (delay));
4478 PATTERN (delay) = gen_blockage ();
4479 INSN_CODE (delay) = -1;
4481 else
4482 output_restore (NULL_RTX);
4488 /* Functions for handling argument passing.
4490 For 32-bit, the first 6 args are normally in registers and the rest are
4491 pushed. Any arg that starts within the first 6 words is at least
4492 partially passed in a register unless its data type forbids it.
4494 For 64-bit, the argument registers are laid out as an array of 16 elements
4495 and arguments are added sequentially. The first 6 int args and up to the
4496 first 16 fp args (depending on size) are passed in regs.
4498 Slot  Stack     Integral  Float  Float in structure  Double  Long Double
4499 ----  -----     --------  -----  ------------------  ------  -----------
4500  15   [SP+248]            %f31   %f30,%f31           %d30
4501  14   [SP+240]            %f29   %f28,%f29           %d28    %q28
4502  13   [SP+232]            %f27   %f26,%f27           %d26
4503  12   [SP+224]            %f25   %f24,%f25           %d24    %q24
4504  11   [SP+216]            %f23   %f22,%f23           %d22
4505  10   [SP+208]            %f21   %f20,%f21           %d20    %q20
4506   9   [SP+200]            %f19   %f18,%f19           %d18
4507   8   [SP+192]            %f17   %f16,%f17           %d16    %q16
4508   7   [SP+184]            %f15   %f14,%f15           %d14
4509   6   [SP+176]            %f13   %f12,%f13           %d12    %q12
4510   5   [SP+168]  %o5       %f11   %f10,%f11           %d10
4511   4   [SP+160]  %o4       %f9    %f8,%f9             %d8     %q8
4512   3   [SP+152]  %o3       %f7    %f6,%f7             %d6
4513   2   [SP+144]  %o2       %f5    %f4,%f5             %d4     %q4
4514   1   [SP+136]  %o1       %f3    %f2,%f3             %d2
4515   0   [SP+128]  %o0       %f1    %f0,%f1             %d0     %q0
4517 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4519 Integral arguments are always passed as 64-bit quantities appropriately
4520 extended.
4522 Passing of floating point values is handled as follows.
4523 If a prototype is in scope:
4524 If the value is in a named argument (i.e. not a stdarg function or a
4525 value not part of the `...') then the value is passed in the appropriate
4526 fp reg.
4527 If the value is part of the `...' and is passed in one of the first 6
4528 slots then the value is passed in the appropriate int reg.
4529 If the value is part of the `...' and is not passed in one of the first 6
4530 slots then the value is passed in memory.
4531 If a prototype is not in scope:
4532 If the value is one of the first 6 arguments the value is passed in the
4533 appropriate integer reg and the appropriate fp reg.
4534 If the value is not one of the first 6 arguments the value is passed in
4535 the appropriate fp reg and in memory.
4538 Summary of the calling conventions implemented by GCC on SPARC:
4540 32-bit ABI:
4541                         size      argument      return value
4543 small integer           <4        int. reg.     int. reg.
4544 word                     4        int. reg.     int. reg.
4545 double word              8        int. reg.     int. reg.
4547 _Complex small integer  <8        int. reg.     int. reg.
4548 _Complex word            8        int. reg.     int. reg.
4549 _Complex double word    16        memory        int. reg.
4551 vector integer          <=8       int. reg.     FP reg.
4552 vector integer          >8        memory        memory
4554 float                    4        int. reg.     FP reg.
4555 double                   8        int. reg.     FP reg.
4556 long double             16        memory        memory
4558 _Complex float           8        memory        FP reg.
4559 _Complex double         16        memory        FP reg.
4560 _Complex long double    32        memory        FP reg.
4562 vector float            any       memory        memory
4564 aggregate               any       memory        memory
4567 64-bit ABI:
4569                         size      argument      return value
4571 small integer           <8        int. reg.     int. reg.
4572 word                     8        int. reg.     int. reg.
4573 double word             16        int. reg.     int. reg.
4575 _Complex small integer  <16       int. reg.     int. reg.
4576 _Complex word           16        int. reg.     int. reg.
4577 _Complex double word    32        memory        int. reg.
4579 vector integer          <=16      FP reg.       FP reg.
4580 vector integer          16<s<=32  memory        FP reg.
4581 vector integer          >32       memory        memory
4583 float                    4        FP reg.       FP reg.
4584 double                   8        FP reg.       FP reg.
4585 long double             16        FP reg.       FP reg.
4587 _Complex float           8        FP reg.       FP reg.
4588 _Complex double         16        FP reg.       FP reg.
4589 _Complex long double    32        memory        FP reg.
4591 vector float            <=16      FP reg.       FP reg.
4592 vector float            16<s<=32  memory        FP reg.
4593 vector float            >32       memory        memory
4595 aggregate               <=16      reg.          reg.
4596 aggregate               16<s<=32  memory        reg.
4597 aggregate               >32       memory        memory
4601 Note #1: complex floating-point types follow the extended SPARC ABIs as
4602 implemented by the Sun compiler.
4604 Note #2: integral vector types follow the scalar floating-point types
4605 conventions to match what is implemented by the Sun VIS SDK.
4607 Note #3: floating-point vector types follow the aggregate types
4608 conventions. */
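/* Worked example of the summary above (editor's addition). Given

	struct two_words { int a; float b; };		(size 8)
	_Complex double cd;				(size 16)

   the 32-bit ABI passes and returns 'struct two_words' in memory and
   passes 'cd' in memory while returning it in FP registers; the 64-bit
   ABI passes and returns 'struct two_words' in registers (the int field
   in an integer register, the float field in an FP register, see
   function_arg_record_value below) and both passes and returns 'cd' in
   FP registers. */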
4611 /* Maximum number of int regs for args. */
4612 #define SPARC_INT_ARG_MAX 6
4613 /* Maximum number of fp regs for args. */
4614 #define SPARC_FP_ARG_MAX 16
4616 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
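/* Worked example (editor's addition): for a 13-byte argument,
   ROUND_ADVANCE (13) is (13 + 3) / 4 = 4 slots when UNITS_PER_WORD is 4
   (32-bit) and (13 + 7) / 8 = 2 slots when UNITS_PER_WORD is 8 (64-bit). */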
4618 /* Handle the INIT_CUMULATIVE_ARGS macro.
4619 Initialize a variable CUM of type CUMULATIVE_ARGS
4620 for a call to a function whose data type is FNTYPE.
4621 For a library call, FNTYPE is 0. */
4624 init_cumulative_args (struct sparc_args *cum, tree fntype,
4625 rtx libname ATTRIBUTE_UNUSED,
4626 tree fndecl ATTRIBUTE_UNUSED)
4628 cum->words = 0;
4629 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4630 cum->libcall_p = fntype == 0;
4633 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4634 When a prototype says `char' or `short', really pass an `int'. */
4637 sparc_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
4639 return TARGET_ARCH32 ? true : false;
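/* Example (editor's addition): on a 32-bit target, given

	extern void f (short s);

   the promoted prototype makes the caller widen S and pass a full int
   argument word; the callee then only relies on the low 16 bits. */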
4642 /* Handle promotion of pointer and integer arguments. */
4644 static enum machine_mode
4645 sparc_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
4646 enum machine_mode mode,
4647 int *punsignedp ATTRIBUTE_UNUSED,
4648 const_tree fntype ATTRIBUTE_UNUSED,
4649 int for_return ATTRIBUTE_UNUSED)
4651 if (POINTER_TYPE_P (type))
4653 *punsignedp = POINTERS_EXTEND_UNSIGNED;
4654 return Pmode;
4657 /* For TARGET_ARCH64 we need this, as we don't have instructions
4658 for arithmetic operations which do zero/sign extension at the same time,
4659 so without this we end up with a srl/sra after every assignment to a
4660 user variable, which means very bad code. */
4663 && GET_MODE_CLASS (mode) == MODE_INT
4664 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4665 return word_mode;
4667 return mode;
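/* Example (editor's addition): without this promotion, code such as

	short s = a + b;

   would be followed by an explicit sll/sra extension pair after every
   assignment to S, because no arithmetic insn extends its result for
   free; promoting to word_mode confines the extensions to the places
   that really need the narrow value. */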
4672 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4675 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4677 return TARGET_ARCH64 ? true : false;
4680 /* Scan the record type TYPE and return the following predicates:
4681 - INTREGS_P: the record contains at least one field or sub-field
4682 that is eligible for promotion in integer registers.
4683 - FP_REGS_P: the record contains at least one field or sub-field
4684 that is eligible for promotion in floating-point registers.
4685 - PACKED_P: the record contains at least one field that is packed.
4687 Sub-fields are not taken into account for the PACKED_P predicate. */
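/* Example (editor's addition): for

	struct s { int i; struct { double d; } inner; };

   the scan sets *INTREGS_P for field 'i' and *FPREGS_P for sub-field
   'd'; *PACKED_P would only be set if a field of 's' itself were
   declared packed. */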
4690 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4692 tree field;
4694 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4696 if (TREE_CODE (field) == FIELD_DECL)
4698 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4699 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4700 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4701 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4702 && TARGET_FPU)
4703 *fpregs_p = 1;
4704 else
4705 *intregs_p = 1;
4707 if (packed_p && DECL_PACKED (field))
4708 *packed_p = 1;
4713 /* Compute the slot number to pass an argument in.
4714 Return the slot number or -1 if passing on the stack.
4716 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4717 the preceding args and about the function being called.
4718 MODE is the argument's machine mode.
4719 TYPE is the data type of the argument (as a tree).
4720 This is null for libcalls where that information may
4721 not be available.
4722 NAMED is nonzero if this argument is a named parameter
4723 (otherwise it is an extra parameter matching an ellipsis).
4724 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4725 *PREGNO records the register number to use if scalar type.
4726 *PPADDING records the amount of padding needed in words. */
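/* Worked example (editor's addition): on TARGET_ARCH64, a named double
   argument whose slot number is 3 gets *PREGNO = SPARC_FP_ARG_FIRST
   + 3 * 2, i.e. %d6 (slot 3 in the table above), and *PPADDING = 0. */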
4729 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4730 tree type, int named, int incoming_p,
4731 int *pregno, int *ppadding)
4733 int regbase = (incoming_p
4734 ? SPARC_INCOMING_INT_ARG_FIRST
4735 : SPARC_OUTGOING_INT_ARG_FIRST);
4736 int slotno = cum->words;
4737 enum mode_class mclass;
4738 int regno;
4742 if (type && TREE_ADDRESSABLE (type))
4743 return -1;
4745 if (TARGET_ARCH32
4746 && mode == BLKmode
4747 && type
4748 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4749 return -1;
4751 /* For SPARC64, objects requiring 16-byte alignment get it. */
4752 if (TARGET_ARCH64
4753 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4754 && (slotno & 1) != 0)
4755 slotno++, *ppadding = 1;
4757 mclass = GET_MODE_CLASS (mode);
4758 if (type && TREE_CODE (type) == VECTOR_TYPE)
4760 /* Vector types deserve special treatment because they are
4761 polymorphic wrt their mode, depending upon whether VIS
4762 instructions are enabled. */
4763 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4765 /* The SPARC port defines no floating-point vector modes. */
4766 gcc_assert (mode == BLKmode);
4768 else
4770 /* Integral vector types should either have a vector
4771 mode or an integral mode, because we are guaranteed
4772 by pass_by_reference that their size is not greater
4773 than 16 bytes and TImode is 16-byte wide. */
4774 gcc_assert (mode != BLKmode);
4776 /* Vector integers are handled like floats according to
4777 the Sun VIS SDK. */
4778 mclass = MODE_FLOAT;
4782 switch (mclass)
4784 case MODE_FLOAT:
4785 case MODE_COMPLEX_FLOAT:
4786 case MODE_VECTOR_INT:
4787 if (TARGET_ARCH64 && TARGET_FPU && named)
4789 if (slotno >= SPARC_FP_ARG_MAX)
4790 return -1;
4791 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4792 /* Arguments filling only one single FP register are
4793 right-justified in the outer double FP register. */
4794 if (GET_MODE_SIZE (mode) <= 4)
4795 regno++;
4796 break;
4800 case MODE_INT:
4801 case MODE_COMPLEX_INT:
4802 if (slotno >= SPARC_INT_ARG_MAX)
4803 return -1;
4804 regno = regbase + slotno;
4805 break;
4807 case MODE_RANDOM:
4808 if (mode == VOIDmode)
4809 /* MODE is VOIDmode when generating the actual call. */
4810 return -1;
4812 gcc_assert (mode == BLKmode);
4814 if (TARGET_ARCH32
4815 || !type
4816 || (TREE_CODE (type) != VECTOR_TYPE
4817 && TREE_CODE (type) != RECORD_TYPE))
4819 if (slotno >= SPARC_INT_ARG_MAX)
4820 return -1;
4821 regno = regbase + slotno;
4823 else /* TARGET_ARCH64 && type */
4825 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4827 /* First see what kinds of registers we would need. */
4828 if (TREE_CODE (type) == VECTOR_TYPE)
4831 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4833 /* The ABI obviously doesn't specify how packed structures
4834 are passed. These are defined to be passed in int regs
4835 if possible, otherwise memory. */
4836 if (packed_p || !named)
4837 fpregs_p = 0, intregs_p = 1;
4839 /* If all arg slots are filled, then must pass on stack. */
4840 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4843 /* If there are only int args and all int arg slots are filled,
4844 then must pass on stack. */
4845 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4848 /* Note that even if all int arg slots are filled, fp members may
4849 still be passed in regs if such regs are available.
4850 *PREGNO isn't set because there may be more than one, it's up
4851 to the caller to compute them. */
4864 /* Handle recursive register counting for structure field layout. */
4866 struct function_arg_record_value_parms
4868 rtx ret; /* return expression being built. */
4869 int slotno; /* slot number of the argument. */
4870 int named; /* whether the argument is named. */
4871 int regbase; /* regno of the base register. */
4872 int stack; /* 1 if part of the argument is on the stack. */
4873 int intoffset; /* offset of the first pending integer field. */
4874 unsigned int nregs; /* number of words passed in registers. */
4877 static void function_arg_record_value_3
4878 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4879 static void function_arg_record_value_2
4880 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4881 static void function_arg_record_value_1
4882 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4883 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
4884 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4886 /* A subroutine of function_arg_record_value. Traverse the structure
4887 recursively and determine how many registers will be required. */
4890 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
4891 struct function_arg_record_value_parms *parms,
4892 bool packed_p)
4894 tree field;
4896 /* We need to compute how many registers are needed so we can
4897 allocate the PARALLEL but before we can do that we need to know
4898 whether there are any packed fields. The ABI obviously doesn't
4899 specify how structures are passed in this case, so they are
4900 defined to be passed in int regs if possible, otherwise memory,
4901 regardless of whether there are fp values present. */
4903 if (! packed_p)
4904 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4906 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4908 packed_p = true;
4909 break;
4913 /* Compute how many registers we need. */
4914 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4916 if (TREE_CODE (field) == FIELD_DECL)
4918 HOST_WIDE_INT bitpos = startbitpos;
4920 if (DECL_SIZE (field) != 0)
4922 if (integer_zerop (DECL_SIZE (field)))
4923 continue;
4925 if (host_integerp (bit_position (field), 1))
4926 bitpos += int_bit_position (field);
4929 /* ??? FIXME: else assume zero offset. */
4931 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4932 function_arg_record_value_1 (TREE_TYPE (field),
4933 bitpos,
4934 parms,
4935 packed_p);
4936 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4937 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4938 && TARGET_FPU)
4942 if (parms->intoffset != -1)
4944 unsigned int startbit, endbit;
4945 int intslots, this_slotno;
4947 startbit = parms->intoffset & -BITS_PER_WORD;
4948 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4950 intslots = (endbit - startbit) / BITS_PER_WORD;
4951 this_slotno = parms->slotno + parms->intoffset
4952 / BITS_PER_WORD;
4954 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4956 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4957 /* We need to pass this field on the stack. */
4958 parms->stack = 1;
4961 parms->nregs += intslots;
4962 parms->intoffset = -1;
4965 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4966 If it wasn't true we wouldn't be here. */
4967 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4968 && DECL_MODE (field) == BLKmode)
4969 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4970 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4971 parms->nregs += 2;
4972 else
4973 parms->nregs += 1;
4975 else
4977 if (parms->intoffset == -1)
4978 parms->intoffset = bitpos;
4984 /* A subroutine of function_arg_record_value. Assign the bits of the
4985 structure between parms->intoffset and bitpos to integer registers. */
4988 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4989 struct function_arg_record_value_parms *parms)
4991 enum machine_mode mode;
4992 rtx reg;
4993 unsigned int startbit, endbit;
4994 int this_slotno, intslots, intoffset;
4997 if (parms->intoffset == -1)
4998 return;
5000 intoffset = parms->intoffset;
5001 parms->intoffset = -1;
5003 startbit = intoffset & -BITS_PER_WORD;
5004 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5005 intslots = (endbit - startbit) / BITS_PER_WORD;
5006 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5008 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5012 /* If this is the trailing part of a word, only load that much into
5013 the register. Otherwise load the whole register. Note that in
5014 the latter case we may pick up unwanted bits. It's not a problem
5015 at the moment, but we may wish to revisit it. */
5016 do
5017 if (intoffset % BITS_PER_WORD != 0)
5018 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5019 MODE_INT);
5020 else
5021 mode = word_mode;
5023 intoffset /= BITS_PER_UNIT;
5026 regno = parms->regbase + this_slotno;
5027 reg = gen_rtx_REG (mode, regno);
5028 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5029 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5030 parms->nregs += 1;
5032 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5033 intslots -= 1;
5034 this_slotno += 1;
5037 while (intslots > 0);
5040 /* A subroutine of function_arg_record_value. Traverse the structure
5041 recursively and assign bits to floating point registers. Track which
5042 bits in between need integer registers; invoke function_arg_record_value_3
5043 to make that happen. */
5046 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5047 struct function_arg_record_value_parms *parms,
5048 bool packed_p)
5050 tree field;
5052 if (! packed_p)
5053 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5055 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5056 packed_p = true;
5057 break;
5062 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5064 if (TREE_CODE (field) == FIELD_DECL)
5066 HOST_WIDE_INT bitpos = startbitpos;
5068 if (DECL_SIZE (field) != 0)
5070 if (integer_zerop (DECL_SIZE (field)))
5071 continue;
5073 if (host_integerp (bit_position (field), 1))
5074 bitpos += int_bit_position (field);
5077 /* ??? FIXME: else assume zero offset. */
5079 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5080 function_arg_record_value_2 (TREE_TYPE (field),
5081 bitpos,
5082 parms,
5083 packed_p);
5084 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5085 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5086 && TARGET_FPU)
5090 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5091 int regno, nregs, pos;
5092 enum machine_mode mode = DECL_MODE (field);
5093 rtx reg;
5095 function_arg_record_value_3 (bitpos, parms);
5097 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5098 && mode == BLKmode)
5100 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5101 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5103 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5105 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5106 nregs = 2;
5108 else
5109 nregs = 1;
5111 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5112 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5113 regno++;
5114 reg = gen_rtx_REG (mode, regno);
5115 pos = bitpos / BITS_PER_UNIT;
5116 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5117 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5118 parms->nregs += 1;
5119 while (--nregs > 0)
5121 regno += GET_MODE_SIZE (mode) / 4;
5122 reg = gen_rtx_REG (mode, regno);
5123 pos += GET_MODE_SIZE (mode);
5124 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5125 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5126 parms->nregs += 1;
5131 if (parms->intoffset == -1)
5132 parms->intoffset = bitpos;
5138 /* Used by function_arg and function_value to implement the complex
5139 conventions of the 64-bit ABI for passing and returning structures.
5140 Return an expression valid as a return value for the two macros
5141 FUNCTION_ARG and FUNCTION_VALUE.
5143 TYPE is the data type of the argument (as a tree).
5144 This is null for libcalls where that information may
5145 not be available.
5146 MODE is the argument's machine mode.
5147 SLOTNO is the index number of the argument's slot in the parameter array.
5148 NAMED is nonzero if this argument is a named parameter
5149 (otherwise it is an extra parameter matching an ellipsis).
5150 REGBASE is the regno of the base register for the parameter array. */
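/* Example of the result (editor's addition): for the 64-bit ABI and

	struct { int i; float f; };		(size 8, slot 0)

   this builds roughly

	(parallel [(expr_list (reg:DI %o0) (const_int 0))
		   (expr_list (reg:SF %f1) (const_int 4))])

   the integer field travels in the int reg (the DImode word may pick up
   the float bits too, see function_arg_record_value_3) and the float
   field in the second half of the first FP slot. */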
5153 function_arg_record_value (const_tree type, enum machine_mode mode,
5154 int slotno, int named, int regbase)
5156 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5157 struct function_arg_record_value_parms parms;
5160 parms.ret = NULL_RTX;
5161 parms.slotno = slotno;
5162 parms.named = named;
5163 parms.regbase = regbase;
5164 parms.stack = 0;
5166 /* Compute how many registers we need. */
5167 parms.nregs = 0;
5168 parms.intoffset = 0;
5169 function_arg_record_value_1 (type, 0, &parms, false);
5171 /* Take into account pending integer fields. */
5172 if (parms.intoffset != -1)
5174 unsigned int startbit, endbit;
5175 int intslots, this_slotno;
5177 startbit = parms.intoffset & -BITS_PER_WORD;
5178 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5179 intslots = (endbit - startbit) / BITS_PER_WORD;
5180 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5182 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5184 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5185 /* We need to pass this field on the stack. */
5186 parms.stack = 1;
5189 parms.nregs += intslots;
5191 nregs = parms.nregs;
5193 /* Allocate the vector and handle some annoying special cases. */
5195 if (nregs == 0)
5196 /* ??? Empty structure has no value? Duh? */
5197 if (typesize <= 0)
5199 /* Though there's nothing really to store, return a word register
5200 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5201 leads to breakage due to the fact that there are zero bytes to
5202 store. */
5203 return gen_rtx_REG (mode, regbase);
5207 /* ??? C++ has structures with no fields, and yet a size. Give up
5208 for now and pass everything back in integer registers. */
5209 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5211 if (nregs + slotno > SPARC_INT_ARG_MAX)
5212 nregs = SPARC_INT_ARG_MAX - slotno;
5214 gcc_assert (nregs != 0);
5216 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5218 /* If at least one field must be passed on the stack, generate
5219 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5220 also be passed on the stack. We can't do much better because the
5221 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5222 of structures for which the fields passed exclusively in registers
5223 are not at the beginning of the structure. */
5224 if (parms.stack)
5225 XVECEXP (parms.ret, 0, 0)
5226 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5228 /* Fill in the entries. */
5229 parms.nregs = 0;
5230 parms.intoffset = 0;
5231 function_arg_record_value_2 (type, 0, &parms, false);
5232 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5234 gcc_assert (parms.nregs == nregs);
5236 return parms.ret;
5239 /* Used by function_arg and function_value to implement the conventions
5240 of the 64-bit ABI for passing and returning unions.
5241 Return an expression valid as a return value for the two macros
5242 FUNCTION_ARG and FUNCTION_VALUE.
5244 SIZE is the size in bytes of the union.
5245 MODE is the argument's machine mode.
5246 REGNO is the hard register the union will be passed in. */
5249 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5250 int regno)
5252 int nwords = ROUND_ADVANCE (size), i;
5255 /* See comment in previous function for empty structures. */
5256 if (nwords == 0)
5257 return gen_rtx_REG (mode, regno);
5259 if (slotno == SPARC_INT_ARG_MAX - 1)
5260 nwords = 1;
5262 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5264 for (i = 0; i < nwords; i++)
5266 /* Unions are passed left-justified. */
5267 XVECEXP (regs, 0, i)
5268 = gen_rtx_EXPR_LIST (VOIDmode,
5269 gen_rtx_REG (word_mode, regno),
5270 GEN_INT (UNITS_PER_WORD * i));
5272 return regs;
5277 /* Used by function_arg and function_value to implement the conventions
5278 for passing and returning large (BLKmode) vectors.
5279 Return an expression valid as a return value for the two macros
5280 FUNCTION_ARG and FUNCTION_VALUE.
5282 SIZE is the size in bytes of the vector (at least 8 bytes).
5283 REGNO is the FP hard register the vector will be passed in. */
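/* Example (editor's addition): a 16-byte vector passed at
   SPARC_FP_ARG_FIRST yields

	(parallel [(expr_list (reg:DI %f0) (const_int 0))
		   (expr_list (reg:DI %f2) (const_int 8))])

   i.e. two 8-byte pieces in consecutive double FP registers. */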
5286 function_arg_vector_value (int size, int regno)
5288 int i, nregs = size / 8;
5289 rtx regs;
5291 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5293 for (i = 0; i < nregs; i++)
5295 XVECEXP (regs, 0, i)
5296 = gen_rtx_EXPR_LIST (VOIDmode,
5297 gen_rtx_REG (DImode, regno + 2*i),
5298 GEN_INT (i*8));
5300 return regs;
5304 /* Handle the FUNCTION_ARG macro.
5305 Determine where to put an argument to a function.
5306 Value is zero to push the argument on the stack,
5307 or a hard register in which to store the argument.
5309 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5310 the preceding args and about the function being called.
5311 MODE is the argument's machine mode.
5312 TYPE is the data type of the argument (as a tree).
5313 This is null for libcalls where that information may
5314 not be available.
5315 NAMED is nonzero if this argument is a named parameter
5316 (otherwise it is an extra parameter matching an ellipsis).
5317 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5320 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5321 tree type, int named, int incoming_p)
5323 int regbase = (incoming_p
5324 ? SPARC_INCOMING_INT_ARG_FIRST
5325 : SPARC_OUTGOING_INT_ARG_FIRST);
5326 int slotno, regno, padding;
5327 enum mode_class mclass = GET_MODE_CLASS (mode);
5329 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5330 &regno, &padding);
5332 if (slotno == -1)
5333 return 0;
5334 /* Vector types deserve special treatment because they are polymorphic wrt
5335 their mode, depending upon whether VIS instructions are enabled. */
5336 if (type && TREE_CODE (type) == VECTOR_TYPE)
5338 HOST_WIDE_INT size = int_size_in_bytes (type);
5339 gcc_assert ((TARGET_ARCH32 && size <= 8)
5340 || (TARGET_ARCH64 && size <= 16));
5342 if (mode == BLKmode)
5343 return function_arg_vector_value (size,
5344 SPARC_FP_ARG_FIRST + 2*slotno);
5345 else
5346 mclass = MODE_FLOAT;
5349 if (TARGET_ARCH32)
5350 return gen_rtx_REG (mode, regno);
5352 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5353 and are promoted to registers if possible. */
5354 if (type && TREE_CODE (type) == RECORD_TYPE)
5356 HOST_WIDE_INT size = int_size_in_bytes (type);
5357 gcc_assert (size <= 16);
5359 return function_arg_record_value (type, mode, slotno, named, regbase);
5362 /* Unions up to 16 bytes in size are passed in integer registers. */
5363 else if (type && TREE_CODE (type) == UNION_TYPE)
5365 HOST_WIDE_INT size = int_size_in_bytes (type);
5366 gcc_assert (size <= 16);
5368 return function_arg_union_value (size, mode, slotno, regno);
5371 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5372 but also have the slot allocated for them.
5373 If no prototype is in scope fp values in register slots get passed
5374 in two places, either fp regs and int regs or fp regs and memory. */
5375 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5376 && SPARC_FP_REG_P (regno))
5378 rtx reg = gen_rtx_REG (mode, regno);
5379 if (cum->prototype_p || cum->libcall_p)
5381 /* "* 2" because fp reg numbers are recorded in 4 byte
5382 quantities. */
5384 /* ??? This will cause the value to be passed in the fp reg and
5385 in the stack. When a prototype exists we want to pass the
5386 value in the reg but reserve space on the stack. That's an
5387 optimization, and is deferred [for a bit]. */
5388 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5389 return gen_rtx_PARALLEL (mode,
5391 gen_rtx_EXPR_LIST (VOIDmode,
5392 NULL_RTX, const0_rtx),
5393 gen_rtx_EXPR_LIST (VOIDmode,
5397 /* ??? It seems that passing back a register even when past
5398 the area declared by REG_PARM_STACK_SPACE will allocate
5399 space appropriately, and will not copy the data onto the
5400 stack, exactly as we desire.
5402 This is due to locate_and_pad_parm being called in
5403 expand_call whenever reg_parm_stack_space > 0, which
5404 while beneficial to our example here, would seem to be
5405 in error from what had been intended. Ho hum... -- r~ */
5413 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5417 /* On incoming, we don't need to know that the value
5418 is passed in %f0 and %i0, and it confuses other parts
5419 causing needless spillage even on the simplest cases. */
5420 if (incoming_p)
5421 return reg;
5423 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5424 + (regno - SPARC_FP_ARG_FIRST) / 2);
5426 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5427 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5429 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5431 else
5433 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5434 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5435 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5440 /* All other aggregate types are passed in an integer register in a mode
5441 corresponding to the size of the type. */
5442 else if (type && AGGREGATE_TYPE_P (type))
5444 HOST_WIDE_INT size = int_size_in_bytes (type);
5445 gcc_assert (size <= 16);
5447 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5450 return gen_rtx_REG (mode, regno);
5453 /* For an arg passed partly in registers and partly in memory,
5454 this is the number of bytes of registers used.
5455 For args passed entirely in registers or entirely in memory, zero.
5457 Any arg that starts in the first 6 regs but won't entirely fit in them
5458 needs partial registers on v8. On v9, structures with integer
5459 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5460 values that begin in the last fp reg [where "last fp reg" varies with the
5461 mode] will be split between that reg and memory. */
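/* Worked example (editor's addition): on v8, an 8-byte argument whose
   first word lands in slot 5 has 4 bytes in %o5 and 4 bytes on the
   stack, so this hook returns (SPARC_INT_ARG_MAX - 5) * UNITS_PER_WORD,
   i.e. 4. */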
5464 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5465 tree type, bool named)
5467 int slotno, regno, padding;
5469 /* We pass 0 for incoming_p here, it doesn't matter. */
5470 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5472 if (TARGET_ARCH32)
5477 if ((slotno + (mode == BLKmode
5478 ? ROUND_ADVANCE (int_size_in_bytes (type))
5479 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5480 > SPARC_INT_ARG_MAX)
5481 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5483 else
5485 /* We are guaranteed by pass_by_reference that the size of the
5486 argument is not greater than 16 bytes, so we only need to return
5487 one word if the argument is partially passed in registers. */
5489 if (type && AGGREGATE_TYPE_P (type))
5491 int size = int_size_in_bytes (type);
5493 if (size > UNITS_PER_WORD
5494 && slotno == SPARC_INT_ARG_MAX - 1)
5495 return UNITS_PER_WORD;
5497 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5498 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5499 && ! (TARGET_FPU && named)))
5501 /* The complex types are passed as packed types. */
5502 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5503 && slotno == SPARC_INT_ARG_MAX - 1)
5504 return UNITS_PER_WORD;
5506 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5508 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5509 > SPARC_FP_ARG_MAX)
5510 return UNITS_PER_WORD;
5513 return 0;
5517 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5518 Specify whether to pass the argument by reference. */
5521 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5522 enum machine_mode mode, const_tree type,
5523 bool named ATTRIBUTE_UNUSED)
5525 if (TARGET_ARCH32)
5526 /* Original SPARC 32-bit ABI says that structures and unions,
5527 and quad-precision floats are passed by reference. For Pascal,
5528 also pass arrays by reference. All other base types are passed
5531 Extended ABI (as implemented by the Sun compiler) says that all
5532 complex floats are passed by reference. Pass complex integers
5533 in registers up to 8 bytes. More generally, enforce the 2-word
5534 cap for passing arguments in registers.
5536 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5537 integers are passed like floats of the same size, that is in
5538 registers up to 8 bytes. Pass all vector floats by reference
5539 like structure and unions. */
5540 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5542 /* Catch CDImode, TFmode, DCmode and TCmode. */
5543 || GET_MODE_SIZE (mode) > 8
5544 || (type
5545 && TREE_CODE (type) == VECTOR_TYPE
5546 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5547 else
5548 /* Original SPARC 64-bit ABI says that structures and unions
5549 smaller than 16 bytes are passed in registers, as well as
5550 all other base types.
5552 Extended ABI (as implemented by the Sun compiler) says that
5553 complex floats are passed in registers up to 16 bytes. Pass
5554 all complex integers in registers up to 16 bytes. More generally,
5555 enforce the 2-word cap for passing arguments in registers.
5557 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5558 integers are passed like floats of the same size, that is in
5559 registers (up to 16 bytes). Pass all vector floats like structure
5560 and unions. */
5561 return ((type
5562 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5563 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5564 /* Catch CTImode and TCmode. */
5565 || GET_MODE_SIZE (mode) > 16);
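/* Examples (editor's addition): under the 32-bit ABI, 'long double'
   (TFmode) and '_Complex double' (DCmode) are both 16 bytes and are
   caught by the GET_MODE_SIZE (mode) > 8 test, so they are passed by
   reference; under the 64-bit ABI a 32-byte '_Complex long double'
   (TCmode) is caught by GET_MODE_SIZE (mode) > 16. */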
5568 /* Handle the FUNCTION_ARG_ADVANCE macro.
5569 Update the data in CUM to advance over an argument
5570 of mode MODE and data type TYPE.
5571 TYPE is null for libcalls where that information may not be available. */
5574 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5575 tree type, int named)
5577 int slotno, regno, padding;
5579 /* We pass 0 for incoming_p here, it doesn't matter. */
5580 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5582 /* If register required leading padding, add it. */
5584 cum->words += padding;
5586 if (TARGET_ARCH32)
5588 cum->words += (mode != BLKmode
5589 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5590 : ROUND_ADVANCE (int_size_in_bytes (type)));
5592 else
5594 if (type && AGGREGATE_TYPE_P (type))
5596 int size = int_size_in_bytes (type);
5598 if (size <= 8)
5599 ++cum->words;
5600 else if (size <= 16)
5601 cum->words += 2;
5602 else /* passed by reference */
5603 ++cum->words;
5605 else
5607 cum->words += (mode != BLKmode
5608 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5609 : ROUND_ADVANCE (int_size_in_bytes (type)));
5614 /* Handle the FUNCTION_ARG_PADDING macro.
5615 For the 64-bit ABI structs are always stored left shifted in their
5616 argument slot. */
5619 function_arg_padding (enum machine_mode mode, const_tree type)
5621 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5622 return upward;
5624 /* Fall back to the default. */
5625 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5628 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5629 Specify whether to return the return value in memory. */
5632 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5634 if (TARGET_ARCH32)
5635 /* Original SPARC 32-bit ABI says that structures and unions,
5636 and quad-precision floats are returned in memory. All other
5637 base types are returned in registers.
5639 Extended ABI (as implemented by the Sun compiler) says that
5640 all complex floats are returned in registers (8 FP registers
5641 at most for '_Complex long double'). Return all complex integers
5642 in registers (4 at most for '_Complex long long').
5644 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5645 integers are returned like floats of the same size, that is in
5646 registers up to 8 bytes and in memory otherwise. Return all
5647 vector floats in memory like structure and unions; note that
5648 they always have BLKmode like the latter. */
5649 return (TYPE_MODE (type) == BLKmode
5650 || TYPE_MODE (type) == TFmode
5651 || (TREE_CODE (type) == VECTOR_TYPE
5652 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5653 else
5654 /* Original SPARC 64-bit ABI says that structures and unions
5655 smaller than 32 bytes are returned in registers, as well as
5656 all other base types.
5658 Extended ABI (as implemented by the Sun compiler) says that all
5659 complex floats are returned in registers (8 FP registers at most
5660 for '_Complex long double'). Return all complex integers in
5661 registers (4 at most for '_Complex TItype').
5663 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5664 integers are returned like floats of the same size, that is in
5665 registers. Return all vector floats like structure and unions;
5666 note that they always have BLKmode like the latter. */
5667 return ((TYPE_MODE (type) == BLKmode
5668 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
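/* Examples (editor's addition): under the 64-bit ABI a 24-byte struct
   (BLKmode, size <= 32) is returned in registers while a 40-byte one is
   returned in memory; under the 32-bit ABI every BLKmode aggregate and
   every TFmode value is returned in memory. */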
5671 /* Handle the TARGET_STRUCT_VALUE target hook.
5672 Return where to find the structure return value address. */
5675 sparc_struct_value_rtx (tree fndecl, int incoming)
5681 if (incoming)
5684 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5685 STRUCT_VALUE_OFFSET));
5686 else
5687 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5688 STRUCT_VALUE_OFFSET));
5690 /* Only follow the SPARC ABI for fixed-size structure returns.
5691 Variable size structure returns are handled per the normal
5692 procedures in GCC. This is enabled by -mstd-struct-return. */
5693 if (incoming == 2
5694 && sparc_std_struct_return
5695 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5696 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5698 /* We must check and adjust the return address, as it is
5699 optional as to whether the return object is really
5700 provided. */
5701 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5702 rtx scratch = gen_reg_rtx (SImode);
5703 rtx endlab = gen_label_rtx ();
5705 /* Calculate the return object size */
5706 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5707 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5708 /* Construct a temporary return value */
5709 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5711 /* Implement SPARC 32-bit psABI callee returns struct checking:
5714 Fetch the instruction where we will return to and see if
5715 it's an unimp instruction (the most significant 10 bits
5716 will be zero). */
5717 emit_move_insn (scratch, gen_rtx_MEM (SImode,
5718 plus_constant (ret_rtx, 8)));
5719 /* Assume the size is valid and pre-adjust */
5720 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5721 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5722 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5723 /* Assign stack temp:
5724 Write the address of the memory pointed to by temp_val into
5725 the memory pointed to by mem */
5726 emit_move_insn (mem, XEXP (temp_val, 0));
5727 emit_label (endlab);
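/* Illustrative caller sequence for the check above (editor's addition),
   assuming a callee returning a 20-byte struct:

	call	f
	 nop
	unimp	20		! low 12 bits encode the struct size

   The word at %i7 + 8 is compared against this function's own return
   size; on a match the return address stays pre-adjusted by 4 so the
   final jump skips the unimp insn and the caller's buffer is used,
   otherwise the adjustment is undone and the return object is diverted
   to a fresh stack temporary. */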
5730 set_mem_alias_set (mem, struct_value_alias_set);
5731 return mem;
5735 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5736 For v9, function return values are subject to the same rules as arguments,
5737 except that up to 32 bytes may be returned in registers. */
5740 function_value (const_tree type, enum machine_mode mode, int incoming_p)
5742 /* Beware that the two values are swapped here wrt function_arg. */
5743 int regbase = (incoming_p
5744 ? SPARC_OUTGOING_INT_ARG_FIRST
5745 : SPARC_INCOMING_INT_ARG_FIRST);
5746 enum mode_class mclass = GET_MODE_CLASS (mode);
5747 int regno;
5749 /* Vector types deserve special treatment because they are polymorphic wrt
5750 their mode, depending upon whether VIS instructions are enabled. */
5751 if (type && TREE_CODE (type) == VECTOR_TYPE)
5753 HOST_WIDE_INT size = int_size_in_bytes (type);
5754 gcc_assert ((TARGET_ARCH32 && size <= 8)
5755 || (TARGET_ARCH64 && size <= 32));
5757 if (mode == BLKmode)
5758 return function_arg_vector_value (size,
5759 SPARC_FP_ARG_FIRST);
5760 else
5761 mclass = MODE_FLOAT;
5764 if (TARGET_ARCH64 && type)
5766 /* Structures up to 32 bytes in size are returned in registers. */
5767 if (TREE_CODE (type) == RECORD_TYPE)
5769 HOST_WIDE_INT size = int_size_in_bytes (type);
5770 gcc_assert (size <= 32);
5772 return function_arg_record_value (type, mode, 0, 1, regbase);
5775 /* Unions up to 32 bytes in size are returned in integer registers. */
5776 else if (TREE_CODE (type) == UNION_TYPE)
5778 HOST_WIDE_INT size = int_size_in_bytes (type);
5779 gcc_assert (size <= 32);
5781 return function_arg_union_value (size, mode, 0, regbase);
5784 /* Objects that require it are returned in FP registers. */
5785 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5788 /* All other aggregate types are returned in an integer register in a
5789 mode corresponding to the size of the type. */
5790 else if (AGGREGATE_TYPE_P (type))
5792 /* All other aggregate types are passed in an integer register
5793 in a mode corresponding to the size of the type. */
5794 HOST_WIDE_INT size = int_size_in_bytes (type);
5795 gcc_assert (size <= 32);
5797 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5799 /* ??? We probably should have made the same ABI change in
5800 3.4.0 as the one we made for unions. The latter was
5801 required by the SCD though, while the former is not
5802 specified, so we favored compatibility and efficiency.
5804 Now we're stuck for aggregates larger than 16 bytes,
5805 because OImode vanished in the meantime. Let's not
5806 try to be unduly clever, and simply follow the ABI
5807 for unions in that case. */
5808 if (mode == BLKmode)
5809 return function_arg_union_value (size, mode, 0, regbase);
5814 /* This must match sparc_promote_function_mode.
5815 ??? Maybe 32-bit pointers should actually remain in Pmode? */
5816 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5817 mode = word_mode;
5820 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
5821 regno = SPARC_FP_ARG_FIRST;
5822 else
5823 regno = regbase;
5825 return gen_rtx_REG (mode, regno);
5828 /* Do what is necessary for `va_start'. We look at the current function
5829 to determine if stdarg or varargs is used and return the address of
5830 the first unnamed parameter. */
5833 sparc_builtin_saveregs (void)
5835 int first_reg = crtl->args.info.words;
5836 rtx address;
5837 int regno;
5839 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5840 emit_move_insn (gen_rtx_MEM (word_mode,
5841 gen_rtx_PLUS (Pmode,
5842 virtual_incoming_args_rtx,
5843 GEN_INT (FIRST_PARM_OFFSET (0)
5844 + (UNITS_PER_WORD * regno)))),
5846 gen_rtx_REG (word_mode,
5847 SPARC_INCOMING_INT_ARG_FIRST + regno));
5849 address = gen_rtx_PLUS (Pmode,
5850 virtual_incoming_args_rtx,
5851 GEN_INT (FIRST_PARM_OFFSET (0)
5852 + UNITS_PER_WORD * first_reg));
5854 return address;
5857 /* Implement `va_start' for stdarg. */
5860 sparc_va_start (tree valist, rtx nextarg)
5862 nextarg = expand_builtin_saveregs ();
5863 std_expand_builtin_va_start (valist, nextarg);
5866 /* Implement `va_arg' for stdarg. */
5869 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
5870 gimple_seq *post_p)
5872 HOST_WIDE_INT size, rsize, align;
5873 tree addr, incr;
5874 bool indirect;
5875 tree ptrtype = build_pointer_type (type);
5877 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5879 indirect = true;
5880 size = rsize = UNITS_PER_WORD;
5881 align = 0;
5883 else
5885 indirect = false;
5886 size = int_size_in_bytes (type);
5887 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5888 align = 0;
5890 if (TARGET_ARCH64)
5892 /* For SPARC64, objects requiring 16-byte alignment get it. */
5893 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5894 align = 2 * UNITS_PER_WORD;
5896 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5897 are left-justified in their slots. */
5898 if (AGGREGATE_TYPE_P (type))
5900 if (size == 0)
5901 size = rsize = UNITS_PER_WORD;
5902 else
5903 size = rsize;
5906 incr = valist;
5908 if (align)
5911 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5912 size_int (align - 1));
5913 incr = fold_convert (sizetype, incr);
5914 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
5915 size_int (-align));
5916 incr = fold_convert (ptr_type_node, incr);
5919 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5920 addr = incr;
5922 if (BYTES_BIG_ENDIAN && size < rsize)
5923 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5924 size_int (rsize - size));
5926 if (indirect)
5928 addr = fold_convert (build_pointer_type (ptrtype), addr);
5929 addr = build_va_arg_indirect_ref (addr);
5932 /* If the address isn't aligned properly for the type, we need a temporary.
5933 FIXME: This is inefficient, usually we can do this in registers. */
5934 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
5936 tree tmp = create_tmp_var (type, "va_arg_tmp");
5937 tree dest_addr = build_fold_addr_expr (tmp);
5938 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
5939 3, dest_addr, addr, size_int (rsize));
5940 TREE_ADDRESSABLE (tmp) = 1;
5941 gimplify_and_add (copy, pre_p);
5942 addr = dest_addr;
5944 else
5946 addr = fold_convert (ptrtype, addr);
5948 incr
5949 = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
5950 gimplify_assign (valist, incr, post_p);
5952 return build_va_arg_indirect_ref (addr);
5955 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5956 Specify whether the vector mode is supported by the hardware. */
5959 sparc_vector_mode_supported_p (enum machine_mode mode)
5961 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
5964 /* Return the string to output an unconditional branch to LABEL, which is
5965 the operand number of the label.
5967 DEST is the destination insn (i.e. the label), INSN is the source. */
5970 output_ubranch (rtx dest, int label, rtx insn)
5972 static char string[64];
5973 bool v9_form = false;
5974 char *p;
5976 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5978 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5979 - INSN_ADDRESSES (INSN_UID (insn)));
5980 /* Leave some instructions for "slop". */
5981 if (delta >= -260000 && delta < 260000)
5982 v9_form = true;
5985 if (v9_form)
5986 strcpy (string, "ba%*,pt\t%%xcc, ");
5987 else
5988 strcpy (string, "b%*\t");
5990 p = strchr (string, '\0');
6001 /* Return the string to output a conditional branch to LABEL, which is
6002 the operand number of the label. OP is the conditional expression.
6003 XEXP (OP, 0) is assumed to be a condition code register (integer or
6004 floating point) and its mode specifies what kind of comparison we made.
6006 DEST is the destination insn (i.e. the label), INSN is the source.
6008 REVERSED is nonzero if we should reverse the sense of the comparison.
6010 ANNUL is nonzero if we should generate an annulling branch. */
6013 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
6016 static char string[64];
6017 enum rtx_code code = GET_CODE (op);
6018 rtx cc_reg = XEXP (op, 0);
6019 enum machine_mode mode = GET_MODE (cc_reg);
6020 const char *labelno, *branch;
6021 int spaces = 8, far;
6022 char *p;
6024 /* v9 branches are limited to +-1MB. If it is too far away,
6037 fbne,a,pn %fcc2, .LC29
6045 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6047 if (reversed)
6048 /* Reversal of FP compares takes care -- an ordered compare
6049 becomes an unordered compare and vice versa. */
6050 if (mode == CCFPmode || mode == CCFPEmode)
6051 code = reverse_condition_maybe_unordered (code);
6053 code = reverse_condition (code);
6056 /* Start by writing the branch condition. */
6057 if (mode == CCFPmode || mode == CCFPEmode)
6108 /* ??? !v9: FP branches cannot be preceded by another floating point
6109 insn. Because there is currently no concept of pre-delay slots,
6110 we can fix this only by always emitting a nop before a floating
6111 point branch. */
6113 string[0] = '\0';
6114 if (! TARGET_V9)
6115 strcpy (string, "nop\n\t");
6116 strcat (string, branch);
6129 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6141 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6162 strcpy (string, branch);
6164 spaces -= strlen (branch);
6165 p = strchr (string, '\0');
6167 /* Now add the annulling, the label, and a possible noop. */
6180 if (! far && insn && INSN_ADDRESSES_SET_P ())
6182 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6183 - INSN_ADDRESSES (INSN_UID (insn)));
6184 /* Leave some instructions for "slop". */
6185 if (delta < -260000 || delta >= 260000)
6186 far = 1;
6189 if (mode == CCFPmode || mode == CCFPEmode)
6191 static char v9_fcc_labelno[] = "%%fccX, ";
6192 /* Set the char indicating the number of the fcc reg to use. */
6193 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6194 labelno = v9_fcc_labelno;
6197 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6201 else if (mode == CCXmode || mode == CCX_NOOVmode)
6203 labelno = "%%xcc, ";
6208 labelno = "%%icc, ";
6213 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6215 strcpy (p,
6216 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6217 ? ",pt" : ",pn");
6218 p += 3;
6219 spaces -= 3;
6229 strcpy (p, labelno);
6230 p = strchr (p, '\0');
6232 if (far)
6233 strcpy (p, ".+12\n\t nop\n\tb\t");
6234 /* Skip the next insn if requested or
6235 if we know that it will be a nop. */
6236 if (annul || ! final_sequence)
6237 p[3] = '6';
6238 p += 14;
6250 /* Emit a library call comparison between floating point X and Y.
6251 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
6252 Return the new operator to be used in the comparison sequence.
6254 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6255 values as arguments instead of the TFmode registers themselves,
6256 that's why we cannot call emit_float_lib_cmp. */
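/* Example (editor's addition): for TARGET_ARCH64 and COMPARISON == EQ,
   this emits a call to _Qp_feq with the addresses of two TFmode stack
   slots and hands back roughly (ne:VOIDmode result 0), which the caller
   then uses as the branch condition. */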
6259 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6261 const char *qpfunc;
6262 rtx slot0, slot1, result, tem, tem2, libfunc;
6263 enum machine_mode mode;
6264 enum rtx_code new_comparison;
6266 switch (comparison)
6268 case EQ:
6269 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
6270 break;
6272 case NE:
6273 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
6274 break;
6276 case GT:
6277 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
6278 break;
6280 case GE:
6281 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
6282 break;
6284 case LT:
6285 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
6286 break;
6288 case LE:
6289 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
6290 break;
6292 case ORDERED:
6293 case UNORDERED:
6294 case UNGT:
6295 case UNLT:
6296 case UNEQ:
6297 case UNGE:
6298 case UNLE:
6299 case LTGT:
6300 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
6301 break;
6313 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6314 emit_move_insn (slot0, x);
6321 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6322 emit_move_insn (slot1, y);
6325 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
6326 emit_library_call (libfunc, LCT_NORMAL,
6327 DImode, 2,
6328 XEXP (slot0, 0), Pmode,
6329 XEXP (slot1, 0), Pmode);
6330 mode = DImode;
6332 else
6334 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
6335 emit_library_call (libfunc, LCT_NORMAL,
6336 SImode, 2,
6337 x, TFmode, y, TFmode);
6338 mode = SImode;
6342 /* Immediately move the result of the libcall into a pseudo
6343 register so reload doesn't clobber the value if it needs
6344 the return register for a spill reg. */
6345 result = gen_reg_rtx (mode);
6346 emit_move_insn (result, hard_libcall_value (mode, libfunc));
6348 switch (comparison)
6350 default:
6351 return gen_rtx_NE (VOIDmode, result, const0_rtx);
6352 case ORDERED:
6353 case UNORDERED:
6354 new_comparison = (comparison == UNORDERED ? EQ : NE);
6355 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
6356 case UNGT:
6357 case UNGE:
6358 new_comparison = (comparison == UNGT ? GT : NE);
6359 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
6360 case UNLE:
6361 return gen_rtx_NE (VOIDmode, result, const2_rtx);
6362 case UNLT:
6363 tem = gen_reg_rtx (mode);
6364 if (TARGET_ARCH32)
6365 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6366 else
6367 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6368 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
6369 case UNEQ:
6370 case LTGT:
6371 tem = gen_reg_rtx (mode);
6372 if (TARGET_ARCH32)
6373 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6374 else
6375 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6376 tem2 = gen_reg_rtx (mode);
6377 if (TARGET_ARCH32)
6378 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6379 else
6380 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6381 new_comparison = (comparison == UNEQ ? EQ : NE);
6382 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
6388 /* Generate an unsigned DImode to FP conversion. This is the same code
6389 optabs would emit if we didn't have TFmode patterns. */
6392 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6394 rtx neglab, donelab, i0, i1, f0, in, out;
6396 out = operands[0];
6397 in = force_reg (DImode, operands[1]);
6398 neglab = gen_label_rtx ();
6399 donelab = gen_label_rtx ();
6400 i0 = gen_reg_rtx (DImode);
6401 i1 = gen_reg_rtx (DImode);
6402 f0 = gen_reg_rtx (mode);
6404 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6406 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6407 emit_jump_insn (gen_jump (donelab));
6410 emit_label (neglab);
6412 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6413 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6414 emit_insn (gen_iordi3 (i0, i0, i1));
6415 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6416 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6418 emit_label (donelab);
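/* Worked example of the negative branch above (editor's addition): for
   IN = 2^63 + 3, which is negative as a signed DImode value, we form
   I0 = (IN >> 1) | (IN & 1) = 2^62 + 1, a non-negative value the signed
   FLOAT conversion can handle, then OUT = F0 + F0 doubles it back;
   or-ing the discarded low bit into I0 acts as a sticky bit, so the
   single rounding in the conversion matches a direct unsigned
   conversion. */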
6421 /* Generate an FP to unsigned DImode conversion. This is the same code
6422 optabs would emit if we didn't have TFmode patterns. */
6425 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6427 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6429 out = operands[0];
6430 in = force_reg (mode, operands[1]);
6431 neglab = gen_label_rtx ();
6432 donelab = gen_label_rtx ();
6433 i0 = gen_reg_rtx (DImode);
6434 i1 = gen_reg_rtx (DImode);
6435 limit = gen_reg_rtx (mode);
6436 f0 = gen_reg_rtx (mode);
6438 emit_move_insn (limit,
6439 CONST_DOUBLE_FROM_REAL_VALUE (
6440 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6441 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6443 emit_insn (gen_rtx_SET (VOIDmode,
6444 out,
6445 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6446 emit_jump_insn (gen_jump (donelab));
6449 emit_label (neglab);
6451 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6452 emit_insn (gen_rtx_SET (VOIDmode,
6453 i0,
6454 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6455 emit_insn (gen_movdi (i1, const1_rtx));
6456 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6457 emit_insn (gen_xordi3 (out, i0, i1));
6459 emit_label (donelab);
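/* Worked example of the negative branch above (editor's addition):
   converting X = 2^63 + 10 to unsigned DImode, X >= 2^63, so we convert
   X - 2^63 = 10 with the signed FIX (giving I0 = 10) and then xor in
   the sign bit I1 = 1 << 63, yielding 2^63 + 10; values below 2^63 take
   the direct signed path. */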
6462 /* Return the string to output a conditional branch to LABEL, testing
6463 register REG. LABEL is the operand number of the label; REG is the
6464 operand number of the reg. OP is the conditional expression. The mode
6465 of REG says what kind of comparison we made.
6467 DEST is the destination insn (i.e. the label), INSN is the source.
6469 REVERSED is nonzero if we should reverse the sense of the comparison.
6471 ANNUL is nonzero if we should generate an annulling branch. */
6474 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6475 int annul, rtx insn)
6477 static char string[64];
6478 enum rtx_code code = GET_CODE (op);
6479 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6480 rtx note;
6481 int far;
6482 char *p;
6484 /* Branches on a register are limited to +-128KB. If it is too far away,
6497 brgez,a,pn %o1, .LC29
6503 ba,pt %xcc, .LC29 */
6505 far = get_attr_length (insn) >= 3;
6507 /* If not floating-point or if EQ or NE, we can just reverse the code. */
6508 if (reversed)
6509 code = reverse_condition (code);
6511 /* Only 64 bit versions of these instructions exist. */
6512 gcc_assert (mode == DImode);
6514 /* Start by writing the branch condition. */
6515 switch (code)
6518 case NE:
6519 strcpy (string, "brnz");
6520 break;
6522 case EQ:
6523 strcpy (string, "brz");
6524 break;
6526 case GE:
6527 strcpy (string, "brgez");
6528 break;
6530 case LT:
6531 strcpy (string, "brlz");
6532 break;
6534 case LE:
6535 strcpy (string, "brlez");
6536 break;
6538 case GT:
6539 strcpy (string, "brgz");
6540 break;
6542 default:
6543 gcc_unreachable ();
6546 p = strchr (string, '\0');
6548 /* Now add the annulling, reg, label, and nop. */
6555 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6557 strcpy (p,
6558 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6559 ? ",pt" : ",pn");
6560 p += 3;
6563 *p = p < string + 8 ? '\t' : ' ';
6571 int veryfar = 1, delta;
6573 if (INSN_ADDRESSES_SET_P ())
6575 delta = (INSN_ADDRESSES (INSN_UID (dest))
6576 - INSN_ADDRESSES (INSN_UID (insn)));
6577 /* Leave some instructions for "slop". */
6578 if (delta >= -260000 && delta < 260000)
6579 veryfar = 0;
6582 strcpy (p, ".+12\n\t nop\n\t");
6583 /* Skip the next insn if requested or
6584 if we know that it will be a nop. */
6585 if (annul || ! final_sequence)
6595 strcpy (p, "ba,pt\t%%xcc, ");
6609 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
6610 Such instructions cannot be used in the delay slot of a return insn on v9.
6611 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts. */
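/* Example (editor's addition): with TEST == 0,

	(set (reg:SI 24 %i0) (reg:SI 25 %i1))

   is rewritten to use %o0 and %o1 via OUTGOING_REGNO, so the insn stays
   valid in the delay slot of the V9 'return' insn, which switches back
   to the caller's register window; any use of %l0-%l7 or %o0-%o7 makes
   this routine answer 1 instead. */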
6615 epilogue_renumber (register rtx *where, int test)
6617 register const char *fmt;
6618 register int i;
6619 register enum rtx_code code;
6621 if (*where == 0)
6622 return 0;
6624 code = GET_CODE (*where);
6626 switch (code)
6628 case REG:
6629 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6630 return 1;
6631 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6632 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
6639 case PLUS:
6640 /* Do not replace the frame pointer with the stack pointer because
6641 it can cause the delayed instruction to load below the stack.
6642 This occurs when instructions like:
6644 (set (reg/i:SI 24 %i0)
6645 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6646 (const_int -20 [0xffffffec])) 0))
6648 are in the return delayed slot. */
6650 if (GET_CODE (XEXP (*where, 0)) == REG
6651 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6652 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6653 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6654 return 1;
6655 break;
6657 case MEM:
6658 if (SPARC_STACK_BIAS
6659 && GET_CODE (XEXP (*where, 0)) == REG
6660 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6661 return 1;
6662 break;
6664 default:
6665 break;
6668 fmt = GET_RTX_FORMAT (code);
6670 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6672 if (fmt[i] == 'E')
6674 register int j;
6675 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6676 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6677 return 1;
6679 else if (fmt[i] == 'e'
6680 && epilogue_renumber (&(XEXP (*where, i)), test))
6681 return 1;
6683 return 0;
6686 /* Leaf functions and non-leaf functions have different needs. */
6688 static const int
6689 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6691 static const int
6692 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6694 static const int *const reg_alloc_orders[] = {
6695 reg_leaf_alloc_order,
6696 reg_nonleaf_alloc_order};
6699 order_regs_for_local_alloc (void)
6701 static int last_order_nonleaf = 1;
6703 if (df_regs_ever_live_p (15) != last_order_nonleaf)
6705 last_order_nonleaf = !last_order_nonleaf;
6706 memcpy ((char *) reg_alloc_order,
6707 (const char *) reg_alloc_orders[last_order_nonleaf],
6708 FIRST_PSEUDO_REGISTER * sizeof (int));
6712 /* Return 1 if REG and MEM are legitimate enough to allow the various
6713 mem<-->reg splits to be run. */
6716 sparc_splitdi_legitimate (rtx reg, rtx mem)
6718 /* Punt if we are here by mistake. */
6719 gcc_assert (reload_completed);
6721 /* We must have an offsettable memory reference. */
6722 if (! offsettable_memref_p (mem))
6723 return 0;
6725 /* If we have legitimate args for ldd/std, we do not want
6726 the split to happen. */
6727 if ((REGNO (reg) % 2) == 0
6728 && mem_min_alignment (mem, 8))
6729 return 0;
6731 return 1;
6735 /* Return 1 if x and y are some kind of REG and they refer to
6736 different hard registers. This test is guaranteed to be
6737 run after reload. */
6740 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6742 if (GET_CODE (x) != REG)
6743 return 0;
6744 if (GET_CODE (y) != REG)
6745 return 0;
6746 if (REGNO (x) == REGNO (y))
6747 return 0;
6749 return 1;
6751 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6752 This makes them candidates for using ldd and std insns.
6754 Note reg1 and reg2 *must* be hard registers. */
6757 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6759 /* We might have been passed a SUBREG. */
6760 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6763 if (REGNO (reg1) % 2 != 0)
6766 /* Integer ldd is deprecated in SPARC V9. */
6767 if (TARGET_V9 && REGNO (reg1) < 32)
6770 return (REGNO (reg1) == REGNO (reg2) - 1);
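/* Worked examples (illustrative): the pair (%g2, %g3) qualifies, since
   REGNO 2 is even and 3 == 2 + 1; (%o1, %o2) fails the evenness test;
   and on V9 any integer pair (REGNO < 32) is rejected because integer
   ldd is deprecated there, while a float pair such as (%f0, %f1)
   still qualifies. */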
6773 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in an ldd or std insn.
6776 This can only happen when addr1 and addr2, the addresses in mem1
6777 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6778 addr1 must also be aligned on a 64-bit boundary.
6780 Also, if dependent_reg_rtx is not null, it should not be used to
6781 compute the address for mem1, i.e. we cannot optimize a sequence
6793 But note that the transformation from:
6798 is perfectly fine. Thus, the peephole2 patterns always pass us
6799 the destination register of the first load, never the second one.
6801 For stores we don't have a similar problem, so dependent_reg_rtx is
6805 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6809 HOST_WIDE_INT offset1;
6811 /* The mems cannot be volatile. */
6812 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6815 /* MEM1 should be aligned on a 64-bit boundary. */
6816 if (MEM_ALIGN (mem1) < 64)
6819 addr1 = XEXP (mem1, 0);
6820 addr2 = XEXP (mem2, 0);
6822 /* Extract a register number and offset (if used) from the first addr. */
6823 if (GET_CODE (addr1) == PLUS)
6825 /* If not a REG, return zero. */
6826 if (GET_CODE (XEXP (addr1, 0)) != REG)
6830 reg1 = REGNO (XEXP (addr1, 0));
6831 /* The offset must be constant! */
6832 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6834 offset1 = INTVAL (XEXP (addr1, 1));
6837 else if (GET_CODE (addr1) != REG)
6841 reg1 = REGNO (addr1);
6842 /* This was a simple (mem (reg)) expression. Offset is 0. */
6846 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
6847 if (GET_CODE (addr2) != PLUS)
6850 if (GET_CODE (XEXP (addr2, 0)) != REG
6851 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6854 if (reg1 != REGNO (XEXP (addr2, 0)))
6857 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6860 /* The first offset must be evenly divisible by 8 to ensure the
6861 address is 64-bit aligned. */
6862 if (offset1 % 8 != 0)
6865 /* The offset for the second addr must be 4 more than the first addr. */
6866 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6869 /* All the tests passed. addr1 and addr2 are valid for ldd and std insns. */
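/* Worked example (illustrative): assuming MEM_ALIGN (mem1) >= 64, the
   pair [%o0 + 8] / [%o0 + 12] qualifies: same base register, offset 8
   is a multiple of 8, and 12 == 8 + 4. By contrast, [%o0 + 4] /
   [%o0 + 8] fails the 64-bit alignment test, and [%o0 + 8] /
   [%o1 + 12] fails the same-base-register test. */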
6874 /* Return 1 if reg is a pseudo, or is the first register in
6875 a hard register pair. This makes it suitable for use in
6876 ldd and std insns. */
6879 register_ok_for_ldd (rtx reg)
6881 /* We might have been passed a SUBREG. */
6885 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6886 return (REGNO (reg) % 2 == 0);
6891 /* Return 1 if OP is a memory whose address is known to be
6892 aligned to an 8-byte boundary, or a pseudo during reload.
6893 This makes it suitable for use in ldd and std insns. */
6896 memory_ok_for_ldd (rtx op)
6900 /* In 64-bit mode, we assume that the address is word-aligned. */
6901 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
6904 if ((reload_in_progress || reload_completed)
6905 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
6908 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
6910 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
6919 /* Print operand X (an rtx) in assembler syntax to file FILE.
6920 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6921 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6924 print_operand (FILE *file, rtx x, int code)
6929 /* Output an insn in a delay slot. */
6931 sparc_indent_opcode = 1;
6933 fputs ("\n\t nop", file);
6936 /* Output an annul flag if there's nothing for the delay slot and we
6937 are optimizing. This is always used with '(' below.
6938 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6939 this is a dbx bug. So, we only do this when optimizing.
6940 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6941 Always emit a nop in case the next instruction is a branch. */
6942 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6946 /* Output a 'nop' if there's nothing for the delay slot and we are
6947 not optimizing. This is always used with '*' above. */
6948 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6949 fputs ("\n\t nop", file);
6950 else if (final_sequence)
6951 sparc_indent_opcode = 1;
6954 /* Output the right displacement from the saved PC on function return.
6955 The caller may have placed an "unimp" insn immediately after the call
6956 so we have to account for it. This insn is used in the 32-bit ABI
6957 when calling a function that returns a non-zero-sized structure. The
6958 64-bit ABI doesn't have it. Be careful to have this test be the same
6959 as that used on the call. The exception here is that when
6960 sparc_std_struct_return is enabled, the psABI is followed exactly
6961 and the adjustment is made by the code in sparc_struct_value_rtx.
6962 The call emitted is the same when sparc_std_struct_return is
6965 && cfun->returns_struct
6966 && ! sparc_std_struct_return
6967 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6969 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6975 /* Output the Embedded Medium/Anywhere code model base register. */
6976 fputs (EMBMEDANY_BASE_REG, file);
6979 /* Print some local dynamic TLS name. */
6980 assemble_name (file, get_some_local_dynamic_name ());
6984 /* Adjust the operand to take into account a RESTORE operation. */
6985 if (GET_CODE (x) == CONST_INT)
6987 else if (GET_CODE (x) != REG)
6988 output_operand_lossage ("invalid %%Y operand");
6989 else if (REGNO (x) < 8)
6990 fputs (reg_names[REGNO (x)], file);
6991 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6992 fputs (reg_names[REGNO (x)-16], file);
6994 output_operand_lossage ("invalid %%Y operand");
6997 /* Print out the low order register name of a register pair. */
6998 if (WORDS_BIG_ENDIAN)
6999 fputs (reg_names[REGNO (x)+1], file);
7001 fputs (reg_names[REGNO (x)], file);
7004 /* Print out the high order register name of a register pair. */
7005 if (WORDS_BIG_ENDIAN)
7006 fputs (reg_names[REGNO (x)], file);
7008 fputs (reg_names[REGNO (x)+1], file);
7011 /* Print out the second register name of a register pair or quad.
7012 I.e., R (%o0) => %o1. */
7013 fputs (reg_names[REGNO (x)+1], file);
7016 /* Print out the third register name of a register quad.
7017 I.e., S (%o0) => %o2. */
7018 fputs (reg_names[REGNO (x)+2], file);
7021 /* Print out the fourth register name of a register quad.
7022 I.e., T (%o0) => %o3. */
7023 fputs (reg_names[REGNO (x)+3], file);
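/* For instance (illustrative): with operand 0 allocated to the quad
   starting at %o0 on a WORDS_BIG_ENDIAN target, %H0 prints "%o0",
   %L0 prints "%o1", and %R0, %S0, %T0 print "%o1", "%o2" and "%o3"
   respectively. */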
7026 /* Print a condition code register. */
7027 if (REGNO (x) == SPARC_ICC_REG)
7029 /* We don't handle CC[X]_NOOVmode because they're not supposed to occur here. */
7031 if (GET_MODE (x) == CCmode)
7032 fputs ("%icc", file);
7033 else if (GET_MODE (x) == CCXmode)
7034 fputs ("%xcc", file);
7039 /* %fccN register */
7040 fputs (reg_names[REGNO (x)], file);
7043 /* Print the operand's address only. */
7044 output_address (XEXP (x, 0));
7047 /* In this case we need a register. Use %g0 if the
7048 operand is const0_rtx. */
7050 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7052 fputs ("%g0", file);
7059 switch (GET_CODE (x))
7061 case IOR: fputs ("or", file); break;
7062 case AND: fputs ("and", file); break;
7063 case XOR: fputs ("xor", file); break;
7064 default: output_operand_lossage ("invalid %%A operand");
7069 switch (GET_CODE (x))
7071 case IOR: fputs ("orn", file); break;
7072 case AND: fputs ("andn", file); break;
7073 case XOR: fputs ("xnor", file); break;
7074 default: output_operand_lossage ("invalid %%B operand");
7078 /* These are used by the conditional move instructions. */
7082 enum rtx_code rc = GET_CODE (x);
7086 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7087 if (mode == CCFPmode || mode == CCFPEmode)
7088 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7090 rc = reverse_condition (GET_CODE (x));
7094 case NE: fputs ("ne", file); break;
7095 case EQ: fputs ("e", file); break;
7096 case GE: fputs ("ge", file); break;
7097 case GT: fputs ("g", file); break;
7098 case LE: fputs ("le", file); break;
7099 case LT: fputs ("l", file); break;
7100 case GEU: fputs ("geu", file); break;
7101 case GTU: fputs ("gu", file); break;
7102 case LEU: fputs ("leu", file); break;
7103 case LTU: fputs ("lu", file); break;
7104 case LTGT: fputs ("lg", file); break;
7105 case UNORDERED: fputs ("u", file); break;
7106 case ORDERED: fputs ("o", file); break;
7107 case UNLT: fputs ("ul", file); break;
7108 case UNLE: fputs ("ule", file); break;
7109 case UNGT: fputs ("ug", file); break;
7110 case UNGE: fputs ("uge", file); break;
7111 case UNEQ: fputs ("ue", file); break;
7112 default: output_operand_lossage (code == 'c'
7113 ? "invalid %%c operand"
7114 : "invalid %%C operand");
7119 /* These are used by the movr instruction pattern. */
7123 enum rtx_code rc = (code == 'd'
7124 ? reverse_condition (GET_CODE (x))
7128 case NE: fputs ("ne", file); break;
7129 case EQ: fputs ("e", file); break;
7130 case GE: fputs ("gez", file); break;
7131 case LT: fputs ("lz", file); break;
7132 case LE: fputs ("lez", file); break;
7133 case GT: fputs ("gz", file); break;
7134 default: output_operand_lossage (code == 'd'
7135 ? "invalid %%d operand"
7136 : "invalid %%D operand");
7143 /* Print a sign-extended character. */
7144 int i = trunc_int_for_mode (INTVAL (x), QImode);
7145 fprintf (file, "%d", i);
7150 /* Operand must be a MEM; write its address. */
7151 if (GET_CODE (x) != MEM)
7152 output_operand_lossage ("invalid %%f operand");
7153 output_address (XEXP (x, 0));
7158 /* Print a sign-extended 32-bit value. */
7160 if (GET_CODE (x) == CONST_INT
7162 else if (GET_CODE (x) == CONST_DOUBLE)
7163 i = CONST_DOUBLE_LOW (x);
7166 output_operand_lossage ("invalid %%s operand");
7169 i = trunc_int_for_mode (i, SImode);
7170 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7175 /* Do nothing special. */
7179 /* Undocumented flag. */
7180 output_operand_lossage ("invalid operand output code");
7183 if (GET_CODE (x) == REG)
7184 fputs (reg_names[REGNO (x)], file);
7185 else if (GET_CODE (x) == MEM)
7188 /* Poor Sun assembler doesn't understand absolute addressing. */
7189 if (CONSTANT_P (XEXP (x, 0)))
7190 fputs ("%g0+", file);
7191 output_address (XEXP (x, 0));
7194 else if (GET_CODE (x) == HIGH)
7196 fputs ("%hi(", file);
7197 output_addr_const (file, XEXP (x, 0));
7200 else if (GET_CODE (x) == LO_SUM)
7202 print_operand (file, XEXP (x, 0), 0);
7203 if (TARGET_CM_MEDMID)
7204 fputs ("+%l44(", file);
7206 fputs ("+%lo(", file);
7207 output_addr_const (file, XEXP (x, 1));
7210 else if (GET_CODE (x) == CONST_DOUBLE
7211 && (GET_MODE (x) == VOIDmode
7212 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7214 if (CONST_DOUBLE_HIGH (x) == 0)
7215 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7216 else if (CONST_DOUBLE_HIGH (x) == -1
7217 && CONST_DOUBLE_LOW (x) < 0)
7218 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7220 output_operand_lossage ("long long constant not a valid immediate operand");
7222 else if (GET_CODE (x) == CONST_DOUBLE)
7223 output_operand_lossage ("floating point constant not a valid immediate operand");
7224 else { output_addr_const (file, x); }
7227 /* Target hook for assembling integer objects. The sparc version has
7228 special handling for aligned DI-mode objects. */
7231 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7233 /* ??? We only output .xword's for symbols and only then in environments
7234 where the assembler can handle them. */
7235 if (aligned_p && size == 8
7236 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7240 assemble_integer_with_op ("\t.xword\t", x);
7245 assemble_aligned_integer (4, const0_rtx);
7246 assemble_aligned_integer (4, x);
7250 return default_assemble_integer (x, size, aligned_p);
7253 /* Return the value of a code used in the .proc pseudo-op that says
7254 what kind of result this function returns. For non-C types, we pick
7255 the closest C type. */
7257 #ifndef SHORT_TYPE_SIZE
7258 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7261 #ifndef INT_TYPE_SIZE
7262 #define INT_TYPE_SIZE BITS_PER_WORD
7265 #ifndef LONG_TYPE_SIZE
7266 #define LONG_TYPE_SIZE BITS_PER_WORD
7269 #ifndef LONG_LONG_TYPE_SIZE
7270 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7273 #ifndef FLOAT_TYPE_SIZE
7274 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7277 #ifndef DOUBLE_TYPE_SIZE
7278 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7281 #ifndef LONG_DOUBLE_TYPE_SIZE
7282 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7286 sparc_type_code (register tree type)
7288 register unsigned long qualifiers = 0;
7289 register unsigned shift;
7291 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7292 setting more, since some assemblers will give an error for this. Also,
7293 we must be careful to avoid shifts of 32 bits or more to avoid getting
7294 unpredictable results. */
7296 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7298 switch (TREE_CODE (type))
7304 qualifiers |= (3 << shift);
7309 qualifiers |= (2 << shift);
7313 case REFERENCE_TYPE:
7315 qualifiers |= (1 << shift);
7319 return (qualifiers | 8);
7322 case QUAL_UNION_TYPE:
7323 return (qualifiers | 9);
7326 return (qualifiers | 10);
7329 return (qualifiers | 16);
7332 /* If this is a range type, consider it to be the underlying
7334 if (TREE_TYPE (type) != 0)
7337 /* Carefully distinguish all the standard types of C,
7338 without messing up if the language is not C. We do this by
7339 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7340 look at both the names and the above fields, but that's redundant.
7341 Any type whose size is between two C types will be considered
7342 to be the wider of the two types. Also, we do not have a
7343 special code to use for "long long", so anything wider than
7344 long is treated the same. Note that we can't distinguish
7345 between "int" and "long" in this code if they are the same
7346 size, but that's fine, since neither can the assembler. */
7348 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7349 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7351 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7352 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7354 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7355 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7358 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7361 /* If this is a range type, consider it to be the underlying
7363 if (TREE_TYPE (type) != 0)
7366 /* Carefully distinguish all the standard types of C,
7367 without messing up if the language is not C. */
7369 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7370 return (qualifiers | 6);
7373 return (qualifiers | 7);
7375 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7376 /* ??? We need to distinguish between double and float complex types,
7377 but I don't know how yet because I can't reach this code from
7378 existing front-ends. */
7379 return (qualifiers | 7); /* Who knows? */
7382 case BOOLEAN_TYPE: /* Boolean truth value type. */
7383 case LANG_TYPE: /* ? */
7387 gcc_unreachable (); /* Not a type! */
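/* Worked example (a sketch, assuming the usual C type sizes and that
   the POINTER_TYPE case, elided above, adds (1 << shift) just like
   REFERENCE_TYPE): for "unsigned short *" the first iteration adds
   the pointer qualifier (1 << 6) and the second reaches the integer
   type, giving (1 << 6) | 13 == 0x4d; a plain "float" of
   FLOAT_TYPE_SIZE precision encodes as just 6. */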
7394 /* Nested function support. */
7396 /* Emit RTL insns to initialize the variable parts of a trampoline.
7397 FNADDR is an RTX for the address of the function's pure code.
7398 CXT is an RTX for the static chain value for the function.
7400 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7401 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7402 (to store insns). This is a bit excessive. Perhaps a different
7403 mechanism would be better here.
7405 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7408 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7410 /* SPARC 32-bit trampoline:
7413 sethi %hi(static), %g2
7415 or %g2, %lo(static), %g2
7417 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7418 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7422 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7423 expand_binop (SImode, ior_optab,
7424 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7425 size_int (10), 0, 1),
7426 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7427 NULL_RTX, 1, OPTAB_DIRECT));
7430 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7431 expand_binop (SImode, ior_optab,
7432 expand_shift (RSHIFT_EXPR, SImode, cxt,
7433 size_int (10), 0, 1),
7434 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7435 NULL_RTX, 1, OPTAB_DIRECT));
7438 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7439 expand_binop (SImode, ior_optab,
7440 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7441 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7442 NULL_RTX, 1, OPTAB_DIRECT));
7445 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7446 expand_binop (SImode, ior_optab,
7447 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7448 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7449 NULL_RTX, 1, OPTAB_DIRECT));
7451 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7452 aligned on a 16-byte boundary so one flush clears it all. */
7453 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7454 if (sparc_cpu != PROCESSOR_ULTRASPARC
7455 && sparc_cpu != PROCESSOR_ULTRASPARC3
7456 && sparc_cpu != PROCESSOR_NIAGARA
7457 && sparc_cpu != PROCESSOR_NIAGARA2)
7458 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7459 plus_constant (tramp, 8)))));
7461 /* Call __enable_execute_stack after writing onto the stack to make sure
7462 the stack address is accessible. */
7463 #ifdef ENABLE_EXECUTE_STACK
7464 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7465 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
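/* Numerically (an illustrative sketch of the stores above): the four
   words written at TRAMP are

     0x03000000 | (fnaddr >> 10)     ! sethi %hi(fn), %g1
     0x05000000 | (cxt >> 10)        ! sethi %hi(static), %g2
     0x81c06000 | (fnaddr & 0x3ff)   ! jmp   %g1 + %lo(fn)
     0x8410a000 | (cxt & 0x3ff)      ! or    %g2, %lo(static), %g2

   so the static chain value lands in %g2 by the time control reaches
   FNADDR, the or executing in the jmp's delay slot. */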
7470 /* The 64-bit version is simpler because it makes more sense to load the
7471 values as "immediate" data out of the trampoline. It's also easier since
7472 we can read the PC without clobbering a register. */
7475 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7477 /* SPARC 64-bit trampoline:
7486 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7487 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7488 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7489 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7490 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7491 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7492 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7493 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7494 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7495 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7496 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7498 if (sparc_cpu != PROCESSOR_ULTRASPARC
7499 && sparc_cpu != PROCESSOR_ULTRASPARC3
7500 && sparc_cpu != PROCESSOR_NIAGARA
7501 && sparc_cpu != PROCESSOR_NIAGARA2)
7502 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7504 /* Call __enable_execute_stack after writing onto the stack to make sure
7505 the stack address is accessible. */
7506 #ifdef ENABLE_EXECUTE_STACK
7507 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7508 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7512 /* Adjust the cost of a scheduling dependency. Return the new cost of
7513 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7516 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7518 enum attr_type insn_type;
7520 if (! recog_memoized (insn))
7523 insn_type = get_attr_type (insn);
7525 if (REG_NOTE_KIND (link) == 0)
7527 /* Data dependency; DEP_INSN writes a register that INSN reads some
7530 /* If a load, then the dependence must be on the memory address;
7531 add an extra "cycle". Note that the cost could be two cycles
7532 if the reg was written late in an instruction group; we cannot tell
7534 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7537 /* Get the delay only if the address of the store is the dependence. */
7538 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7540 rtx pat = PATTERN (insn);
7541 rtx dep_pat = PATTERN (dep_insn);
7543 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7544 return cost; /* This should not happen! */
7546 /* The dependency between the two instructions was on the data that
7547 is being stored. Assume that this implies that the address of the
7548 store is not dependent. */
7549 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7552 return cost + 3; /* An approximation. */
7555 /* A shift instruction cannot receive its data from an instruction
7556 in the same cycle; add a one cycle penalty. */
7557 if (insn_type == TYPE_SHIFT)
7558 return cost + 3; /* Split before cascade into shift. */
7562 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7563 INSN writes some cycles later. */
7565 /* These are only significant for the FP unit; writing an FP reg before
7566 the FP unit has finished with it stalls the processor. */
7568 /* Reusing an integer register causes no problems. */
7569 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
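/* Example (illustrative): in the dependent pair

     (set (reg %g1) (plus (reg %g2) (const_int 4)))
     (set (reg %g3) (mem (reg %g1)))

   the load's address depends on the preceding add, so the
   data-dependency cost is increased per the load case above; a store
   whose *data* (rather than address) comes from the previous insn
   keeps the plain COST. */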
7577 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7579 enum attr_type insn_type, dep_type;
7580 rtx pat = PATTERN (insn);
7581 rtx dep_pat = PATTERN (dep_insn);
7583 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7586 insn_type = get_attr_type (insn);
7587 dep_type = get_attr_type (dep_insn);
7589 switch (REG_NOTE_KIND (link))
7592 /* Data dependency; DEP_INSN writes a register that INSN reads some
7599 /* Get the delay iff the address of the store is the dependence. */
7600 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7603 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7610 /* If a load, then the dependence must be on the memory address. If
7611 the addresses aren't equal, then it might be a false dependency. */
7612 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7614 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7615 || GET_CODE (SET_DEST (dep_pat)) != MEM
7616 || GET_CODE (SET_SRC (pat)) != MEM
7617 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7618 XEXP (SET_SRC (pat), 0)))
7626 /* Compare to branch latency is 0. There is no benefit from
7627 separating compare and branch. */
7628 if (dep_type == TYPE_COMPARE)
7630 /* Floating point compare to branch latency is less than
7631 compare to conditional move. */
7632 if (dep_type == TYPE_FPCMP)
7641 /* Anti-dependencies only penalize the fpu unit. */
7642 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7654 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
7658 case PROCESSOR_SUPERSPARC:
7659 cost = supersparc_adjust_cost (insn, link, dep, cost);
7661 case PROCESSOR_HYPERSPARC:
7662 case PROCESSOR_SPARCLITE86X:
7663 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7672 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7673 int sched_verbose ATTRIBUTE_UNUSED,
7674 int max_ready ATTRIBUTE_UNUSED)
7679 sparc_use_sched_lookahead (void)
7681 if (sparc_cpu == PROCESSOR_NIAGARA
7682 || sparc_cpu == PROCESSOR_NIAGARA2)
7684 if (sparc_cpu == PROCESSOR_ULTRASPARC
7685 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7687 if ((1 << sparc_cpu) &
7688 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7689 (1 << PROCESSOR_SPARCLITE86X)))
7695 sparc_issue_rate (void)
7699 case PROCESSOR_NIAGARA:
7700 case PROCESSOR_NIAGARA2:
7704 /* Assume V9 processors are capable of at least dual-issue. */
7706 case PROCESSOR_SUPERSPARC:
7708 case PROCESSOR_HYPERSPARC:
7709 case PROCESSOR_SPARCLITE86X:
7711 case PROCESSOR_ULTRASPARC:
7712 case PROCESSOR_ULTRASPARC3:
7718 set_extends (rtx insn)
7720 register rtx pat = PATTERN (insn);
7722 switch (GET_CODE (SET_SRC (pat)))
7724 /* Load and some shift instructions zero extend. */
7727 /* sethi clears the high bits. */
7729 /* LO_SUM is used with sethi; sethi cleared the high
7730 bits and the values used with lo_sum are positive. */
7732 /* Store flag stores 0 or 1. */
7742 rtx op0 = XEXP (SET_SRC (pat), 0);
7743 rtx op1 = XEXP (SET_SRC (pat), 1);
7744 if (GET_CODE (op1) == CONST_INT)
7745 return INTVAL (op1) >= 0;
7746 if (GET_CODE (op0) != REG)
7748 if (sparc_check_64 (op0, insn) == 1)
7750 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7755 rtx op0 = XEXP (SET_SRC (pat), 0);
7756 rtx op1 = XEXP (SET_SRC (pat), 1);
7757 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7759 if (GET_CODE (op1) == CONST_INT)
7760 return INTVAL (op1) >= 0;
7761 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7764 return GET_MODE (SET_SRC (pat)) == SImode;
7765 /* Positive integers leave the high bits zero. */
7767 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7769 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7772 return - (GET_MODE (SET_SRC (pat)) == SImode);
7774 return sparc_check_64 (SET_SRC (pat), insn);
7780 /* We _ought_ to have only one kind per function, but... */
7781 static GTY(()) rtx sparc_addr_diff_list;
7782 static GTY(()) rtx sparc_addr_list;
7785 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7787 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7789 sparc_addr_diff_list
7790 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7792 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7796 sparc_output_addr_vec (rtx vec)
7798 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7799 int idx, vlen = XVECLEN (body, 0);
7801 #ifdef ASM_OUTPUT_ADDR_VEC_START
7802 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7805 #ifdef ASM_OUTPUT_CASE_LABEL
7806 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7809 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7812 for (idx = 0; idx < vlen; idx++)
7814 ASM_OUTPUT_ADDR_VEC_ELT
7815 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7818 #ifdef ASM_OUTPUT_ADDR_VEC_END
7819 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7824 sparc_output_addr_diff_vec (rtx vec)
7826 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7827 rtx base = XEXP (XEXP (body, 0), 0);
7828 int idx, vlen = XVECLEN (body, 1);
7830 #ifdef ASM_OUTPUT_ADDR_VEC_START
7831 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7834 #ifdef ASM_OUTPUT_CASE_LABEL
7835 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7838 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7841 for (idx = 0; idx < vlen; idx++)
7843 ASM_OUTPUT_ADDR_DIFF_ELT
7846 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7847 CODE_LABEL_NUMBER (base));
7850 #ifdef ASM_OUTPUT_ADDR_VEC_END
7851 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7856 sparc_output_deferred_case_vectors (void)
7861 if (sparc_addr_list == NULL_RTX
7862 && sparc_addr_diff_list == NULL_RTX)
7865 /* Align to cache line in the function's code section. */
7866 switch_to_section (current_function_section ());
7868 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7870 ASM_OUTPUT_ALIGN (asm_out_file, align);
7872 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7873 sparc_output_addr_vec (XEXP (t, 0));
7874 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7875 sparc_output_addr_diff_vec (XEXP (t, 0));
7877 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7880 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7881 unknown. Return 1 if the high bits are zero, -1 if the register is sign-extended. */
7884 sparc_check_64 (rtx x, rtx insn)
7886 /* If a register is set only once, it is safe to ignore insns this
7887 code does not know how to handle. The loop will either recognize
7888 the single set and return the correct value or fail to recognize
7893 gcc_assert (GET_CODE (x) == REG);
7895 if (GET_MODE (x) == DImode)
7896 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7898 if (flag_expensive_optimizations
7899 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
7905 insn = get_last_insn_anywhere ();
7910 while ((insn = PREV_INSN (insn)))
7912 switch (GET_CODE (insn))
7925 rtx pat = PATTERN (insn);
7926 if (GET_CODE (pat) != SET)
7928 if (rtx_equal_p (x, SET_DEST (pat)))
7929 return set_extends (insn);
7930 if (y && rtx_equal_p (y, SET_DEST (pat)))
7931 return set_extends (insn);
7932 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7940 /* Returns assembly code to perform a DImode shift using
7941 a 64-bit global or out register on SPARC-V8+. */
7943 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7945 static char asm_code[60];
7947 /* The scratch register is only required when the destination
7948 register is not a 64-bit global or out register. */
7949 if (which_alternative != 2)
7950 operands[3] = operands[0];
7952 /* We can only shift by constants <= 63. */
7953 if (GET_CODE (operands[2]) == CONST_INT)
7954 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7956 if (GET_CODE (operands[1]) == CONST_INT)
7958 output_asm_insn ("mov\t%1, %3", operands);
7962 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7963 if (sparc_check_64 (operands[1], insn) <= 0)
7964 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7965 output_asm_insn ("or\t%L1, %3, %3", operands);
7968 strcpy (asm_code, opcode);
7970 if (which_alternative != 2)
7971 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7973 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
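/* For example (an illustrative sketch, assuming the first alternative
   so the scratch operand 3 aliases the destination, and an input not
   known to be zero-extended), a left shift with OPCODE "sllx" emits:

     sllx  %H1, 32, %0        ! high word into the upper half
     srl   %L1, 0, %L1        ! zero-extend the low word
     or    %L1, %0, %0        ! full 64-bit value assembled in %0
     sllx  %0, %2, %L0        ! the actual 64-bit shift
     srlx  %L0, 32, %H0       ! split the result back into a pair  */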
7976 /* Output rtl to increment the profiler label LABELNO
7977 for profiling a function entry. */
7980 sparc_profile_hook (int labelno)
7985 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7986 if (NO_PROFILE_COUNTERS)
7988 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
7992 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7993 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7994 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7998 #ifdef OBJECT_FORMAT_ELF
8000 sparc_elf_asm_named_section (const char *name, unsigned int flags,
8003 if (flags & SECTION_MERGE)
8005 /* entsize cannot be expressed in this section-attributes encoding style. */
8007 default_elf_asm_named_section (name, flags, decl);
8011 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
8013 if (!(flags & SECTION_DEBUG))
8014 fputs (",#alloc", asm_out_file);
8015 if (flags & SECTION_WRITE)
8016 fputs (",#write", asm_out_file);
8017 if (flags & SECTION_TLS)
8018 fputs (",#tls", asm_out_file);
8019 if (flags & SECTION_CODE)
8020 fputs (",#execinstr", asm_out_file);
8022 /* ??? Handle SECTION_BSS. */
8024 fputc ('\n', asm_out_file);
8026 #endif /* OBJECT_FORMAT_ELF */
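/* For instance (illustrative), a writable TLS data section such as
   ".tdata" comes out as

     .section ".tdata",#alloc,#write,#tls

   using the Solaris-style #attribute notation rather than the usual
   ELF flag string. */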
8028 /* We do not allow indirect calls to be optimized into sibling calls.
8030 We cannot use sibling calls when delayed branches are disabled
8031 because they will likely require the call delay slot to be filled.
8033 Also, on SPARC 32-bit we cannot emit a sibling call when the
8034 current function returns a structure. This is because the "unimp
8035 after call" convention would cause the callee to return to the
8036 wrong place. The generic code already disallows cases where the
8037 function being called returns a structure.
8039 It may seem strange how this last case could occur. Usually there
8040 is code after the call which jumps to epilogue code which dumps the
8041 return value into the struct return area. That ought to invalidate
8042 the sibling call, right? Well, in the C++ case we can end up passing
8043 the pointer to the struct return area to a constructor (which returns
8044 void) and then nothing else happens. Such a sibling call would look
8045 valid without the added check here.
8047 VxWorks PIC PLT entries require the global pointer to be initialized
8048 on entry. We therefore can't emit sibling calls to them. */
8050 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8053 && flag_delayed_branch
8054 && (TARGET_ARCH64 || ! cfun->returns_struct)
8055 && !(TARGET_VXWORKS_RTP
8057 && !targetm.binds_local_p (decl)));
8060 /* libfunc renaming. */
8061 #include "config/gofast.h"
8064 sparc_init_libfuncs (void)
8068 /* Use the subroutines that Sun's library provides for integer
8069 multiply and divide. The `*' prevents an underscore from
8070 being prepended by the compiler. .umul is a little faster than .mul. */
8072 set_optab_libfunc (smul_optab, SImode, "*.umul");
8073 set_optab_libfunc (sdiv_optab, SImode, "*.div");
8074 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
8075 set_optab_libfunc (smod_optab, SImode, "*.rem");
8076 set_optab_libfunc (umod_optab, SImode, "*.urem");
8078 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
8079 set_optab_libfunc (add_optab, TFmode, "_Q_add");
8080 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
8081 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
8082 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
8083 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
8085 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
8086 is because with soft-float, the SFmode and DFmode sqrt
8087 instructions will be absent, and the compiler will notice and
8088 try to use the TFmode sqrt instruction for calls to the
8089 builtin function sqrt, but this fails. */
8091 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
8093 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
8094 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
8095 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
8096 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
8097 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
8098 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
8100 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
8101 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
8102 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
8103 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
8105 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
8106 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
8107 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
8108 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
8110 if (DITF_CONVERSION_LIBFUNCS)
8112 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
8113 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
8114 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
8115 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
8118 if (SUN_CONVERSION_LIBFUNCS)
8120 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
8121 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
8122 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
8123 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
8128 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
8129 do not exist in the library. Make sure the compiler does not
8130 emit calls to them by accident. (It should always use the
8131 hardware instructions.) */
8132 set_optab_libfunc (smul_optab, SImode, 0);
8133 set_optab_libfunc (sdiv_optab, SImode, 0);
8134 set_optab_libfunc (udiv_optab, SImode, 0);
8135 set_optab_libfunc (smod_optab, SImode, 0);
8136 set_optab_libfunc (umod_optab, SImode, 0);
8138 if (SUN_INTEGER_MULTIPLY_64)
8140 set_optab_libfunc (smul_optab, DImode, "__mul64");
8141 set_optab_libfunc (sdiv_optab, DImode, "__div64");
8142 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
8143 set_optab_libfunc (smod_optab, DImode, "__rem64");
8144 set_optab_libfunc (umod_optab, DImode, "__urem64");
8147 if (SUN_CONVERSION_LIBFUNCS)
8149 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
8150 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
8151 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
8152 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
8156 gofast_maybe_init_libfuncs ();
8159 #define def_builtin(NAME, CODE, TYPE) \
8160 add_builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
8163 /* Implement the TARGET_INIT_BUILTINS target hook.
8164 Create builtin functions for special SPARC instructions. */
8167 sparc_init_builtins (void)
8170 sparc_vis_init_builtins ();
8173 /* Create builtin functions for VIS 1.0 instructions. */
8176 sparc_vis_init_builtins (void)
8178 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
8179 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
8180 tree v4hi = build_vector_type (intHI_type_node, 4);
8181 tree v2hi = build_vector_type (intHI_type_node, 2);
8182 tree v2si = build_vector_type (intSI_type_node, 2);
8184 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
8185 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
8186 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
8187 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
8188 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
8189 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
8190 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
8191 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
8192 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
8193 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
8194 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
8195 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
8196 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
8198 intDI_type_node, 0);
8199 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
8201 intDI_type_node, 0);
8202 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
8204 intSI_type_node, 0);
8205 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
8207 intDI_type_node, 0);
8209 /* Packing and expanding vectors. */
8210 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
8211 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
8212 v8qi_ftype_v2si_v8qi);
8213 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
8215 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
8216 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
8217 v8qi_ftype_v4qi_v4qi);
8219 /* Multiplications. */
8220 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
8221 v4hi_ftype_v4qi_v4hi);
8222 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
8223 v4hi_ftype_v4qi_v2hi);
8224 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
8225 v4hi_ftype_v4qi_v2hi);
8226 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
8227 v4hi_ftype_v8qi_v4hi);
8228 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8229 v4hi_ftype_v8qi_v4hi);
8230 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8231 v2si_ftype_v4qi_v2hi);
8232 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8233 v2si_ftype_v4qi_v2hi);
8235 /* Data aligning. */
8236 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8237 v4hi_ftype_v4hi_v4hi);
8238 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8239 v8qi_ftype_v8qi_v8qi);
8240 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8241 v2si_ftype_v2si_v2si);
8242 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8245 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8248 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8251 /* Pixel distance. */
8252 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8253 di_ftype_v8qi_v8qi_di);
8256 /* Handle TARGET_EXPAND_BUILTIN target hook.
8257 Expand builtin functions for SPARC intrinsics. */
8260 sparc_expand_builtin (tree exp, rtx target,
8261 rtx subtarget ATTRIBUTE_UNUSED,
8262 enum machine_mode tmode ATTRIBUTE_UNUSED,
8263 int ignore ATTRIBUTE_UNUSED)
8266 call_expr_arg_iterator iter;
8267 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8268 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8270 enum machine_mode mode[4];
8273 mode[0] = insn_data[icode].operand[0].mode;
8275 || GET_MODE (target) != mode[0]
8276 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
8277 op[0] = gen_reg_rtx (mode[0]);
8281 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8284 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8285 op[arg_count] = expand_normal (arg);
8287 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8289 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8295 pat = GEN_FCN (icode) (op[0], op[1]);
8298 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8301 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8316 sparc_vis_mul8x16 (int e8, int e16)
8318 return (e8 * e16 + 128) / 256;
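/* Worked example: e8 = 200 and e16 = 100 give
   (200 * 100 + 128) / 256 == 78 under C integer division, i.e. the
   8-bit element acts as a fixed-point fraction and the +128 rounds
   before the divide by 256 (a right shift by 8). */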
8321 /* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
8322 by FNCODE. All of the elements in the ELTS0 and ELTS1 lists must be integer
8323 constants. A tree list with the results of the multiplications is returned,
8324 and each element in the list is of INNER_TYPE. */
8327 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8329 tree n_elts = NULL_TREE;
8334 case CODE_FOR_fmul8x16_vis:
8335 for (; elts0 && elts1;
8336 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8339 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8340 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8341 n_elts = tree_cons (NULL_TREE,
8342 build_int_cst (inner_type, val),
8347 case CODE_FOR_fmul8x16au_vis:
8348 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8350 for (; elts0; elts0 = TREE_CHAIN (elts0))
8353 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8355 n_elts = tree_cons (NULL_TREE,
8356 build_int_cst (inner_type, val),
8361 case CODE_FOR_fmul8x16al_vis:
8362 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8364 for (; elts0; elts0 = TREE_CHAIN (elts0))
8367 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8369 n_elts = tree_cons (NULL_TREE,
8370 build_int_cst (inner_type, val),
8379 return nreverse (n_elts);
8382 /* Handle TARGET_FOLD_BUILTIN target hook.
8383 Fold builtin functions for SPARC intrinsics. If IGNORE is true, the
8384 result of the function call is ignored. NULL_TREE is returned if the
8385 function could not be folded. */
8388 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8390 tree arg0, arg1, arg2;
8391 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8392 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
8395 && icode != CODE_FOR_alignaddrsi_vis
8396 && icode != CODE_FOR_alignaddrdi_vis)
8397 return fold_convert (rtype, integer_zero_node);
8401 case CODE_FOR_fexpand_vis:
8402 arg0 = TREE_VALUE (arglist);
8405 if (TREE_CODE (arg0) == VECTOR_CST)
8407 tree inner_type = TREE_TYPE (rtype);
8408 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8409 tree n_elts = NULL_TREE;
8411 for (; elts; elts = TREE_CHAIN (elts))
8413 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8414 n_elts = tree_cons (NULL_TREE,
8415 build_int_cst (inner_type, val),
8418 return build_vector (rtype, nreverse (n_elts));
8422 case CODE_FOR_fmul8x16_vis:
8423 case CODE_FOR_fmul8x16au_vis:
8424 case CODE_FOR_fmul8x16al_vis:
8425 arg0 = TREE_VALUE (arglist);
8426 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8430 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8432 tree inner_type = TREE_TYPE (rtype);
8433 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8434 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8435 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
8438 return build_vector (rtype, n_elts);
8442 case CODE_FOR_fpmerge_vis:
8443 arg0 = TREE_VALUE (arglist);
8444 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8448 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8450 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8451 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8452 tree n_elts = NULL_TREE;
8454 for (; elts0 && elts1;
8455 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8457 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8458 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8461 return build_vector (rtype, nreverse (n_elts));
8465 case CODE_FOR_pdist_vis:
8466 arg0 = TREE_VALUE (arglist);
8467 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8468 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8473 if (TREE_CODE (arg0) == VECTOR_CST
8474 && TREE_CODE (arg1) == VECTOR_CST
8475 && TREE_CODE (arg2) == INTEGER_CST)
8478 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8479 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8480 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8481 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8483 for (; elts0 && elts1;
8484 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8486 unsigned HOST_WIDE_INT
8487 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8488 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8489 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8490 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8492 unsigned HOST_WIDE_INT l;
8495 overflow |= neg_double (low1, high1, &l, &h);
8496 overflow |= add_double (low0, high0, l, h, &l, &h);
8498 overflow |= neg_double (l, h, &l, &h);
8500 overflow |= add_double (low, high, l, h, &low, &high);
8503 gcc_assert (overflow == 0);
8505 return build_int_cst_wide (rtype, low, high);
8515 /* ??? This duplicates information provided to the compiler by the
8516 ??? scheduler description. Some day, teach genautomata to output
8517 ??? the latencies and then CSE will just use that. */
8520 sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
8521 bool speed ATTRIBUTE_UNUSED)
8523 enum machine_mode mode = GET_MODE (x);
8524 bool float_mode_p = FLOAT_MODE_P (mode);
8529 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8547 if (GET_MODE (x) == VOIDmode
8548 && ((CONST_DOUBLE_HIGH (x) == 0
8549 && CONST_DOUBLE_LOW (x) < 0x1000)
8550 || (CONST_DOUBLE_HIGH (x) == -1
8551 && CONST_DOUBLE_LOW (x) < 0
8552 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8559 /* If outer-code was a sign or zero extension, a cost
8560 of COSTS_N_INSNS (1) was already added in. This is
8561 why we are subtracting it back out. */
8562 if (outer_code == ZERO_EXTEND)
8564 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8566 else if (outer_code == SIGN_EXTEND)
8568 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8570 else if (float_mode_p)
8572 *total = sparc_costs->float_load;
8576 *total = sparc_costs->int_load;
8584 *total = sparc_costs->float_plusminus;
8586 *total = COSTS_N_INSNS (1);
8591 *total = sparc_costs->float_mul;
8592 else if (! TARGET_HARD_MUL)
8593 *total = COSTS_N_INSNS (25);
8599 if (sparc_costs->int_mul_bit_factor)
8603 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8605 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
8606 for (nbits = 0; value != 0; value &= value - 1)
8609 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8610 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8612 rtx x1 = XEXP (x, 1);
8613 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8614 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8616 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8618 for (; value2 != 0; value2 &= value2 - 1)
8626 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8627 bit_cost = COSTS_N_INSNS (bit_cost);
8631 *total = sparc_costs->int_mulX + bit_cost;
8633 *total = sparc_costs->int_mul + bit_cost;
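/* Worked example (illustrative, assuming the elided "at least 3 bits"
   floor on NBITS): a multiply by 0xffff has nbits == 16, so with an
   int_mul_bit_factor of 2 the extra cost is
   COSTS_N_INSNS ((16 - 3) / 2) == COSTS_N_INSNS (6), added on top of
   int_mul or int_mulX. */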
8640 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8650 *total = sparc_costs->float_div_df;
8652 *total = sparc_costs->float_div_sf;
8657 *total = sparc_costs->int_divX;
8659 *total = sparc_costs->int_div;
8666 *total = COSTS_N_INSNS (1);
8673 case UNSIGNED_FLOAT:
8677 case FLOAT_TRUNCATE:
8678 *total = sparc_costs->float_move;
8683 *total = sparc_costs->float_sqrt_df;
8685 *total = sparc_costs->float_sqrt_sf;
8690 *total = sparc_costs->float_cmp;
8692 *total = COSTS_N_INSNS (1);
8697 *total = sparc_costs->float_cmove;
8699 *total = sparc_costs->int_cmove;
8703 /* Handle the NAND vector patterns. */
8704 if (sparc_vector_mode_supported_p (GET_MODE (x))
8705 && GET_CODE (XEXP (x, 0)) == NOT
8706 && GET_CODE (XEXP (x, 1)) == NOT)
8708 *total = COSTS_N_INSNS (1);
8719 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
8720 This is achieved by means of a manual dynamic stack space allocation in
8721 the current frame. We make the assumption that SEQ doesn't contain any
8722 function calls, with the possible exception of calls to the PIC helper. */
8725 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8727 /* We must preserve the lowest 16 words for the register save area. */
8728 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
8729 /* We really need only 2 words of fresh stack space. */
8730 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
8733 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
8734 SPARC_STACK_BIAS + offset));
8736 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
8737 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8739 emit_insn (gen_rtx_SET (VOIDmode,
8740 adjust_address (slot, word_mode, UNITS_PER_WORD),
8744 emit_insn (gen_rtx_SET (VOIDmode,
8746 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8747 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8748 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
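/* Stack layout sketch (illustrative): after the pointer decrement,

     %sp + SPARC_STACK_BIAS + 0*UNITS_PER_WORD   register save area
     ...                                         (16 words, untouched)
     %sp + SPARC_STACK_BIAS + 16*UNITS_PER_WORD  spill slot for REG
     %sp + SPARC_STACK_BIAS + 17*UNITS_PER_WORD  spill slot for REG2

   and the matching stack_pointer_inc at the end undoes the
   allocation. */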
8751 /* Output the assembler code for a thunk function. THUNK_DECL is the
8752 declaration for the thunk function itself, FUNCTION is the decl for
8753 the target function. DELTA is an immediate constant offset to be
8754 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8755 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8758 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8759 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8762 rtx this_rtx, insn, funexp;
8763 unsigned int int_arg_first;
8765 reload_completed = 1;
8766 epilogue_completed = 1;
8768 emit_note (NOTE_INSN_PROLOGUE_END);
8770 if (flag_delayed_branch)
8772 /* We will emit a regular sibcall below, so we need to instruct
8773 output_sibcall that we are in a leaf function. */
8774 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8776 /* This will cause final.c to invoke leaf_renumber_regs so we
8777 must behave as if we were in a not-yet-leafified function. */
8778 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8782 /* We will emit the sibcall manually below, so we will need to
8783 manually spill non-leaf registers. */
8784 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8786 /* We really are in a leaf function. */
8787 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8790 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8791 returns a structure, the structure return pointer is there instead. */
8792 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8793 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
8795 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
8797 /* Add DELTA. When possible use a plain add, otherwise load it into
8798 a register first. */
8801 rtx delta_rtx = GEN_INT (delta);
8803 if (! SPARC_SIMM13_P (delta))
8805 rtx scratch = gen_rtx_REG (Pmode, 1);
8806 emit_move_insn (scratch, delta_rtx);
8807 delta_rtx = scratch;
8810 /* THIS_RTX += DELTA. */
8811 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
8814 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
8817 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8818 rtx scratch = gen_rtx_REG (Pmode, 1);
8820 gcc_assert (vcall_offset < 0);
8822 /* SCRATCH = *THIS_RTX. */
8823 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
8825 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8826 may not have any available scratch register at this point. */
8827 if (SPARC_SIMM13_P (vcall_offset))
8829 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8830 else if (! fixed_regs[5]
8831 /* The below sequence is made up of at least 2 insns,
8832 while the default method may need only one. */
8833 && vcall_offset < -8192)
8835 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8836 emit_move_insn (scratch2, vcall_offset_rtx);
8837 vcall_offset_rtx = scratch2;
8841 rtx increment = GEN_INT (-4096);
8843 /* VCALL_OFFSET is a negative number whose typical range can be
8844 estimated as -32768..0 in 32-bit mode. In almost all cases
8845 it is therefore cheaper to emit multiple add insns than
8846 spilling and loading the constant into a register (at least
8848 while (! SPARC_SIMM13_P (vcall_offset))
8850 emit_insn (gen_add2_insn (scratch, increment));
8851 vcall_offset += 4096;
8853 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
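/* Worked example (illustrative): VCALL_OFFSET == -20000 is not a
   valid simm13, so the loop emits four "add %g1, -4096, %g1" insns,
   leaving a residual offset of -3616 (-20000 + 4*4096), which then
   fits in the 13-bit immediate of the final load below. */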
8856 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
8857 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8858 gen_rtx_PLUS (Pmode,
8860 vcall_offset_rtx)));
8862 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
8863 emit_insn (gen_add2_insn (this_rtx, scratch));
8866 /* Generate a tail call to the target function. */
8867 if (! TREE_USED (function))
8869 assemble_external (function);
8870 TREE_USED (function) = 1;
8872 funexp = XEXP (DECL_RTL (function), 0);
8874 if (flag_delayed_branch)
8876 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8877 insn = emit_call_insn (gen_sibcall (funexp));
8878 SIBLING_CALL_P (insn) = 1;
8882 /* The hoops we have to jump through in order to generate a sibcall
8883 without using delay slots... */
8884 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8888 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8889 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8891 /* Delay emitting the PIC helper function because it needs to
8892 change the section and we are emitting assembly code. */
8893 load_pic_register (true); /* clobbers %o7 */
8894 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8897 emit_and_preserve (seq, spill_reg, spill_reg2);
8899 else if (TARGET_ARCH32)
8901 emit_insn (gen_rtx_SET (VOIDmode,
8903 gen_rtx_HIGH (SImode, funexp)));
8904 emit_insn (gen_rtx_SET (VOIDmode,
8906 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8908 else /* TARGET_ARCH64 */
8910 switch (sparc_cmodel)
8914 /* The destination can serve as a temporary. */
8915 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8920 /* The destination cannot serve as a temporary. */
8921 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8923 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8926 emit_and_preserve (seq, spill_reg, 0);
8934 emit_jump_insn (gen_indirect_jump (scratch));
8939 /* Run just enough of rest_of_compilation to get the insns emitted.
8940 There's not really enough bulk here to make other passes such as
8941 instruction scheduling worthwhile. Note that use_thunk calls
8942 assemble_start_function and assemble_end_function. */
8943 insn = get_insns ();
8944 insn_locators_alloc ();
8945 shorten_branches (insn);
8946 final_start_function (insn, file, 1);
8947 final (insn, file, 1);
8948 final_end_function ();
8950 reload_completed = 0;
8951 epilogue_completed = 0;
8954 /* Return true if sparc_output_mi_thunk would be able to output the
8955 assembler code for the thunk function specified by the arguments
8956 it is passed, and false otherwise. */
8958 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
8959 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8960 HOST_WIDE_INT vcall_offset,
8961 const_tree function ATTRIBUTE_UNUSED)
8963 /* Bound the loop used in the default method above. */
8964 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8967 /* How to allocate a 'struct machine_function'. */
8969 static struct machine_function *
8970 sparc_init_machine_status (void)
8972 return GGC_CNEW (struct machine_function);
8975 /* Locate some local-dynamic symbol still in use by this function
8976 so that we can print its name in local-dynamic base patterns. */
8979 get_some_local_dynamic_name (void)
8983 if (cfun->machine->some_ld_name)
8984 return cfun->machine->some_ld_name;
8986 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8988 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8989 return cfun->machine->some_ld_name;
8995 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
9000 && GET_CODE (x) == SYMBOL_REF
9001 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
9003 cfun->machine->some_ld_name = XSTR (x, 0);
/* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
   This is called from dwarf2out.c to emit call frame instructions
   for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs.  */

static void
sparc_dwarf_handle_frame_unspec (const char *label,
                                 rtx pattern ATTRIBUTE_UNUSED,
                                 int index ATTRIBUTE_UNUSED)
{
  gcc_assert (index == UNSPECV_SAVEW);
  dwarf2out_window_save (label);
}

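/* UNSPECV_SAVEW tags the `save' instruction that allocates a new register
   window; dwarf2out_window_save records a DW_CFA_GNU_window_save opcode at
   LABEL.  Roughly, this tells the unwinder that from this point the
   caller's %o registers are to be found as the frame's %i registers, per
   the usual SPARC window shift.  */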
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void
sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.word\t%r_tls_dtpoff32(", file);
      break;
    case 8:
      fputs ("\t.xword\t%r_tls_dtpoff64(", file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs (")", file);
}

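/* For instance, a size-4 entry for a TLS symbol `foo' is printed as

        .word   %r_tls_dtpoff32(foo)

   which the assembler turns into an R_SPARC_TLS_DTPOFF32 relocation,
   i.e. foo's offset from the start of its module's dynamic TLS block.  */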
/* Do whatever processing is required at the end of a file.  */

static void
sparc_file_end (void)
{
  /* If we haven't emitted the special PIC helper function, do so now.  */
  if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
    emit_pic_helper ();

  if (NEED_INDICATE_EXEC_STACK)
    file_end_indicate_exec_stack ();
}

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
sparc_mangle_type (const_tree type)
{
  if (!TARGET_64BIT
      && TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif

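/* Effect, for illustration (assuming the Itanium C++ ABI builtin codes,
   where `e' is long double and `g' is __float128): on 32-bit SPARC with
   128-bit long double, `void f (long double)' mangles as _Z1fg rather
   than the default _Z1fe.  */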
/* Expand code to perform an 8-bit or 16-bit compare and swap by doing a
   32-bit compare and swap on the word containing the byte or half-word.  */

void
sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
{
  rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
  rtx addr = gen_reg_rtx (Pmode);
  rtx off = gen_reg_rtx (SImode);
  rtx oldv = gen_reg_rtx (SImode);
  rtx newv = gen_reg_rtx (SImode);
  rtx oldvalue = gen_reg_rtx (SImode);
  rtx newvalue = gen_reg_rtx (SImode);
  rtx res = gen_reg_rtx (SImode);
  rtx resv = gen_reg_rtx (SImode);
  rtx memsi, val, mask, end_label, loop_label, cc;

  emit_insn (gen_rtx_SET (VOIDmode, addr,
                          gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));

  if (Pmode != SImode)
    addr1 = gen_lowpart (SImode, addr1);
  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_AND (SImode, addr1, GEN_INT (3))));

  memsi = gen_rtx_MEM (SImode, addr);
  set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
  MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);

  val = force_reg (SImode, memsi);

  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_XOR (SImode, off,
                                       GEN_INT (GET_MODE (mem) == QImode
                                                ? 3 : 2))));

  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
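  /* OFF is now a shift count in bits.  SPARC is big-endian, so the byte at
     offset B within a word occupies bits (3 - B) * 8 .. (3 - B) * 8 + 7
     counting from the least significant bit (similarly (2 - B) * 8 for a
     half-word); the XOR with 3 resp. 2 above implements that reflection
     before the multiply-by-8 shift.  */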
  if (GET_MODE (mem) == QImode)
    mask = force_reg (SImode, GEN_INT (0xff));
  else
    mask = force_reg (SImode, GEN_INT (0xffff));

  emit_insn (gen_rtx_SET (VOIDmode, mask,
                          gen_rtx_ASHIFT (SImode, mask, off)));

  emit_insn (gen_rtx_SET (VOIDmode, val,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       val)));

  oldval = gen_lowpart (SImode, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_ASHIFT (SImode, oldval, off)));

  newval = gen_lowpart_common (SImode, newval);
  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_ASHIFT (SImode, newval, off)));

  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_AND (SImode, oldv, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_AND (SImode, newv, mask)));
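  /* Invariant for the loop below: VAL holds the containing word with the
     field's bits cleared, while OLDV and NEWV hold the expected and new
     field values already shifted into position; each iteration rebuilds
     the full 32-bit words by ORing these together.  */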
  end_label = gen_label_rtx ();
  loop_label = gen_label_rtx ();
  emit_label (loop_label);

  emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
                          gen_rtx_IOR (SImode, oldv, val)));

  emit_insn (gen_rtx_SET (VOIDmode, newvalue,
                          gen_rtx_IOR (SImode, newv, val)));

  emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));

  emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);

  emit_insn (gen_rtx_SET (VOIDmode, resv,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       res)));

  cc = gen_compare_reg_1 (NE, resv, val);
  emit_insn (gen_rtx_SET (VOIDmode, val, resv));

  /* Use cbranchcc4 to separate the compare and branch, so that VAL can
     be updated from RESV between them.  */
  emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
                                  cc, const0_rtx, loop_label));

  emit_label (end_label);

  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_AND (SImode, res, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_LSHIFTRT (SImode, res, off)));

  emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
}

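/* For reference, the expansion above implements roughly the following C
   sketch (illustrative names only; cas32 stands in for the 32-bit
   sync_compare_and_swapsi pattern; the real code emits RTL, not C):

     uint32_t *wp = (uint32_t *) ((uintptr_t) p & -4);
     int sh = (((uintptr_t) p & 3) ^ (is_byte ? 3 : 2)) << 3;
     uint32_t mask = (is_byte ? 0xffu : 0xffffu) << sh;
     uint32_t val = *wp & ~mask;
     uint32_t oldv = ((uint32_t) expected << sh) & mask;
     uint32_t newv = ((uint32_t) desired << sh) & mask;
     uint32_t res, resv;

     for (;;)
       {
         res = cas32 (wp, oldv | val, newv | val);
         if (res == (oldv | val))
           break;
         resv = res & ~mask;
         if (resv == val)
           break;
         val = resv;
       }
     result = (res & mask) >> sh;

   The loop keeps retrying only while bytes outside the field change under
   our feet; once the surrounding bytes match, a CAS failure can only mean
   the field itself differed from EXPECTED, so we stop, and in either case
   hand back the old field contents.  */
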
/* Implement TARGET_FRAME_POINTER_REQUIRED.  */

static bool
sparc_frame_pointer_required (void)
{
  return !(leaf_function_p () && only_leaf_regs_used ());
}

/* The way this is structured, we can't eliminate SFP in favor of SP
   if the frame pointer is required: we want to use the SFP->HFP elimination
   in that case.  But the test in update_eliminables doesn't know we are
   assuming below that we only do the former elimination.  */

static bool
sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == HARD_FRAME_POINTER_REGNUM
          || !targetm.frame_pointer_required ());
}

#include "gt-sparc.h"