1 /* Subroutines for insn-output.c for SPARC.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com)
6 64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
27 #include "coretypes.h"
32 #include "hard-reg-set.h"
34 #include "insn-config.h"
35 #include "insn-codes.h"
36 #include "conditions.h"
38 #include "insn-attr.h"
49 #include "target-def.h"
50 #include "cfglayout.h"
52 #include "langhooks.h"
58 struct processor_costs cypress_costs = {
59 COSTS_N_INSNS (2), /* int load */
60 COSTS_N_INSNS (2), /* int signed load */
61 COSTS_N_INSNS (2), /* int zeroed load */
62 COSTS_N_INSNS (2), /* float load */
63 COSTS_N_INSNS (5), /* fmov, fneg, fabs */
64 COSTS_N_INSNS (5), /* fadd, fsub */
65 COSTS_N_INSNS (1), /* fcmp */
66 COSTS_N_INSNS (1), /* fmov, fmovr */
67 COSTS_N_INSNS (7), /* fmul */
68 COSTS_N_INSNS (37), /* fdivs */
69 COSTS_N_INSNS (37), /* fdivd */
70 COSTS_N_INSNS (63), /* fsqrts */
71 COSTS_N_INSNS (63), /* fsqrtd */
72 COSTS_N_INSNS (1), /* imul */
73 COSTS_N_INSNS (1), /* imulX */
74 0, /* imul bit factor */
75 COSTS_N_INSNS (1), /* idiv */
76 COSTS_N_INSNS (1), /* idivX */
77 COSTS_N_INSNS (1), /* movcc/movr */
78 0, /* shift penalty */
82 struct processor_costs supersparc_costs = {
83 COSTS_N_INSNS (1), /* int load */
84 COSTS_N_INSNS (1), /* int signed load */
85 COSTS_N_INSNS (1), /* int zeroed load */
86 COSTS_N_INSNS (0), /* float load */
87 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
88 COSTS_N_INSNS (3), /* fadd, fsub */
89 COSTS_N_INSNS (3), /* fcmp */
90 COSTS_N_INSNS (1), /* fmov, fmovr */
91 COSTS_N_INSNS (3), /* fmul */
92 COSTS_N_INSNS (6), /* fdivs */
93 COSTS_N_INSNS (9), /* fdivd */
94 COSTS_N_INSNS (12), /* fsqrts */
95 COSTS_N_INSNS (12), /* fsqrtd */
96 COSTS_N_INSNS (4), /* imul */
97 COSTS_N_INSNS (4), /* imulX */
98 0, /* imul bit factor */
99 COSTS_N_INSNS (4), /* idiv */
100 COSTS_N_INSNS (4), /* idivX */
101 COSTS_N_INSNS (1), /* movcc/movr */
102 1, /* shift penalty */
106 struct processor_costs hypersparc_costs = {
107 COSTS_N_INSNS (1), /* int load */
108 COSTS_N_INSNS (1), /* int signed load */
109 COSTS_N_INSNS (1), /* int zeroed load */
110 COSTS_N_INSNS (1), /* float load */
111 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
112 COSTS_N_INSNS (1), /* fadd, fsub */
113 COSTS_N_INSNS (1), /* fcmp */
114 COSTS_N_INSNS (1), /* fmov, fmovr */
115 COSTS_N_INSNS (1), /* fmul */
116 COSTS_N_INSNS (8), /* fdivs */
117 COSTS_N_INSNS (12), /* fdivd */
118 COSTS_N_INSNS (17), /* fsqrts */
119 COSTS_N_INSNS (17), /* fsqrtd */
120 COSTS_N_INSNS (17), /* imul */
121 COSTS_N_INSNS (17), /* imulX */
122 0, /* imul bit factor */
123 COSTS_N_INSNS (17), /* idiv */
124 COSTS_N_INSNS (17), /* idivX */
125 COSTS_N_INSNS (1), /* movcc/movr */
126 0, /* shift penalty */
130 struct processor_costs sparclet_costs = {
131 COSTS_N_INSNS (3), /* int load */
132 COSTS_N_INSNS (3), /* int signed load */
133 COSTS_N_INSNS (1), /* int zeroed load */
134 COSTS_N_INSNS (1), /* float load */
135 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
136 COSTS_N_INSNS (1), /* fadd, fsub */
137 COSTS_N_INSNS (1), /* fcmp */
138 COSTS_N_INSNS (1), /* fmov, fmovr */
139 COSTS_N_INSNS (1), /* fmul */
140 COSTS_N_INSNS (1), /* fdivs */
141 COSTS_N_INSNS (1), /* fdivd */
142 COSTS_N_INSNS (1), /* fsqrts */
143 COSTS_N_INSNS (1), /* fsqrtd */
144 COSTS_N_INSNS (5), /* imul */
145 COSTS_N_INSNS (5), /* imulX */
146 0, /* imul bit factor */
147 COSTS_N_INSNS (5), /* idiv */
148 COSTS_N_INSNS (5), /* idivX */
149 COSTS_N_INSNS (1), /* movcc/movr */
150 0, /* shift penalty */
154 struct processor_costs ultrasparc_costs = {
155 COSTS_N_INSNS (2), /* int load */
156 COSTS_N_INSNS (3), /* int signed load */
157 COSTS_N_INSNS (2), /* int zeroed load */
158 COSTS_N_INSNS (2), /* float load */
159 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
160 COSTS_N_INSNS (4), /* fadd, fsub */
161 COSTS_N_INSNS (1), /* fcmp */
162 COSTS_N_INSNS (2), /* fmov, fmovr */
163 COSTS_N_INSNS (4), /* fmul */
164 COSTS_N_INSNS (13), /* fdivs */
165 COSTS_N_INSNS (23), /* fdivd */
166 COSTS_N_INSNS (13), /* fsqrts */
167 COSTS_N_INSNS (23), /* fsqrtd */
168 COSTS_N_INSNS (4), /* imul */
169 COSTS_N_INSNS (4), /* imulX */
170 2, /* imul bit factor */
171 COSTS_N_INSNS (37), /* idiv */
172 COSTS_N_INSNS (68), /* idivX */
173 COSTS_N_INSNS (2), /* movcc/movr */
174 2, /* shift penalty */
178 struct processor_costs ultrasparc3_costs = {
179 COSTS_N_INSNS (2), /* int load */
180 COSTS_N_INSNS (3), /* int signed load */
181 COSTS_N_INSNS (3), /* int zeroed load */
182 COSTS_N_INSNS (2), /* float load */
183 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
184 COSTS_N_INSNS (4), /* fadd, fsub */
185 COSTS_N_INSNS (5), /* fcmp */
186 COSTS_N_INSNS (3), /* fmov, fmovr */
187 COSTS_N_INSNS (4), /* fmul */
188 COSTS_N_INSNS (17), /* fdivs */
189 COSTS_N_INSNS (20), /* fdivd */
190 COSTS_N_INSNS (20), /* fsqrts */
191 COSTS_N_INSNS (29), /* fsqrtd */
192 COSTS_N_INSNS (6), /* imul */
193 COSTS_N_INSNS (6), /* imulX */
194 0, /* imul bit factor */
195 COSTS_N_INSNS (40), /* idiv */
196 COSTS_N_INSNS (71), /* idivX */
197 COSTS_N_INSNS (2), /* movcc/movr */
198 0, /* shift penalty */
202 struct processor_costs niagara_costs = {
203 COSTS_N_INSNS (3), /* int load */
204 COSTS_N_INSNS (3), /* int signed load */
205 COSTS_N_INSNS (3), /* int zeroed load */
206 COSTS_N_INSNS (9), /* float load */
207 COSTS_N_INSNS (8), /* fmov, fneg, fabs */
208 COSTS_N_INSNS (8), /* fadd, fsub */
209 COSTS_N_INSNS (26), /* fcmp */
210 COSTS_N_INSNS (8), /* fmov, fmovr */
211 COSTS_N_INSNS (29), /* fmul */
212 COSTS_N_INSNS (54), /* fdivs */
213 COSTS_N_INSNS (83), /* fdivd */
214 COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
215 COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
216 COSTS_N_INSNS (11), /* imul */
217 COSTS_N_INSNS (11), /* imulX */
218 0, /* imul bit factor */
219 COSTS_N_INSNS (72), /* idiv */
220 COSTS_N_INSNS (72), /* idivX */
221 COSTS_N_INSNS (1), /* movcc/movr */
222 0, /* shift penalty */
226 struct processor_costs niagara2_costs = {
227 COSTS_N_INSNS (3), /* int load */
228 COSTS_N_INSNS (3), /* int signed load */
229 COSTS_N_INSNS (3), /* int zeroed load */
230 COSTS_N_INSNS (3), /* float load */
231 COSTS_N_INSNS (6), /* fmov, fneg, fabs */
232 COSTS_N_INSNS (6), /* fadd, fsub */
233 COSTS_N_INSNS (6), /* fcmp */
234 COSTS_N_INSNS (6), /* fmov, fmovr */
235 COSTS_N_INSNS (6), /* fmul */
236 COSTS_N_INSNS (19), /* fdivs */
237 COSTS_N_INSNS (33), /* fdivd */
238 COSTS_N_INSNS (19), /* fsqrts */
239 COSTS_N_INSNS (33), /* fsqrtd */
240 COSTS_N_INSNS (5), /* imul */
241 COSTS_N_INSNS (5), /* imulX */
242 0, /* imul bit factor */
243 COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
244 COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
245 COSTS_N_INSNS (1), /* movcc/movr */
246 0, /* shift penalty */
249 const struct processor_costs *sparc_costs = &cypress_costs;
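/* sparc_costs starts out pointing at the cypress table; sparc_override_options
   re-points it at the table matching sparc_cpu (see the switch on sparc_cpu
   further below).  */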
251 #ifdef HAVE_AS_RELAX_OPTION
252 /* If 'as' and 'ld' are relaxing tail call insns into branch always, use
253 "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
254 With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out
255 whether anything branches between the sethi and jmp.  */
256 #define LEAF_SIBCALL_SLOT_RESERVED_P 1
258 #define LEAF_SIBCALL_SLOT_RESERVED_P \
259 ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
262 /* Global variables for machine-dependent things. */
264 /* Size of frame. Need to know this to emit return insns from leaf procedures.
265 ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
266 reload pass. This is important as the value is later used for scheduling
267 (to see what can go in a delay slot).
268 APPARENT_FSIZE is the size of the stack less the register save area and less
269 the outgoing argument area. It is used when saving call preserved regs. */
270 static HOST_WIDE_INT apparent_fsize;
271 static HOST_WIDE_INT actual_fsize;
273 /* Number of live general or floating-point registers that need to be
274 saved (as 4-byte quantities). */
275 static int num_gfregs;
277 /* The alias set for prologue/epilogue register save/restore. */
278 static GTY(()) alias_set_type sparc_sr_alias_set;
280 /* The alias set for the structure return value. */
281 static GTY(()) alias_set_type struct_value_alias_set;
283 /* Vector to say how input registers are mapped to output registers.
284 HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
285 eliminate it. You must use -fomit-frame-pointer to get that. */
286 char leaf_reg_remap[] =
287 { 0, 1, 2, 3, 4, 5, 6, 7,
288 -1, -1, -1, -1, -1, -1, 14, -1,
289 -1, -1, -1, -1, -1, -1, -1, -1,
290 8, 9, 10, 11, 12, 13, -1, 15,
292 32, 33, 34, 35, 36, 37, 38, 39,
293 40, 41, 42, 43, 44, 45, 46, 47,
294 48, 49, 50, 51, 52, 53, 54, 55,
295 56, 57, 58, 59, 60, 61, 62, 63,
296 64, 65, 66, 67, 68, 69, 70, 71,
297 72, 73, 74, 75, 76, 77, 78, 79,
298 80, 81, 82, 83, 84, 85, 86, 87,
299 88, 89, 90, 91, 92, 93, 94, 95,
300 96, 97, 98, 99, 100};
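/* For example, in a function eligible for leaf treatment the incoming
   registers %i0-%i5 (hard regs 24-29) are remapped onto the outgoing
   registers %o0-%o5 (hard regs 8-13) and %i7 (31) onto %o7 (15), so no
   register window has to be allocated; %fp (30) maps to -1 and has to be
   eliminated separately.  */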
302 /* Vector, indexed by hard register number, which contains 1
303 for a register that is allowable in a candidate for leaf
304 function treatment. */
305 char sparc_leaf_regs[] =
306 { 1, 1, 1, 1, 1, 1, 1, 1,
307 0, 0, 0, 0, 0, 0, 1, 0,
308 0, 0, 0, 0, 0, 0, 0, 0,
309 1, 1, 1, 1, 1, 1, 0, 1,
310 1, 1, 1, 1, 1, 1, 1, 1,
311 1, 1, 1, 1, 1, 1, 1, 1,
312 1, 1, 1, 1, 1, 1, 1, 1,
313 1, 1, 1, 1, 1, 1, 1, 1,
314 1, 1, 1, 1, 1, 1, 1, 1,
315 1, 1, 1, 1, 1, 1, 1, 1,
316 1, 1, 1, 1, 1, 1, 1, 1,
317 1, 1, 1, 1, 1, 1, 1, 1,
320 struct GTY(()) machine_function
322 /* Some local-dynamic TLS symbol name. */
323 const char *some_ld_name;
325 /* True if the current function is leaf and uses only leaf regs,
326 so that the SPARC leaf function optimization can be applied.
327 Private version of current_function_uses_only_leaf_regs, see
328 sparc_expand_prologue for the rationale. */
331 /* True if the data calculated by sparc_expand_prologue are valid. */
332 bool prologue_data_valid_p;
335 #define sparc_leaf_function_p cfun->machine->leaf_function_p
336 #define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
338 /* Register we pretend to think the frame pointer is allocated to.
339 Normally, this is %fp, but if we are in a leaf procedure, this
340 is %sp+"something". We record "something" separately as it may
341 be too big for reg+constant addressing. */
342 static rtx frame_base_reg;
343 static HOST_WIDE_INT frame_base_offset;
345 /* 1 if the next opcode is to be specially indented. */
346 int sparc_indent_opcode = 0;
348 static bool sparc_handle_option (size_t, const char *, int);
349 static void sparc_init_modes (void);
350 static void scan_record_type (tree, int *, int *, int *);
351 static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
352 tree, int, int, int *, int *);
354 static int supersparc_adjust_cost (rtx, rtx, rtx, int);
355 static int hypersparc_adjust_cost (rtx, rtx, rtx, int);
357 static void sparc_output_addr_vec (rtx);
358 static void sparc_output_addr_diff_vec (rtx);
359 static void sparc_output_deferred_case_vectors (void);
360 static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
361 static rtx sparc_builtin_saveregs (void);
362 static int epilogue_renumber (rtx *, int);
363 static bool sparc_assemble_integer (rtx, unsigned int, int);
364 static int set_extends (rtx);
365 static void emit_pic_helper (void);
366 static void load_pic_register (bool);
367 static int save_or_restore_regs (int, int, rtx, int, int);
368 static void emit_save_or_restore_regs (int);
369 static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
370 static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
371 #if defined (OBJECT_FORMAT_ELF) && !HAVE_GNU_AS
372 static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
375 static int sparc_adjust_cost (rtx, rtx, rtx, int);
376 static int sparc_issue_rate (void);
377 static void sparc_sched_init (FILE *, int, int);
378 static int sparc_use_sched_lookahead (void);
380 static void emit_soft_tfmode_libcall (const char *, int, rtx *);
381 static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
382 static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
383 static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
384 static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
386 static bool sparc_function_ok_for_sibcall (tree, tree);
387 static void sparc_init_libfuncs (void);
388 static void sparc_init_builtins (void);
389 static void sparc_vis_init_builtins (void);
390 static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
391 static tree sparc_fold_builtin (tree, tree, bool);
392 static int sparc_vis_mul8x16 (int, int);
393 static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
394 static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
395 HOST_WIDE_INT, tree);
396 static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
397 HOST_WIDE_INT, const_tree);
398 static struct machine_function * sparc_init_machine_status (void);
399 static bool sparc_cannot_force_const_mem (rtx);
400 static rtx sparc_tls_get_addr (void);
401 static rtx sparc_tls_got (void);
402 static const char *get_some_local_dynamic_name (void);
403 static int get_some_local_dynamic_name_1 (rtx *, void *);
404 static bool sparc_rtx_costs (rtx, int, int, int *, bool);
405 static bool sparc_promote_prototypes (const_tree);
406 static rtx sparc_struct_value_rtx (tree, int);
407 static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
408 int *, const_tree, int);
409 static bool sparc_return_in_memory (const_tree, const_tree);
410 static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
411 static void sparc_va_start (tree, rtx);
412 static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
413 static bool sparc_vector_mode_supported_p (enum machine_mode);
414 static bool sparc_tls_referenced_p (rtx);
415 static rtx legitimize_tls_address (rtx);
416 static rtx legitimize_pic_address (rtx, rtx);
417 static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
418 static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
419 enum machine_mode, const_tree, bool);
420 static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
421 enum machine_mode, tree, bool);
422 static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
423 static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
424 static void sparc_file_end (void);
425 static bool sparc_frame_pointer_required (void);
426 static bool sparc_can_eliminate (const int, const int);
427 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
428 static const char *sparc_mangle_type (const_tree);
430 static void sparc_trampoline_init (rtx, tree, rtx);
432 #ifdef SUBTARGET_ATTRIBUTE_TABLE
433 /* Table of valid machine attributes. */
434 static const struct attribute_spec sparc_attribute_table[] =
436 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
437 SUBTARGET_ATTRIBUTE_TABLE,
438 { NULL, 0, 0, false, false, false, NULL }
442 /* Option handling. */
445 enum cmodel sparc_cmodel;
447 char sparc_hard_reg_printed[8];
449 struct sparc_cpu_select sparc_select[] =
451 /* switch name, tune arch */
452 { (char *)0, "default", 1, 1 },
453 { (char *)0, "-mcpu=", 1, 1 },
454 { (char *)0, "-mtune=", 1, 0 },
458 /* CPU type. This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx. */
459 enum processor_type sparc_cpu;
461 /* Whether an FPU option was specified.  */
462 static bool fpu_option_set = false;
464 /* Initialize the GCC target structure. */
466 /* The sparc default is to use .half rather than .short for aligned
467 HI objects. Use .word instead of .long on non-ELF systems. */
468 #undef TARGET_ASM_ALIGNED_HI_OP
469 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
470 #ifndef OBJECT_FORMAT_ELF
471 #undef TARGET_ASM_ALIGNED_SI_OP
472 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
475 #undef TARGET_ASM_UNALIGNED_HI_OP
476 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
477 #undef TARGET_ASM_UNALIGNED_SI_OP
478 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
479 #undef TARGET_ASM_UNALIGNED_DI_OP
480 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
482 /* The target hook has to handle DI-mode values. */
483 #undef TARGET_ASM_INTEGER
484 #define TARGET_ASM_INTEGER sparc_assemble_integer
486 #undef TARGET_ASM_FUNCTION_PROLOGUE
487 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
488 #undef TARGET_ASM_FUNCTION_EPILOGUE
489 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
491 #undef TARGET_SCHED_ADJUST_COST
492 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
493 #undef TARGET_SCHED_ISSUE_RATE
494 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
495 #undef TARGET_SCHED_INIT
496 #define TARGET_SCHED_INIT sparc_sched_init
497 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
498 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
500 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
501 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
503 #undef TARGET_INIT_LIBFUNCS
504 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
505 #undef TARGET_INIT_BUILTINS
506 #define TARGET_INIT_BUILTINS sparc_init_builtins
508 #undef TARGET_LEGITIMIZE_ADDRESS
509 #define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
511 #undef TARGET_EXPAND_BUILTIN
512 #define TARGET_EXPAND_BUILTIN sparc_expand_builtin
513 #undef TARGET_FOLD_BUILTIN
514 #define TARGET_FOLD_BUILTIN sparc_fold_builtin
517 #undef TARGET_HAVE_TLS
518 #define TARGET_HAVE_TLS true
521 #undef TARGET_CANNOT_FORCE_CONST_MEM
522 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
524 #undef TARGET_ASM_OUTPUT_MI_THUNK
525 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
526 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
527 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
529 #undef TARGET_RTX_COSTS
530 #define TARGET_RTX_COSTS sparc_rtx_costs
531 #undef TARGET_ADDRESS_COST
532 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
534 #undef TARGET_PROMOTE_FUNCTION_MODE
535 #define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode
537 #undef TARGET_PROMOTE_PROTOTYPES
538 #define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes
540 #undef TARGET_STRUCT_VALUE_RTX
541 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
542 #undef TARGET_RETURN_IN_MEMORY
543 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
544 #undef TARGET_MUST_PASS_IN_STACK
545 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
546 #undef TARGET_PASS_BY_REFERENCE
547 #define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
548 #undef TARGET_ARG_PARTIAL_BYTES
549 #define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
551 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
552 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
553 #undef TARGET_STRICT_ARGUMENT_NAMING
554 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
556 #undef TARGET_EXPAND_BUILTIN_VA_START
557 #define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
558 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
559 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
561 #undef TARGET_VECTOR_MODE_SUPPORTED_P
562 #define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
564 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
565 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec
567 #ifdef SUBTARGET_INSERT_ATTRIBUTES
568 #undef TARGET_INSERT_ATTRIBUTES
569 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
572 #ifdef SUBTARGET_ATTRIBUTE_TABLE
573 #undef TARGET_ATTRIBUTE_TABLE
574 #define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
577 #undef TARGET_RELAXED_ORDERING
578 #define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING
580 #undef TARGET_DEFAULT_TARGET_FLAGS
581 #define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
582 #undef TARGET_HANDLE_OPTION
583 #define TARGET_HANDLE_OPTION sparc_handle_option
585 #if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
586 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
587 #define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
590 #undef TARGET_ASM_FILE_END
591 #define TARGET_ASM_FILE_END sparc_file_end
593 #undef TARGET_FRAME_POINTER_REQUIRED
594 #define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required
596 #undef TARGET_CAN_ELIMINATE
597 #define TARGET_CAN_ELIMINATE sparc_can_eliminate
599 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
600 #undef TARGET_MANGLE_TYPE
601 #define TARGET_MANGLE_TYPE sparc_mangle_type
604 #undef TARGET_LEGITIMATE_ADDRESS_P
605 #define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p
607 #undef TARGET_TRAMPOLINE_INIT
608 #define TARGET_TRAMPOLINE_INIT sparc_trampoline_init
610 struct gcc_target targetm = TARGET_INITIALIZER;
612 /* Implement TARGET_HANDLE_OPTION. */
615 sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
620 case OPT_mhard_float:
621 case OPT_msoft_float:
622 fpu_option_set = true;
626 sparc_select[1].string = arg;
630 sparc_select[2].string = arg;
637 /* Validate and override various options, and do some machine dependent initialization.  */
641 sparc_override_options (void)
643 static struct code_model {
644 const char *const name;
645 const enum cmodel value;
646 } const cmodels[] = {
648 { "medlow", CM_MEDLOW },
649 { "medmid", CM_MEDMID },
650 { "medany", CM_MEDANY },
651 { "embmedany", CM_EMBMEDANY },
652 { NULL, (enum cmodel) 0 }
654 const struct code_model *cmodel;
655 /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=. */
656 static struct cpu_default {
658 const char *const name;
659 } const cpu_default[] = {
660 /* There must be one entry here for each TARGET_CPU value. */
661 { TARGET_CPU_sparc, "cypress" },
662 { TARGET_CPU_sparclet, "tsc701" },
663 { TARGET_CPU_sparclite, "f930" },
664 { TARGET_CPU_v8, "v8" },
665 { TARGET_CPU_hypersparc, "hypersparc" },
666 { TARGET_CPU_sparclite86x, "sparclite86x" },
667 { TARGET_CPU_supersparc, "supersparc" },
668 { TARGET_CPU_v9, "v9" },
669 { TARGET_CPU_ultrasparc, "ultrasparc" },
670 { TARGET_CPU_ultrasparc3, "ultrasparc3" },
671 { TARGET_CPU_niagara, "niagara" },
672 { TARGET_CPU_niagara2, "niagara2" },
675 const struct cpu_default *def;
676 /* Table of values for -m{cpu,tune}=. */
677 static struct cpu_table {
678 const char *const name;
679 const enum processor_type processor;
682 } const cpu_table[] = {
683 { "v7", PROCESSOR_V7, MASK_ISA, 0 },
684 { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
685 { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
686 /* TI TMS390Z55 supersparc */
687 { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
688 { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
689 /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
690 The Fujitsu MB86934 is the recent sparclite chip, with an fpu. */
691 { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
692 { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
693 { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
694 { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
696 { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
698 { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
699 { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
700 /* TI ultrasparc I, II, IIi */
701 { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
702 /* Although insns using %y are deprecated, it is a clear win on current UltraSPARCs.  */
704 |MASK_DEPRECATED_V8_INSNS},
705 /* TI ultrasparc III */
706 /* ??? Check if %y issue still holds true in ultra3. */
707 { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
709 { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
710 { "niagara2", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9},
711 { 0, (enum processor_type) 0, 0, 0 }
713 const struct cpu_table *cpu;
714 const struct sparc_cpu_select *sel;
717 #ifndef SPARC_BI_ARCH
718 /* Check for unsupported architecture size. */
719 if (! TARGET_64BIT != DEFAULT_ARCH32_P)
720 error ("%s is not supported by this configuration",
721 DEFAULT_ARCH32_P ? "-m64" : "-m32");
724 /* We force all 64bit archs to use 128 bit long double */
725 if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
727 error ("-mlong-double-64 not allowed with -m64");
728 target_flags |= MASK_LONG_DOUBLE_128;
731 /* Code model selection. */
732 sparc_cmodel = SPARC_DEFAULT_CMODEL;
736 sparc_cmodel = CM_32;
739 if (sparc_cmodel_string != NULL)
743 for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
744 if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
746 if (cmodel->name == NULL)
747 error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
749 sparc_cmodel = cmodel->value;
752 error ("-mcmodel= is not supported on 32 bit systems");
755 fpu = target_flags & MASK_FPU; /* save current -mfpu status */
757 /* Set the default CPU. */
758 for (def = &cpu_default[0]; def->name; ++def)
759 if (def->cpu == TARGET_CPU_DEFAULT)
761 gcc_assert (def->name);
762 sparc_select[0].string = def->name;
764 for (sel = &sparc_select[0]; sel->name; ++sel)
768 for (cpu = &cpu_table[0]; cpu->name; ++cpu)
769 if (! strcmp (sel->string, cpu->name))
772 sparc_cpu = cpu->processor;
776 target_flags &= ~cpu->disable;
777 target_flags |= cpu->enable;
783 error ("bad value (%s) for %s switch", sel->string, sel->name);
787 /* If -mfpu or -mno-fpu was explicitly used, don't override with
788 the processor default. */
790 target_flags = (target_flags & ~MASK_FPU) | fpu;
792 /* Don't allow -mvis if FPU is disabled. */
794 target_flags &= ~MASK_VIS;
796 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions are available.
798 -m64 also implies v9. */
799 if (TARGET_VIS || TARGET_ARCH64)
801 target_flags |= MASK_V9;
802 target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
805 /* Use the deprecated v8 insns for sparc64 in 32 bit mode. */
806 if (TARGET_V9 && TARGET_ARCH32)
807 target_flags |= MASK_DEPRECATED_V8_INSNS;
809 /* V8PLUS requires V9, makes no sense in 64 bit mode. */
810 if (! TARGET_V9 || TARGET_ARCH64)
811 target_flags &= ~MASK_V8PLUS;
813 /* Don't use stack biasing in 32 bit mode. */
815 target_flags &= ~MASK_STACK_BIAS;
817 /* Supply a default value for align_functions. */
818 if (align_functions == 0
819 && (sparc_cpu == PROCESSOR_ULTRASPARC
820 || sparc_cpu == PROCESSOR_ULTRASPARC3
821 || sparc_cpu == PROCESSOR_NIAGARA
822 || sparc_cpu == PROCESSOR_NIAGARA2))
823 align_functions = 32;
825 /* Validate PCC_STRUCT_RETURN. */
826 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
827 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
829 /* Only use .uaxword when compiling for a 64-bit target. */
831 targetm.asm_out.unaligned_op.di = NULL;
833 /* Do various machine dependent initializations. */
836 /* Acquire unique alias sets for our private stuff. */
837 sparc_sr_alias_set = new_alias_set ();
838 struct_value_alias_set = new_alias_set ();
840 /* Set up function hooks. */
841 init_machine_status = sparc_init_machine_status;
846 case PROCESSOR_CYPRESS:
847 sparc_costs = &cypress_costs;
850 case PROCESSOR_SPARCLITE:
851 case PROCESSOR_SUPERSPARC:
852 sparc_costs = &supersparc_costs;
856 case PROCESSOR_HYPERSPARC:
857 case PROCESSOR_SPARCLITE86X:
858 sparc_costs = &hypersparc_costs;
860 case PROCESSOR_SPARCLET:
861 case PROCESSOR_TSC701:
862 sparc_costs = &sparclet_costs;
865 case PROCESSOR_ULTRASPARC:
866 sparc_costs = &ultrasparc_costs;
868 case PROCESSOR_ULTRASPARC3:
869 sparc_costs = &ultrasparc3_costs;
871 case PROCESSOR_NIAGARA:
872 sparc_costs = &niagara_costs;
874 case PROCESSOR_NIAGARA2:
875 sparc_costs = &niagara2_costs;
879 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
880 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
881 target_flags |= MASK_LONG_DOUBLE_128;
884 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
885 set_param_value ("simultaneous-prefetches",
886 ((sparc_cpu == PROCESSOR_ULTRASPARC
887 || sparc_cpu == PROCESSOR_NIAGARA
888 || sparc_cpu == PROCESSOR_NIAGARA2)
890 : (sparc_cpu == PROCESSOR_ULTRASPARC3
892 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
893 set_param_value ("l1-cache-line-size",
894 ((sparc_cpu == PROCESSOR_ULTRASPARC
895 || sparc_cpu == PROCESSOR_ULTRASPARC3
896 || sparc_cpu == PROCESSOR_NIAGARA
897 || sparc_cpu == PROCESSOR_NIAGARA2)
901 /* Miscellaneous utilities. */
903 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
904 or branch on register contents instructions. */
907 v9_regcmp_p (enum rtx_code code)
909 return (code == EQ || code == NE || code == GE || code == LT
910 || code == LE || code == GT);
913 /* Nonzero if OP is a floating point constant which can
914 be loaded into an integer register using a single
915 sethi instruction. */
920 if (GET_CODE (op) == CONST_DOUBLE)
925 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
926 REAL_VALUE_TO_TARGET_SINGLE (r, i);
927 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
933 /* Nonzero if OP is a floating point constant which can
934 be loaded into an integer register using a single mov instruction.  */
940 if (GET_CODE (op) == CONST_DOUBLE)
945 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
946 REAL_VALUE_TO_TARGET_SINGLE (r, i);
947 return SPARC_SIMM13_P (i);
953 /* Nonzero if OP is a floating point constant which can
954 be loaded into an integer register using a high/losum
955 instruction sequence. */
958 fp_high_losum_p (rtx op)
960 /* The constraints calling this should only be in
961 SFmode move insns, so any constant which cannot
962 be moved using a single insn will do. */
963 if (GET_CODE (op) == CONST_DOUBLE)
968 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
969 REAL_VALUE_TO_TARGET_SINGLE (r, i);
970 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
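/* For illustration: the SFmode constant 1.0f has the bit pattern 0x3f800000,
   whose low 10 bits are clear, so fp_sethi_p accepts it (a single sethi loads
   it); 0.0f (0x00000000) satisfies fp_mov_p (a single mov); a pattern such as
   0x3f8ccccd, which fits neither form, is only accepted by fp_high_losum_p.  */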
976 /* Expand a move instruction. Return true if all work is done. */
979 sparc_expand_move (enum machine_mode mode, rtx *operands)
981 /* Handle sets of MEM first. */
982 if (GET_CODE (operands[0]) == MEM)
984 /* 0 is a register (or a pair of registers) on SPARC. */
985 if (register_or_zero_operand (operands[1], mode))
988 if (!reload_in_progress)
990 operands[0] = validize_mem (operands[0]);
991 operands[1] = force_reg (mode, operands[1]);
995 /* Fixup TLS cases. */
997 && CONSTANT_P (operands[1])
998 && sparc_tls_referenced_p (operands [1]))
1000 operands[1] = legitimize_tls_address (operands[1]);
1004 /* Fixup PIC cases. */
1005 if (flag_pic && CONSTANT_P (operands[1]))
1007 if (pic_address_needs_scratch (operands[1]))
1008 operands[1] = legitimize_pic_address (operands[1], NULL_RTX);
1010 /* VxWorks does not impose a fixed gap between segments; the run-time
1011 gap can be different from the object-file gap. We therefore can't
1012 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
1013 are absolutely sure that X is in the same segment as the GOT.
1014 Unfortunately, the flexibility of linker scripts means that we
1015 can't be sure of that in general, so assume that _G_O_T_-relative
1016 accesses are never valid on VxWorks. */
1017 if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
1021 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
1027 gcc_assert (TARGET_ARCH64);
1028 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
1033 if (symbolic_operand (operands[1], mode))
1035 operands[1] = legitimize_pic_address (operands[1],
1037 ? operands[0] : NULL_RTX);
1042 /* If we are trying to toss an integer constant into FP registers,
1043 or loading a FP or vector constant, force it into memory. */
1044 if (CONSTANT_P (operands[1])
1045 && REG_P (operands[0])
1046 && (SPARC_FP_REG_P (REGNO (operands[0]))
1047 || SCALAR_FLOAT_MODE_P (mode)
1048 || VECTOR_MODE_P (mode)))
1050 /* emit_group_store will send such bogosity to us when it is
1051 not storing directly into memory. So fix this up to avoid
1052 crashes in output_constant_pool. */
1053 if (operands [1] == const0_rtx)
1054 operands[1] = CONST0_RTX (mode);
1056 /* We can always clear integer registers, and FP registers only if TARGET_VIS. */
1057 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
1058 && const_zero_operand (operands[1], mode))
1061 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
1062 /* We are able to build any SF constant in integer registers
1063 with at most 2 instructions. */
1065 /* And any DF constant in integer registers. */
1067 && (reload_completed || reload_in_progress))))
1070 operands[1] = force_const_mem (mode, operands[1]);
1071 if (!reload_in_progress)
1072 operands[1] = validize_mem (operands[1]);
1076 /* Accept non-constants and valid constants unmodified. */
1077 if (!CONSTANT_P (operands[1])
1078 || GET_CODE (operands[1]) == HIGH
1079 || input_operand (operands[1], mode))
1085 /* All QImode constants require only one insn, so proceed. */
1090 sparc_emit_set_const32 (operands[0], operands[1]);
1094 /* input_operand should have filtered out 32-bit mode. */
1095 sparc_emit_set_const64 (operands[0], operands[1]);
1105 /* Load OP1, a 32-bit constant, into OP0, a register.
1106 We know it can't be done in one insn when we get
1107 here; the move expander guarantees this. */
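/* For a CONST_INT such as 0x12345678 this emits, roughly,

	sethi	%hi(0x12345400), %temp
	or	%temp, 0x278, %reg

   i.e. the constant with its low 10 bits cleared, followed by an ior of
   those low 10 bits; symbolic operands use the usual HIGH/LO_SUM pair.  */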
1110 sparc_emit_set_const32 (rtx op0, rtx op1)
1112 enum machine_mode mode = GET_MODE (op0);
1115 if (reload_in_progress || reload_completed)
1118 temp = gen_reg_rtx (mode);
1120 if (GET_CODE (op1) == CONST_INT)
1122 gcc_assert (!small_int_operand (op1, mode)
1123 && !const_high_operand (op1, mode));
1125 /* Emit them as real moves instead of a HIGH/LO_SUM,
1126 this way CSE can see everything and reuse intermediate
1127 values if it wants. */
1128 emit_insn (gen_rtx_SET (VOIDmode, temp,
1129 GEN_INT (INTVAL (op1)
1130 & ~(HOST_WIDE_INT)0x3ff)));
1132 emit_insn (gen_rtx_SET (VOIDmode,
1134 gen_rtx_IOR (mode, temp,
1135 GEN_INT (INTVAL (op1) & 0x3ff))));
1139 /* A symbol, emit in the traditional way. */
1140 emit_insn (gen_rtx_SET (VOIDmode, temp,
1141 gen_rtx_HIGH (mode, op1)));
1142 emit_insn (gen_rtx_SET (VOIDmode,
1143 op0, gen_rtx_LO_SUM (mode, temp, op1)));
1147 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
1148 If TEMP is nonzero, we are forbidden to use any other scratch
1149 registers. Otherwise, we are allowed to generate them as needed.
1151 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
1152 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
1155 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
1157 rtx temp1, temp2, temp3, temp4, temp5;
1160 if (temp && GET_MODE (temp) == TImode)
1163 temp = gen_rtx_REG (DImode, REGNO (temp));
1166 /* SPARC-V9 code-model support. */
1167 switch (sparc_cmodel)
1170 /* The range spanned by all instructions in the object is less
1171 than 2^31 bytes (2GB) and the distance from any instruction
1172 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1173 than 2^31 bytes (2GB).
1175 The executable must be in the low 4TB of the virtual address
1178 sethi %hi(symbol), %temp1
1179 or %temp1, %lo(symbol), %reg */
1181 temp1 = temp; /* op0 is allowed. */
1183 temp1 = gen_reg_rtx (DImode);
1185 emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
1186 emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
1190 /* The range spanned by all instructions in the object is less
1191 than 2^31 bytes (2GB) and the distance from any instruction
1192 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1193 than 2^31 bytes (2GB).
1195 The executable must be in the low 16TB of the virtual address
1198 sethi %h44(symbol), %temp1
1199 or %temp1, %m44(symbol), %temp2
1200 sllx %temp2, 12, %temp3
1201 or %temp3, %l44(symbol), %reg */
1206 temp3 = temp; /* op0 is allowed. */
1210 temp1 = gen_reg_rtx (DImode);
1211 temp2 = gen_reg_rtx (DImode);
1212 temp3 = gen_reg_rtx (DImode);
1215 emit_insn (gen_seth44 (temp1, op1));
1216 emit_insn (gen_setm44 (temp2, temp1, op1));
1217 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1218 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
1219 emit_insn (gen_setl44 (op0, temp3, op1));
1223 /* The range spanned by all instructions in the object is less
1224 than 2^31 bytes (2GB) and the distance from any instruction
1225 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1226 than 2^31 bytes (2GB).
1228 The executable can be placed anywhere in the virtual address
1231 sethi %hh(symbol), %temp1
1232 sethi %lm(symbol), %temp2
1233 or %temp1, %hm(symbol), %temp3
1234 sllx %temp3, 32, %temp4
1235 or %temp4, %temp2, %temp5
1236 or %temp5, %lo(symbol), %reg */
1239 /* It is possible that one of the registers we got for operands[2]
1240 might coincide with that of operands[0] (which is why we made
1241 it TImode). Pick the other one to use as our scratch. */
1242 if (rtx_equal_p (temp, op0))
1244 gcc_assert (ti_temp);
1245 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1248 temp2 = temp; /* op0 is _not_ allowed, see above. */
1255 temp1 = gen_reg_rtx (DImode);
1256 temp2 = gen_reg_rtx (DImode);
1257 temp3 = gen_reg_rtx (DImode);
1258 temp4 = gen_reg_rtx (DImode);
1259 temp5 = gen_reg_rtx (DImode);
1262 emit_insn (gen_sethh (temp1, op1));
1263 emit_insn (gen_setlm (temp2, op1));
1264 emit_insn (gen_sethm (temp3, temp1, op1));
1265 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1266 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1267 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1268 gen_rtx_PLUS (DImode, temp4, temp2)));
1269 emit_insn (gen_setlo (op0, temp5, op1));
1273 /* Old old old backwards compatibility kruft here.
1274 Essentially it is MEDLOW with a fixed 64-bit
1275 virtual base added to all data segment addresses.
1276 Text-segment stuff is computed like MEDANY, we can't
1277 reuse the code above because the relocation knobs
1280 Data segment: sethi %hi(symbol), %temp1
1281 add %temp1, EMBMEDANY_BASE_REG, %temp2
1282 or %temp2, %lo(symbol), %reg */
1283 if (data_segment_operand (op1, GET_MODE (op1)))
1287 temp1 = temp; /* op0 is allowed. */
1292 temp1 = gen_reg_rtx (DImode);
1293 temp2 = gen_reg_rtx (DImode);
1296 emit_insn (gen_embmedany_sethi (temp1, op1));
1297 emit_insn (gen_embmedany_brsum (temp2, temp1));
1298 emit_insn (gen_embmedany_losum (op0, temp2, op1));
1301 /* Text segment: sethi %uhi(symbol), %temp1
1302 sethi %hi(symbol), %temp2
1303 or %temp1, %ulo(symbol), %temp3
1304 sllx %temp3, 32, %temp4
1305 or %temp4, %temp2, %temp5
1306 or %temp5, %lo(symbol), %reg */
1311 /* It is possible that one of the registers we got for operands[2]
1312 might coincide with that of operands[0] (which is why we made
1313 it TImode). Pick the other one to use as our scratch. */
1314 if (rtx_equal_p (temp, op0))
1316 gcc_assert (ti_temp);
1317 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1320 temp2 = temp; /* op0 is _not_ allowed, see above. */
1327 temp1 = gen_reg_rtx (DImode);
1328 temp2 = gen_reg_rtx (DImode);
1329 temp3 = gen_reg_rtx (DImode);
1330 temp4 = gen_reg_rtx (DImode);
1331 temp5 = gen_reg_rtx (DImode);
1334 emit_insn (gen_embmedany_textuhi (temp1, op1));
1335 emit_insn (gen_embmedany_texthi (temp2, op1));
1336 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
1337 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1338 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1339 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1340 gen_rtx_PLUS (DImode, temp4, temp2)));
1341 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
1350 #if HOST_BITS_PER_WIDE_INT == 32
1352 sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
1357 /* These avoid problems when cross compiling. If we do not
1358 go through all this hair then the optimizer will see
1359 invalid REG_EQUAL notes or in some cases none at all. */
1360 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
1361 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
1362 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
1363 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
1365 /* The optimizer is not to assume anything about exactly
1366 which bits are set for a HIGH, they are unspecified.
1367 Unfortunately this leads to many missed optimizations
1368 during CSE.  We mask out the non-HIGH bits, and match
1369 a plain movdi, to alleviate this problem. */
1371 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
1373 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
1377 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
1379 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
1383 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
1385 return gen_rtx_IOR (DImode, src, GEN_INT (val));
1389 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
1391 return gen_rtx_XOR (DImode, src, GEN_INT (val));
1394 /* Worker routines for 64-bit constant formation on arch64.
1395 One of the key things to be doing in these emissions is
1396 to create as many temp REGs as possible. This makes it
1397 possible for half-built constants to be used later when
1398 such values are similar to something required later on.
1399 Without doing this, the optimizer cannot see such opportunities.  */
1402 static void sparc_emit_set_const64_quick1 (rtx, rtx,
1403 unsigned HOST_WIDE_INT, int);
1406 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
1407 unsigned HOST_WIDE_INT low_bits, int is_neg)
1409 unsigned HOST_WIDE_INT high_bits;
1412 high_bits = (~low_bits) & 0xffffffff;
1414 high_bits = low_bits;
1416 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1419 emit_insn (gen_rtx_SET (VOIDmode, op0,
1420 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1424 /* If we are XOR'ing with -1, then we should emit a one's complement
1425 instead. This way the combiner will notice logical operations
1426 such as ANDN later on and substitute. */
1427 if ((low_bits & 0x3ff) == 0x3ff)
1429 emit_insn (gen_rtx_SET (VOIDmode, op0,
1430 gen_rtx_NOT (DImode, temp)));
1434 emit_insn (gen_rtx_SET (VOIDmode, op0,
1435 gen_safe_XOR64 (temp,
1436 (-(HOST_WIDE_INT)0x400
1437 | (low_bits & 0x3ff)))));
1442 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1443 unsigned HOST_WIDE_INT, int);
1446 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1447 unsigned HOST_WIDE_INT high_bits,
1448 unsigned HOST_WIDE_INT low_immediate,
1453 if ((high_bits & 0xfffffc00) != 0)
1455 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1456 if ((high_bits & ~0xfffffc00) != 0)
1457 emit_insn (gen_rtx_SET (VOIDmode, op0,
1458 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1464 emit_insn (gen_safe_SET64 (temp, high_bits));
1468 /* Now shift it up into place. */
1469 emit_insn (gen_rtx_SET (VOIDmode, op0,
1470 gen_rtx_ASHIFT (DImode, temp2,
1471 GEN_INT (shift_count))));
1473 /* If there is a low immediate part piece, finish up by
1474 putting that in as well. */
1475 if (low_immediate != 0)
1476 emit_insn (gen_rtx_SET (VOIDmode, op0,
1477 gen_safe_OR64 (op0, low_immediate)));
1480 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1481 unsigned HOST_WIDE_INT);
1483 /* Full 64-bit constant decomposition. Even though this is the
1484 'worst' case, we still optimize a few things away. */
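/* When not reloading, the fully general case amounts to roughly

	sethi	%hi(high_bits), %temp
	or	%temp, %lo(high_bits), %sub_temp
	sllx	%sub_temp, 32, %temp4
	sethi	%hi(low_bits), %temp2
	or	%temp2, %lo(low_bits), %temp3
	add	%temp4, %temp3, %reg

   with the sethi/or pairs degenerating to a single insn when the
   corresponding half allows it.  */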
1486 sparc_emit_set_const64_longway (rtx op0, rtx temp,
1487 unsigned HOST_WIDE_INT high_bits,
1488 unsigned HOST_WIDE_INT low_bits)
1492 if (reload_in_progress || reload_completed)
1495 sub_temp = gen_reg_rtx (DImode);
1497 if ((high_bits & 0xfffffc00) != 0)
1499 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1500 if ((high_bits & ~0xfffffc00) != 0)
1501 emit_insn (gen_rtx_SET (VOIDmode,
1503 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1509 emit_insn (gen_safe_SET64 (temp, high_bits));
1513 if (!reload_in_progress && !reload_completed)
1515 rtx temp2 = gen_reg_rtx (DImode);
1516 rtx temp3 = gen_reg_rtx (DImode);
1517 rtx temp4 = gen_reg_rtx (DImode);
1519 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1520 gen_rtx_ASHIFT (DImode, sub_temp,
1523 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
1524 if ((low_bits & ~0xfffffc00) != 0)
1526 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1527 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
1528 emit_insn (gen_rtx_SET (VOIDmode, op0,
1529 gen_rtx_PLUS (DImode, temp4, temp3)));
1533 emit_insn (gen_rtx_SET (VOIDmode, op0,
1534 gen_rtx_PLUS (DImode, temp4, temp2)));
1539 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
1540 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
1541 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
1544 /* We are in the middle of reload, so this is really
1545 painful. However we do still make an attempt to
1546 avoid emitting truly stupid code. */
1547 if (low1 != const0_rtx)
1549 emit_insn (gen_rtx_SET (VOIDmode, op0,
1550 gen_rtx_ASHIFT (DImode, sub_temp,
1551 GEN_INT (to_shift))));
1552 emit_insn (gen_rtx_SET (VOIDmode, op0,
1553 gen_rtx_IOR (DImode, op0, low1)));
1561 if (low2 != const0_rtx)
1563 emit_insn (gen_rtx_SET (VOIDmode, op0,
1564 gen_rtx_ASHIFT (DImode, sub_temp,
1565 GEN_INT (to_shift))));
1566 emit_insn (gen_rtx_SET (VOIDmode, op0,
1567 gen_rtx_IOR (DImode, op0, low2)));
1575 emit_insn (gen_rtx_SET (VOIDmode, op0,
1576 gen_rtx_ASHIFT (DImode, sub_temp,
1577 GEN_INT (to_shift))));
1578 if (low3 != const0_rtx)
1579 emit_insn (gen_rtx_SET (VOIDmode, op0,
1580 gen_rtx_IOR (DImode, op0, low3)));
1585 /* Analyze a 64-bit constant for certain properties. */
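/* E.g. for the constant 0x00000000000ff000 (high_bits 0, low_bits 0x000ff000)
   this finds lowest_bit_set 12, highest_bit_set 19 and
   all_bits_between_are_set 1.  */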
1586 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
1587 unsigned HOST_WIDE_INT,
1588 int *, int *, int *);
1591 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
1592 unsigned HOST_WIDE_INT low_bits,
1593 int *hbsp, int *lbsp, int *abbasp)
1595 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
1598 lowest_bit_set = highest_bit_set = -1;
1602 if ((lowest_bit_set == -1)
1603 && ((low_bits >> i) & 1))
1605 if ((highest_bit_set == -1)
1606 && ((high_bits >> (32 - i - 1)) & 1))
1607 highest_bit_set = (64 - i - 1);
1610 && ((highest_bit_set == -1)
1611 || (lowest_bit_set == -1)));
1617 if ((lowest_bit_set == -1)
1618 && ((high_bits >> i) & 1))
1619 lowest_bit_set = i + 32;
1620 if ((highest_bit_set == -1)
1621 && ((low_bits >> (32 - i - 1)) & 1))
1622 highest_bit_set = 32 - i - 1;
1625 && ((highest_bit_set == -1)
1626 || (lowest_bit_set == -1)));
1628 /* If there are no bits set this should have gone out
1629 as one instruction! */
1630 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
1631 all_bits_between_are_set = 1;
1632 for (i = lowest_bit_set; i <= highest_bit_set; i++)
1636 if ((low_bits & (1 << i)) != 0)
1641 if ((high_bits & (1 << (i - 32))) != 0)
1644 all_bits_between_are_set = 0;
1647 *hbsp = highest_bit_set;
1648 *lbsp = lowest_bit_set;
1649 *abbasp = all_bits_between_are_set;
1652 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
1655 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
1656 unsigned HOST_WIDE_INT low_bits)
1658 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
1661 || high_bits == 0xffffffff)
1664 analyze_64bit_constant (high_bits, low_bits,
1665 &highest_bit_set, &lowest_bit_set,
1666 &all_bits_between_are_set);
1668 if ((highest_bit_set == 63
1669 || lowest_bit_set == 0)
1670 && all_bits_between_are_set != 0)
1673 if ((highest_bit_set - lowest_bit_set) < 21)
1679 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
1680 unsigned HOST_WIDE_INT,
1683 static unsigned HOST_WIDE_INT
1684 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
1685 unsigned HOST_WIDE_INT low_bits,
1686 int lowest_bit_set, int shift)
1688 HOST_WIDE_INT hi, lo;
1690 if (lowest_bit_set < 32)
1692 lo = (low_bits >> lowest_bit_set) << shift;
1693 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
1698 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
1700 gcc_assert (! (hi & lo));
1704 /* Here we are sure to be arch64 and this is an integer constant
1705 being loaded into a register. Emit the most efficient
1706 insn sequence possible. Detection of all the 1-insn cases
1707 has been done already. */
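/* The cases below are tried in order of increasing length: a shifted small
   constant or shifted sethi (2 insns), the quick1/quick2 helpers (2-3 insns),
   building the bitwise negation and xor'ing when that turns out cheaper, and
   finally the full decomposition in sparc_emit_set_const64_longway.  */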
1709 sparc_emit_set_const64 (rtx op0, rtx op1)
1711 unsigned HOST_WIDE_INT high_bits, low_bits;
1712 int lowest_bit_set, highest_bit_set;
1713 int all_bits_between_are_set;
1716 /* Sanity check that we know what we are working with. */
1717 gcc_assert (TARGET_ARCH64
1718 && (GET_CODE (op0) == SUBREG
1719 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1721 if (reload_in_progress || reload_completed)
1724 if (GET_CODE (op1) != CONST_INT)
1726 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1731 temp = gen_reg_rtx (DImode);
1733 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1734 low_bits = (INTVAL (op1) & 0xffffffff);
1736 /* low_bits bits 0 --> 31
1737 high_bits bits 32 --> 63 */
1739 analyze_64bit_constant (high_bits, low_bits,
1740 &highest_bit_set, &lowest_bit_set,
1741 &all_bits_between_are_set);
1743 /* First try for a 2-insn sequence. */
1745 /* These situations are preferred because the optimizer can
1746 * do more things with them:
1747 * 1) mov -1, %reg
1748 *    sllx %reg, shift, %reg
1749 * 2) mov -1, %reg
1750 *    srlx %reg, shift, %reg
1751 * 3) mov some_small_const, %reg
1752 * sllx %reg, shift, %reg
1754 if (((highest_bit_set == 63
1755 || lowest_bit_set == 0)
1756 && all_bits_between_are_set != 0)
1757 || ((highest_bit_set - lowest_bit_set) < 12))
1759 HOST_WIDE_INT the_const = -1;
1760 int shift = lowest_bit_set;
1762 if ((highest_bit_set != 63
1763 && lowest_bit_set != 0)
1764 || all_bits_between_are_set == 0)
1767 create_simple_focus_bits (high_bits, low_bits,
1770 else if (lowest_bit_set == 0)
1771 shift = -(63 - highest_bit_set);
1773 gcc_assert (SPARC_SIMM13_P (the_const));
1774 gcc_assert (shift != 0);
1776 emit_insn (gen_safe_SET64 (temp, the_const));
1778 emit_insn (gen_rtx_SET (VOIDmode,
1780 gen_rtx_ASHIFT (DImode,
1784 emit_insn (gen_rtx_SET (VOIDmode,
1786 gen_rtx_LSHIFTRT (DImode,
1788 GEN_INT (-shift))));
1792 /* Now a range of 22 or less bits set somewhere.
1793 * 1) sethi %hi(focus_bits), %reg
1794 * sllx %reg, shift, %reg
1795 * 2) sethi %hi(focus_bits), %reg
1796 * srlx %reg, shift, %reg
1798 if ((highest_bit_set - lowest_bit_set) < 21)
1800 unsigned HOST_WIDE_INT focus_bits =
1801 create_simple_focus_bits (high_bits, low_bits,
1802 lowest_bit_set, 10);
1804 gcc_assert (SPARC_SETHI_P (focus_bits));
1805 gcc_assert (lowest_bit_set != 10);
1807 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1809 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1810 if (lowest_bit_set < 10)
1811 emit_insn (gen_rtx_SET (VOIDmode,
1813 gen_rtx_LSHIFTRT (DImode, temp,
1814 GEN_INT (10 - lowest_bit_set))));
1815 else if (lowest_bit_set > 10)
1816 emit_insn (gen_rtx_SET (VOIDmode,
1818 gen_rtx_ASHIFT (DImode, temp,
1819 GEN_INT (lowest_bit_set - 10))));
1823 /* 1) sethi %hi(low_bits), %reg
1824 * or %reg, %lo(low_bits), %reg
1825 * 2) sethi %hi(~low_bits), %reg
1826 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1829 || high_bits == 0xffffffff)
1831 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1832 (high_bits == 0xffffffff));
1836 /* Now, try 3-insn sequences. */
1838 /* 1) sethi %hi(high_bits), %reg
1839 * or %reg, %lo(high_bits), %reg
1840 * sllx %reg, 32, %reg
1844 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1848 /* We may be able to do something quick
1849 when the constant is negated, so try that. */
1850 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1851 (~low_bits) & 0xfffffc00))
1853 /* NOTE: The trailing bits get XOR'd so we need the
1854 non-negated bits, not the negated ones. */
1855 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1857 if ((((~high_bits) & 0xffffffff) == 0
1858 && ((~low_bits) & 0x80000000) == 0)
1859 || (((~high_bits) & 0xffffffff) == 0xffffffff
1860 && ((~low_bits) & 0x80000000) != 0))
1862 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1864 if ((SPARC_SETHI_P (fast_int)
1865 && (~high_bits & 0xffffffff) == 0)
1866 || SPARC_SIMM13_P (fast_int))
1867 emit_insn (gen_safe_SET64 (temp, fast_int));
1869 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1874 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1875 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
1876 sparc_emit_set_const64 (temp, negated_const);
1879 /* If we are XOR'ing with -1, then we should emit a one's complement
1880 instead. This way the combiner will notice logical operations
1881 such as ANDN later on and substitute. */
1882 if (trailing_bits == 0x3ff)
1884 emit_insn (gen_rtx_SET (VOIDmode, op0,
1885 gen_rtx_NOT (DImode, temp)));
1889 emit_insn (gen_rtx_SET (VOIDmode,
1891 gen_safe_XOR64 (temp,
1892 (-0x400 | trailing_bits))));
1897 /* 1) sethi %hi(xxx), %reg
1898 * or %reg, %lo(xxx), %reg
1899 * sllx %reg, yyy, %reg
1901 * ??? This is just a generalized version of the low_bits==0
1902 * thing above, FIXME...
1904 if ((highest_bit_set - lowest_bit_set) < 32)
1906 unsigned HOST_WIDE_INT focus_bits =
1907 create_simple_focus_bits (high_bits, low_bits,
1910 /* We can't get here in this state. */
1911 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1913 /* So what we know is that the set bits straddle the
1914 middle of the 64-bit word. */
1915 sparc_emit_set_const64_quick2 (op0, temp,
1921 /* 1) sethi %hi(high_bits), %reg
1922 * or %reg, %lo(high_bits), %reg
1923 * sllx %reg, 32, %reg
1924 * or %reg, low_bits, %reg
1926 if (SPARC_SIMM13_P(low_bits)
1927 && ((int)low_bits > 0))
1929 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
1933 /* The easiest way, when all else fails, is full decomposition. */
1935 printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
1936 high_bits, low_bits, ~high_bits, ~low_bits);
1938 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1940 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
1942 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1943 return the mode to be used for the comparison. For floating-point,
1944 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1945 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1946 processing is needed. */
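/* For example, a comparison of (plus:SI a b) against zero gets CC_NOOVmode
   (CCX_NOOVmode for DImode), a plain integer comparison gets CCmode or
   CCXmode, and floating-point comparisons get CCFPmode or CCFPEmode.  */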
1949 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1951 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1977 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
1978 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
1980 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1981 return CCX_NOOVmode;
1987 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1994 /* Emit the compare insn and return the CC reg for a CODE comparison
1995 with operands X and Y. */
1998 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2000 enum machine_mode mode;
2003 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2006 mode = SELECT_CC_MODE (code, x, y);
2008 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2009 fcc regs (cse can't tell they're really call clobbered regs and will
2010 remove a duplicate comparison even if there is an intervening function
2011 call - it will then try to reload the cc reg via an int reg which is why
2012 we need the movcc patterns). It is possible to provide the movcc
2013 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2014 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2015 to tell cse that CCFPE mode registers (even pseudos) are call clobbered.  */
2018 /* ??? This is an experiment. Rather than making changes to cse which may
2019 or may not be easy/clean, we do our own cse. This is possible because
2020 we will generate hard registers. Cse knows they're call clobbered (it
2021 doesn't know the same thing about pseudos). If we guess wrong, no big
2022 deal, but if we win, great! */
2024 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2025 #if 1 /* experiment */
2028 /* We cycle through the registers to ensure they're all exercised. */
2029 static int next_fcc_reg = 0;
2030 /* Previous x,y for each fcc reg. */
2031 static rtx prev_args[4][2];
2033 /* Scan prev_args for x,y. */
2034 for (reg = 0; reg < 4; reg++)
2035 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2040 prev_args[reg][0] = x;
2041 prev_args[reg][1] = y;
2042 next_fcc_reg = (next_fcc_reg + 1) & 3;
2044 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2047 cc_reg = gen_reg_rtx (mode);
2048 #endif /* ! experiment */
2049 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2050 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2052 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2054 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
2055 will only result in an unrecognizable insn so no point in asserting. */
2056 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2062 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2065 gen_compare_reg (rtx cmp)
2067 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2070 /* This function is used for v9 only.
2071 DEST is the target of the Scc insn.
2072 CODE is the code for an Scc's comparison.
2073 X and Y are the values we compare.
2075 This function is needed to turn
2078 (gt (reg:CCX 100 %icc)
2082 (gt:DI (reg:CCX 100 %icc)
2085 I.e., the instruction recognizer needs to see the mode of the comparison to
2086 find the right instruction. We could use "gt:DI" right in the
2087 define_expand, but leaving it out allows us to handle DI, SI, etc. */
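/* Hedged example (an illustration, not taken from the original sources):
   for the "reg != 0" special case handled below, with X and DEST sharing a
   64-bit register, the whole Scc collapses into a single V9 conditional
   move on a register value, roughly:

        movrnz  %o0, 1, %o0     ! %o0 = (%o0 != 0)

   assuming both X and DEST live in %o0.  */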
2090 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2093 && (GET_MODE (x) == DImode
2094 || GET_MODE (dest) == DImode))
2097 /* Try to use the movrCC insns. */
2099 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2101 && v9_regcmp_p (compare_code))
2106 /* Special case for op0 != 0. This can be done with one instruction if
2109 if (compare_code == NE
2110 && GET_MODE (dest) == DImode
2111 && rtx_equal_p (op0, dest))
2113 emit_insn (gen_rtx_SET (VOIDmode, dest,
2114 gen_rtx_IF_THEN_ELSE (DImode,
2115 gen_rtx_fmt_ee (compare_code, DImode,
2122 if (reg_overlap_mentioned_p (dest, op0))
2124 /* Handle the case where dest == x.
2125 We "early clobber" the result. */
2126 op0 = gen_reg_rtx (GET_MODE (x));
2127 emit_move_insn (op0, x);
2130 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2131 if (GET_MODE (op0) != DImode)
2133 temp = gen_reg_rtx (DImode);
2134 convert_move (temp, op0, 0);
2138 emit_insn (gen_rtx_SET (VOIDmode, dest,
2139 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2140 gen_rtx_fmt_ee (compare_code, DImode,
2148 x = gen_compare_reg_1 (compare_code, x, y);
2151 gcc_assert (GET_MODE (x) != CC_NOOVmode
2152 && GET_MODE (x) != CCX_NOOVmode);
2154 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2155 emit_insn (gen_rtx_SET (VOIDmode, dest,
2156 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2157 gen_rtx_fmt_ee (compare_code,
2158 GET_MODE (x), x, y),
2159 const1_rtx, dest)));
2165 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2166 without jumps using the addx/subx instructions. */
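/* A hedged sketch (illustrative, not from the original sources) of the
   branch-free sequences referred to above, assuming X in %o0, Y in %o1 and
   the result wanted in %o2:

        ! sltu:  %o2 = (%o0 <u %o1)
        subcc   %o0, %o1, %g0   ! carry = borrow of x - y
        addx    %g0, 0, %o2     ! %o2 = 0 + 0 + carry

        ! seq against zero:  %o2 = (%o0 == 0)
        subcc   %g0, %o0, %g0   ! carry set iff %o0 != 0
        subx    %g0, -1, %o2    ! %o2 = 0 - (-1) - carry = 1 - carry

   sgeu and sne fall out the same way with the roles of addx/subx swapped.  */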
2169 emit_scc_insn (rtx operands[])
2176 /* The quad-word fp compare library routines all return nonzero to indicate
2177 true, which is different from the equivalent libgcc routines, so we must
2178 handle them specially here. */
2179 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2181 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2182 GET_CODE (operands[1]));
2183 operands[2] = XEXP (operands[1], 0);
2184 operands[3] = XEXP (operands[1], 1);
2187 code = GET_CODE (operands[1]);
2191 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2192 more applications). The exception to this is "reg != 0" which can
2193 be done in one instruction on v9 (so we do it). */
2196 if (GET_MODE (x) == SImode)
2198 rtx pat = gen_seqsi_special (operands[0], x, y);
2202 else if (GET_MODE (x) == DImode)
2204 rtx pat = gen_seqdi_special (operands[0], x, y);
2212 if (GET_MODE (x) == SImode)
2214 rtx pat = gen_snesi_special (operands[0], x, y);
2218 else if (GET_MODE (x) == DImode)
2220 rtx pat = gen_snedi_special (operands[0], x, y);
2226 /* For the rest, on v9 we can use conditional moves. */
2230 if (gen_v9_scc (operands[0], code, x, y))
2234 /* We can do LTU and GEU using the addx/subx instructions too. And
2235 for GTU/LEU, if both operands are registers, swap them and fall
2236 back to the easy case. */
2237 if (code == GTU || code == LEU)
2239 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2240 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2245 code = swap_condition (code);
2249 if (code == LTU || code == GEU)
2251 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2252 gen_rtx_fmt_ee (code, SImode,
2253 gen_compare_reg_1 (code, x, y),
2258 /* Nope, do branches. */
2262 /* Emit a conditional jump insn for the v9 architecture using comparison code
2263 CODE and jump target LABEL.
2264 This function exists to take advantage of the v9 brxx insns. */
2267 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2269 emit_jump_insn (gen_rtx_SET (VOIDmode,
2271 gen_rtx_IF_THEN_ELSE (VOIDmode,
2272 gen_rtx_fmt_ee (code, GET_MODE (op0),
2274 gen_rtx_LABEL_REF (VOIDmode, label),
2279 emit_conditional_branch_insn (rtx operands[])
2281 /* The quad-word fp compare library routines all return nonzero to indicate
2282 true, which is different from the equivalent libgcc routines, so we must
2283 handle them specially here. */
2284 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2286 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2287 GET_CODE (operands[0]));
2288 operands[1] = XEXP (operands[0], 0);
2289 operands[2] = XEXP (operands[0], 1);
2292 if (TARGET_ARCH64 && operands[2] == const0_rtx
2293 && GET_CODE (operands[1]) == REG
2294 && GET_MODE (operands[1]) == DImode)
2296 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2300 operands[1] = gen_compare_reg (operands[0]);
2301 operands[2] = const0_rtx;
2302 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2303 operands[1], operands[2]);
2304 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2309 /* Generate a DFmode part of a hard TFmode register.
2310 REG is the TFmode hard register, LOW is 1 for the
2311 low 64 bits of the register and 0 otherwise. */
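/* For instance (a sketch, not from the original sources): on this big-endian
   target a TFmode value living in %f0..%f3 (hard register 32) has its
   high-order DFmode half in %f0 and its low-order half in %f2, so
   gen_df_reg (reg, 1) yields (reg:DF 34), i.e. %f2.  */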
2314 gen_df_reg (rtx reg, int low)
2316 int regno = REGNO (reg);
2318 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2319 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2320 return gen_rtx_REG (DFmode, regno);
2323 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2324 Unlike normal calls, TFmode operands are passed by reference. It is
2325 assumed that no more than 3 operands are required. */
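/* Hedged illustration (not from the original sources): given the by-reference
   convention described above, a soft TFmode addition is lowered to a call in
   the style of the SPARC quad-float emulation routines, roughly

        _Qp_add (&result, &x, &y)      on the 64-bit ABI,

   with the result slot passed as the extra leading argument that is handled
   via ret_slot below.  The actual routine names are picked by the callers
   further down in this file.  */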
2328 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2330 rtx ret_slot = NULL, arg[3], func_sym;
2333 /* We only expect to be called for conversions, unary, and binary ops. */
2334 gcc_assert (nargs == 2 || nargs == 3);
2336 for (i = 0; i < nargs; ++i)
2338 rtx this_arg = operands[i];
2341 /* TFmode arguments and return values are passed by reference. */
2342 if (GET_MODE (this_arg) == TFmode)
2344 int force_stack_temp;
2346 force_stack_temp = 0;
2347 if (TARGET_BUGGY_QP_LIB && i == 0)
2348 force_stack_temp = 1;
2350 if (GET_CODE (this_arg) == MEM
2351 && ! force_stack_temp)
2352 this_arg = XEXP (this_arg, 0);
2353 else if (CONSTANT_P (this_arg)
2354 && ! force_stack_temp)
2356 this_slot = force_const_mem (TFmode, this_arg);
2357 this_arg = XEXP (this_slot, 0);
2361 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2363 /* Operand 0 is the return value. We'll copy it out later. */
2365 emit_move_insn (this_slot, this_arg);
2367 ret_slot = this_slot;
2369 this_arg = XEXP (this_slot, 0);
2376 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2378 if (GET_MODE (operands[0]) == TFmode)
2381 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2382 arg[0], GET_MODE (arg[0]),
2383 arg[1], GET_MODE (arg[1]));
2385 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2386 arg[0], GET_MODE (arg[0]),
2387 arg[1], GET_MODE (arg[1]),
2388 arg[2], GET_MODE (arg[2]));
2391 emit_move_insn (operands[0], ret_slot);
2397 gcc_assert (nargs == 2);
2399 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2400 GET_MODE (operands[0]), 1,
2401 arg[1], GET_MODE (arg[1]));
2403 if (ret != operands[0])
2404 emit_move_insn (operands[0], ret);
2408 /* Expand soft-float TFmode calls to the SPARC ABI routines. */
2411 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2433 emit_soft_tfmode_libcall (func, 3, operands);
2437 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2441 gcc_assert (code == SQRT);
2444 emit_soft_tfmode_libcall (func, 2, operands);
2448 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2455 switch (GET_MODE (operands[1]))
2468 case FLOAT_TRUNCATE:
2469 switch (GET_MODE (operands[0]))
2483 switch (GET_MODE (operands[1]))
2488 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2498 case UNSIGNED_FLOAT:
2499 switch (GET_MODE (operands[1]))
2504 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2515 switch (GET_MODE (operands[0]))
2529 switch (GET_MODE (operands[0]))
2546 emit_soft_tfmode_libcall (func, 2, operands);
2549 /* Expand a hard-float TFmode operation. All arguments must be in registers. */
2553 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2557 if (GET_RTX_CLASS (code) == RTX_UNARY)
2559 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2560 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2564 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2565 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2566 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2567 operands[1], operands[2]);
2570 if (register_operand (operands[0], VOIDmode))
2573 dest = gen_reg_rtx (GET_MODE (operands[0]));
2575 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2577 if (dest != operands[0])
2578 emit_move_insn (operands[0], dest);
2582 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2584 if (TARGET_HARD_QUAD)
2585 emit_hard_tfmode_operation (code, operands);
2587 emit_soft_tfmode_binop (code, operands);
2591 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2593 if (TARGET_HARD_QUAD)
2594 emit_hard_tfmode_operation (code, operands);
2596 emit_soft_tfmode_unop (code, operands);
2600 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2602 if (TARGET_HARD_QUAD)
2603 emit_hard_tfmode_operation (code, operands);
2605 emit_soft_tfmode_cvt (code, operands);
2608 /* Return nonzero if a branch/jump/call instruction will emit a nop
2609 into its delay slot. */
2612 empty_delay_slot (rtx insn)
2616 /* If there is no previous instruction (should not happen), return true. */
2617 if (PREV_INSN (insn) == NULL)
2620 seq = NEXT_INSN (PREV_INSN (insn));
2621 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2627 /* Return nonzero if TRIAL can go into the call delay slot. */
2630 tls_call_delay (rtx trial)
2635 call __tls_get_addr, %tgd_call (foo)
2636 add %l7, %o0, %o0, %tgd_add (foo)
2637 while Sun as/ld does not. */
2638 if (TARGET_GNU_TLS || !TARGET_TLS)
2641 pat = PATTERN (trial);
2643 /* We must reject tgd_add{32|64}, i.e.
2644 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2645 and tldm_add{32|64}, i.e.
2646 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2648 if (GET_CODE (pat) == SET
2649 && GET_CODE (SET_SRC (pat)) == PLUS)
2651 rtx unspec = XEXP (SET_SRC (pat), 1);
2653 if (GET_CODE (unspec) == UNSPEC
2654 && (XINT (unspec, 1) == UNSPEC_TLSGD
2655 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2662 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2663 instruction. RETURN_P is true if the v9 variant 'return' is to be
2664 considered in the test too.
2666 TRIAL must be a SET whose destination is a REG appropriate for the
2667 'restore' instruction or, if RETURN_P is true, for the 'return' instruction. */
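/* A hedged example of the kind of combination this enables (illustrative,
   not from the original sources): for a function body that ends in
   "return x + y;", the final add can be folded into the register-window
   restore, giving roughly

        ret                             ! jmpl %i7+8, %g0
         restore %i0, %i1, %o0          ! caller's %o0 = %i0 + %i1

   assuming x and y arrived in %i0 and %i1.  */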
2671 eligible_for_restore_insn (rtx trial, bool return_p)
2673 rtx pat = PATTERN (trial);
2674 rtx src = SET_SRC (pat);
2676 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2677 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2678 && arith_operand (src, GET_MODE (src)))
2681 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2683 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2686 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2687 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2688 && arith_double_operand (src, GET_MODE (src)))
2689 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2691 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2692 else if (! TARGET_FPU && register_operand (src, SFmode))
2695 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2696 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2699 /* If we have the 'return' instruction, anything that does not use
2700 local or output registers and can go into a delay slot wins. */
2701 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2702 && (get_attr_in_uncond_branch_delay (trial)
2703 == IN_UNCOND_BRANCH_DELAY_TRUE))
2706 /* The 'restore src1,src2,dest' pattern for SImode. */
2707 else if (GET_CODE (src) == PLUS
2708 && register_operand (XEXP (src, 0), SImode)
2709 && arith_operand (XEXP (src, 1), SImode))
2712 /* The 'restore src1,src2,dest' pattern for DImode. */
2713 else if (GET_CODE (src) == PLUS
2714 && register_operand (XEXP (src, 0), DImode)
2715 && arith_double_operand (XEXP (src, 1), DImode))
2718 /* The 'restore src1,%lo(src2),dest' pattern. */
2719 else if (GET_CODE (src) == LO_SUM
2720 && ! TARGET_CM_MEDMID
2721 && ((register_operand (XEXP (src, 0), SImode)
2722 && immediate_operand (XEXP (src, 1), SImode))
2724 && register_operand (XEXP (src, 0), DImode)
2725 && immediate_operand (XEXP (src, 1), DImode))))
2728 /* The 'restore src,src,dest' pattern. */
2729 else if (GET_CODE (src) == ASHIFT
2730 && (register_operand (XEXP (src, 0), SImode)
2731 || register_operand (XEXP (src, 0), DImode))
2732 && XEXP (src, 1) == const1_rtx)
2738 /* Return nonzero if TRIAL can go into the function return's
2742 eligible_for_return_delay (rtx trial)
2746 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2749 if (get_attr_length (trial) != 1)
2752 /* If there are any call-saved registers, we should scan TRIAL to make sure it
2753 does not reference them. For now just make it easy. */
2757 /* If the function uses __builtin_eh_return, the eh_return machinery
2758 occupies the delay slot. */
2759 if (crtl->calls_eh_return)
2762 /* In the case of a true leaf function, anything can go into the slot. */
2763 if (sparc_leaf_function_p)
2764 return get_attr_in_uncond_branch_delay (trial)
2765 == IN_UNCOND_BRANCH_DELAY_TRUE;
2767 pat = PATTERN (trial);
2769 /* Otherwise, only operations which can be done in tandem with
2770 a `restore' or `return' insn can go into the delay slot. */
2771 if (GET_CODE (SET_DEST (pat)) != REG
2772 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2775 /* If this instruction sets up a floating-point register and we have a return
2776 instruction, it can probably go in. But restore will not work with FP regs. */
2778 if (REGNO (SET_DEST (pat)) >= 32)
2780 && ! epilogue_renumber (&pat, 1)
2781 && (get_attr_in_uncond_branch_delay (trial)
2782 == IN_UNCOND_BRANCH_DELAY_TRUE));
2784 return eligible_for_restore_insn (trial, true);
2787 /* Return nonzero if TRIAL can go into the sibling call's
2791 eligible_for_sibcall_delay (rtx trial)
2795 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2798 if (get_attr_length (trial) != 1)
2801 pat = PATTERN (trial);
2803 if (sparc_leaf_function_p)
2805 /* If the tail call is done using the call instruction,
2806 we have to restore %o7 in the delay slot. */
2807 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2810 /* %g1 is used to build the function address */
2811 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2817 /* Otherwise, only operations which can be done in tandem with
2818 a `restore' insn can go into the delay slot. */
2819 if (GET_CODE (SET_DEST (pat)) != REG
2820 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2821 || REGNO (SET_DEST (pat)) >= 32)
2824 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2826 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2829 return eligible_for_restore_insn (trial, false);
2833 short_branch (int uid1, int uid2)
2835 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2837 /* Leave a few words of "slop". */
2838 if (delta >= -1023 && delta <= 1022)
2844 /* Return nonzero if REG is not used after INSN.
2845 We assume REG is a reload reg, and therefore does
2846 not live past labels or calls or jumps. */
2848 reg_unused_after (rtx reg, rtx insn)
2850 enum rtx_code code, prev_code = UNKNOWN;
2852 while ((insn = NEXT_INSN (insn)))
2854 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2857 code = GET_CODE (insn);
2858 if (GET_CODE (insn) == CODE_LABEL)
2863 rtx set = single_set (insn);
2864 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2867 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2869 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2877 /* Determine if it's legal to put X into the constant pool. This
2878 is not possible if X contains the address of a symbol that is
2879 not constant (TLS) or not known at final link time (PIC). */
2882 sparc_cannot_force_const_mem (rtx x)
2884 switch (GET_CODE (x))
2889 /* Accept all non-symbolic constants. */
2893 /* Labels are OK iff we are non-PIC. */
2894 return flag_pic != 0;
2897 /* 'Naked' TLS symbol references are never OK,
2898 non-TLS symbols are OK iff we are non-PIC. */
2899 if (SYMBOL_REF_TLS_MODEL (x))
2902 return flag_pic != 0;
2905 return sparc_cannot_force_const_mem (XEXP (x, 0));
2908 return sparc_cannot_force_const_mem (XEXP (x, 0))
2909 || sparc_cannot_force_const_mem (XEXP (x, 1));
2918 static GTY(()) char pic_helper_symbol_name[256];
2919 static GTY(()) rtx pic_helper_symbol;
2920 static GTY(()) bool pic_helper_emitted_p = false;
2921 static GTY(()) rtx global_offset_table;
2923 /* Ensure that we are not using patterns that are not OK with PIC. */
2931 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2932 && (GET_CODE (recog_data.operand[i]) != CONST
2933 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2934 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2935 == global_offset_table)
2936 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2944 /* Return true if X is an address which needs a temporary register when
2945 reloaded while generating PIC code. */
2948 pic_address_needs_scratch (rtx x)
2950 /* An address which is a symbolic operand plus a non-SMALL_INT offset needs a temp reg. */
2951 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2952 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2953 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2954 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2960 /* Determine if a given RTX is a valid constant. We already know this
2961 satisfies CONSTANT_P. */
2964 legitimate_constant_p (rtx x)
2966 switch (GET_CODE (x))
2970 if (sparc_tls_referenced_p (x))
2975 if (GET_MODE (x) == VOIDmode)
2978 /* Floating-point constants are generally not OK.
2979 The only exception is 0.0 in VIS. */
2981 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
2982 && const_zero_operand (x, GET_MODE (x)))
2988 /* Vector constants are generally not OK.
2989 The only exception is 0 in VIS. */
2991 && const_zero_operand (x, GET_MODE (x)))
3003 /* Determine if a given RTX is a valid constant address. */
3006 constant_address_p (rtx x)
3008 switch (GET_CODE (x))
3016 if (flag_pic && pic_address_needs_scratch (x))
3018 return legitimate_constant_p (x);
3021 return !flag_pic && legitimate_constant_p (x);
3028 /* Nonzero if the constant value X is a legitimate general operand
3029 when generating PIC code. It is given that flag_pic is on and
3030 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3033 legitimate_pic_operand_p (rtx x)
3035 if (pic_address_needs_scratch (x))
3037 if (sparc_tls_referenced_p (x))
3042 /* Return nonzero if ADDR is a valid memory address.
3043 STRICT specifies whether strict register checking applies. */
3046 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3048 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3050 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3052 else if (GET_CODE (addr) == PLUS)
3054 rs1 = XEXP (addr, 0);
3055 rs2 = XEXP (addr, 1);
3057 /* Canonicalize. REG comes first; if there are no regs,
3058 LO_SUM comes first. */
3060 && GET_CODE (rs1) != SUBREG
3062 || GET_CODE (rs2) == SUBREG
3063 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3065 rs1 = XEXP (addr, 1);
3066 rs2 = XEXP (addr, 0);
3070 && rs1 == pic_offset_table_rtx
3072 && GET_CODE (rs2) != SUBREG
3073 && GET_CODE (rs2) != LO_SUM
3074 && GET_CODE (rs2) != MEM
3075 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
3076 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3077 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3079 || GET_CODE (rs1) == SUBREG)
3080 && RTX_OK_FOR_OFFSET_P (rs2)))
3085 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3086 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3088 /* We prohibit REG + REG for TFmode when there are no quad move insns
3089 and we consequently need to split. We do this because REG+REG
3090 is not an offsettable address. If we get the situation in reload
3091 where source and destination of a movtf pattern are both MEMs with
3092 REG+REG address, then only one of them gets converted to an
3093 offsettable address. */
3095 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3098 /* We prohibit REG + REG on ARCH32 if not optimizing for
3099 DFmode/DImode because then mem_min_alignment is likely to be zero
3100 after reload and the forced split would lack a matching splitter
3102 if (TARGET_ARCH32 && !optimize
3103 && (mode == DFmode || mode == DImode))
3106 else if (USE_AS_OFFSETABLE_LO10
3107 && GET_CODE (rs1) == LO_SUM
3109 && ! TARGET_CM_MEDMID
3110 && RTX_OK_FOR_OLO10_P (rs2))
3113 imm1 = XEXP (rs1, 1);
3114 rs1 = XEXP (rs1, 0);
3115 if (!CONSTANT_P (imm1)
3116 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3120 else if (GET_CODE (addr) == LO_SUM)
3122 rs1 = XEXP (addr, 0);
3123 imm1 = XEXP (addr, 1);
3125 if (!CONSTANT_P (imm1)
3126 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3129 /* We can't allow TFmode in 32-bit mode, because an offset greater
3130 than the alignment (8) may cause the LO_SUM to overflow. */
3131 if (mode == TFmode && TARGET_ARCH32)
3134 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3139 if (GET_CODE (rs1) == SUBREG)
3140 rs1 = SUBREG_REG (rs1);
3146 if (GET_CODE (rs2) == SUBREG)
3147 rs2 = SUBREG_REG (rs2);
3154 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3155 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3160 if ((REGNO (rs1) >= 32
3161 && REGNO (rs1) != FRAME_POINTER_REGNUM
3162 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3164 && (REGNO (rs2) >= 32
3165 && REGNO (rs2) != FRAME_POINTER_REGNUM
3166 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3172 /* Construct the SYMBOL_REF for the __tls_get_addr function. */
3174 static GTY(()) rtx sparc_tls_symbol;
3177 sparc_tls_get_addr (void)
3179 if (!sparc_tls_symbol)
3180 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3182 return sparc_tls_symbol;
3186 sparc_tls_got (void)
3191 crtl->uses_pic_offset_table = 1;
3192 return pic_offset_table_rtx;
3195 if (!global_offset_table)
3196 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3197 temp = gen_reg_rtx (Pmode);
3198 emit_move_insn (temp, global_offset_table);
3202 /* Return true if X contains a thread-local symbol. */
3205 sparc_tls_referenced_p (rtx x)
3207 if (!TARGET_HAVE_TLS)
3210 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3211 x = XEXP (XEXP (x, 0), 0);
3213 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3216 /* That's all we handle in legitimize_tls_address for now. */
3220 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3221 this (thread-local) address. */
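/* Hedged sketch (not from the original sources) of the code this produces
   for the global-dynamic model on 32-bit, matching the tgd_* patterns used
   below and the documented SPARC TLS ABI:

        sethi   %tgd_hi22(sym), %o1
        add     %o1, %tgd_lo10(sym), %o1
        add     %l7, %o1, %o0, %tgd_add(sym)
        call    __tls_get_addr, %tgd_call(sym)
         nop

   with %l7 being the GOT pointer; the other models below follow the same
   shape with their own relocation operators.  */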
3224 legitimize_tls_address (rtx addr)
3226 rtx temp1, temp2, temp3, ret, o0, got, insn;
3228 gcc_assert (can_create_pseudo_p ());
3230 if (GET_CODE (addr) == SYMBOL_REF)
3231 switch (SYMBOL_REF_TLS_MODEL (addr))
3233 case TLS_MODEL_GLOBAL_DYNAMIC:
3235 temp1 = gen_reg_rtx (SImode);
3236 temp2 = gen_reg_rtx (SImode);
3237 ret = gen_reg_rtx (Pmode);
3238 o0 = gen_rtx_REG (Pmode, 8);
3239 got = sparc_tls_got ();
3240 emit_insn (gen_tgd_hi22 (temp1, addr));
3241 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3244 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3245 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3250 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3251 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3254 CALL_INSN_FUNCTION_USAGE (insn)
3255 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3256 CALL_INSN_FUNCTION_USAGE (insn));
3257 insn = get_insns ();
3259 emit_libcall_block (insn, ret, o0, addr);
3262 case TLS_MODEL_LOCAL_DYNAMIC:
3264 temp1 = gen_reg_rtx (SImode);
3265 temp2 = gen_reg_rtx (SImode);
3266 temp3 = gen_reg_rtx (Pmode);
3267 ret = gen_reg_rtx (Pmode);
3268 o0 = gen_rtx_REG (Pmode, 8);
3269 got = sparc_tls_got ();
3270 emit_insn (gen_tldm_hi22 (temp1));
3271 emit_insn (gen_tldm_lo10 (temp2, temp1));
3274 emit_insn (gen_tldm_add32 (o0, got, temp2));
3275 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3280 emit_insn (gen_tldm_add64 (o0, got, temp2));
3281 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3284 CALL_INSN_FUNCTION_USAGE (insn)
3285 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3286 CALL_INSN_FUNCTION_USAGE (insn));
3287 insn = get_insns ();
3289 emit_libcall_block (insn, temp3, o0,
3290 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3291 UNSPEC_TLSLD_BASE));
3292 temp1 = gen_reg_rtx (SImode);
3293 temp2 = gen_reg_rtx (SImode);
3294 emit_insn (gen_tldo_hix22 (temp1, addr));
3295 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3297 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3299 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3302 case TLS_MODEL_INITIAL_EXEC:
3303 temp1 = gen_reg_rtx (SImode);
3304 temp2 = gen_reg_rtx (SImode);
3305 temp3 = gen_reg_rtx (Pmode);
3306 got = sparc_tls_got ();
3307 emit_insn (gen_tie_hi22 (temp1, addr));
3308 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3310 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3312 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3315 ret = gen_reg_rtx (Pmode);
3317 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3320 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3324 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3327 case TLS_MODEL_LOCAL_EXEC:
3328 temp1 = gen_reg_rtx (Pmode);
3329 temp2 = gen_reg_rtx (Pmode);
3332 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3333 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3337 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3338 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3340 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3347 else if (GET_CODE (addr) == CONST)
3351 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
3353 base = legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
3354 offset = XEXP (XEXP (addr, 0), 1);
3356 base = force_operand (base, NULL_RTX);
3357 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
3358 offset = force_reg (Pmode, offset);
3359 ret = gen_rtx_PLUS (Pmode, base, offset);
3363 gcc_unreachable (); /* for now ... */
3368 /* Legitimize PIC addresses. If the address is already position-independent,
3369 we return ORIG. Newly generated position-independent addresses go into a
3370 reg. This is REG if nonzero, otherwise we allocate register(s) as necessary. */
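/* Hedged sketch (illustrative, not from the original sources) of what the
   SYMBOL_REF case below amounts to for a 32-bit global symbol: the high/lo_sum
   pair builds the GOT offset of the symbol and the final load fetches its
   address from the GOT through the PIC register, roughly

        sethi   %hi(sym), %g1
        or      %g1, %lo(sym), %g1
        ld      [%l7 + %g1], %g1        ! %g1 = &sym

   which is the movsi_high_pic / movsi_lo_sum_pic / const-mem combination
   emitted below.  */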
3374 legitimize_pic_address (rtx orig, rtx reg)
3376 if (GET_CODE (orig) == SYMBOL_REF
3377 /* See the comment in sparc_expand_move. */
3378 || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
3380 rtx pic_ref, address;
3385 gcc_assert (! reload_in_progress && ! reload_completed);
3386 reg = gen_reg_rtx (Pmode);
3391 /* If not during reload, allocate another temp reg here for loading
3392 in the address, so that these instructions can be optimized
3394 rtx temp_reg = ((reload_in_progress || reload_completed)
3395 ? reg : gen_reg_rtx (Pmode));
3397 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3398 won't get confused into thinking that these two instructions
3399 are loading in the true address of the symbol. If in the
3400 future a PIC rtx exists, that should be used instead. */
3403 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3404 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3408 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3409 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3416 pic_ref = gen_const_mem (Pmode,
3417 gen_rtx_PLUS (Pmode,
3418 pic_offset_table_rtx, address));
3419 crtl->uses_pic_offset_table = 1;
3420 insn = emit_move_insn (reg, pic_ref);
3421 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3423 set_unique_reg_note (insn, REG_EQUAL, orig);
3426 else if (GET_CODE (orig) == CONST)
3430 if (GET_CODE (XEXP (orig, 0)) == PLUS
3431 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3436 gcc_assert (! reload_in_progress && ! reload_completed);
3437 reg = gen_reg_rtx (Pmode);
3440 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3441 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
3442 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
3443 base == reg ? NULL_RTX : reg);
3445 if (GET_CODE (offset) == CONST_INT)
3447 if (SMALL_INT (offset))
3448 return plus_constant (base, INTVAL (offset));
3449 else if (! reload_in_progress && ! reload_completed)
3450 offset = force_reg (Pmode, offset);
3452 /* If we reach here, then something is seriously wrong. */
3455 return gen_rtx_PLUS (Pmode, base, offset);
3457 else if (GET_CODE (orig) == LABEL_REF)
3458 /* ??? Why do we do this? */
3459 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3460 the register is live instead, in case it is eliminated. */
3461 crtl->uses_pic_offset_table = 1;
3466 /* Try machine-dependent ways of modifying an illegitimate address X
3467 to be legitimate. If we find one, return the new, valid address.
3469 OLDX is the address as it was before break_out_memory_refs was called.
3470 In some cases it is useful to look at this to decide what needs to be done.
3472 MODE is the mode of the operand pointed to by X.
3474 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
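/* A hedged example (not from the original sources): an address such as
   (plus (reg:SI %o0) (const_int 100000)) cannot use the 13-bit immediate
   field, so the constant is forced into a register and the address becomes
   REG+REG, roughly equivalent to

        x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
                          copy_to_mode_reg (Pmode, XEXP (x, 1)));

   as done for the CONSTANT_ADDRESS_P cases below.  */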
3477 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3478 enum machine_mode mode)
3482 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3483 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3484 force_operand (XEXP (x, 0), NULL_RTX));
3485 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3486 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3487 force_operand (XEXP (x, 1), NULL_RTX));
3488 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3489 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3491 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3492 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3493 force_operand (XEXP (x, 1), NULL_RTX));
3495 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3498 if (sparc_tls_referenced_p (x))
3499 x = legitimize_tls_address (x);
3501 x = legitimize_pic_address (x, NULL_RTX);
3502 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3503 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3504 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3505 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3506 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3507 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3508 else if (GET_CODE (x) == SYMBOL_REF
3509 || GET_CODE (x) == CONST
3510 || GET_CODE (x) == LABEL_REF)
3511 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3516 /* Emit the special PIC helper function. */
3519 emit_pic_helper (void)
3521 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3524 switch_to_section (text_section);
3526 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3528 ASM_OUTPUT_ALIGN (asm_out_file, align);
3529 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3530 if (flag_delayed_branch)
3531 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3532 pic_name, pic_name);
3534 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3535 pic_name, pic_name);
3537 pic_helper_emitted_p = true;
3540 /* Emit code to load the PIC register. */
3543 load_pic_register (bool delay_pic_helper)
3545 int orig_flag_pic = flag_pic;
3547 if (TARGET_VXWORKS_RTP)
3549 emit_insn (gen_vxworks_load_got ());
3550 emit_use (pic_offset_table_rtx);
3554 /* If we haven't initialized the special PIC symbols, do so now. */
3555 if (!pic_helper_symbol_name[0])
3557 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3558 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3559 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3562 /* If we haven't emitted the special PIC helper function, do so now unless
3563 we are requested to delay it. */
3564 if (!delay_pic_helper && !pic_helper_emitted_p)
3569 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3570 pic_helper_symbol));
3572 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3573 pic_helper_symbol));
3574 flag_pic = orig_flag_pic;
3576 /* Need to emit this whether or not we obey regdecls,
3577 since setjmp/longjmp can cause life info to screw up.
3578 ??? In the case where we don't obey regdecls, this is not sufficient
3579 since we may not fall out the bottom. */
3580 emit_use (pic_offset_table_rtx);
3583 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3584 address of the call target. */
3587 sparc_emit_call_insn (rtx pat, rtx addr)
3591 insn = emit_call_insn (pat);
3593 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3594 if (TARGET_VXWORKS_RTP
3596 && GET_CODE (addr) == SYMBOL_REF
3597 && (SYMBOL_REF_DECL (addr)
3598 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3599 : !SYMBOL_REF_LOCAL_P (addr)))
3601 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3602 crtl->uses_pic_offset_table = 1;
3606 /* Return 1 if RTX is a MEM which is known to be aligned to at
3607 least a DESIRED byte boundary. */
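/* Hedged usage note (not from the original sources): the machine description
   uses this predicate in split conditions such as

        mem_min_alignment (operands[1], 8)

   to decide whether a DImode/DFmode access may stay as a single ldd/std or
   must be split into two word-sized accesses.  */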
3610 mem_min_alignment (rtx mem, int desired)
3612 rtx addr, base, offset;
3614 /* If it's not a MEM we can't accept it. */
3615 if (GET_CODE (mem) != MEM)
3619 if (!TARGET_UNALIGNED_DOUBLES
3620 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3623 /* ??? The rest of the function predates MEM_ALIGN so
3624 there is probably a bit of redundancy. */
3625 addr = XEXP (mem, 0);
3626 base = offset = NULL_RTX;
3627 if (GET_CODE (addr) == PLUS)
3629 if (GET_CODE (XEXP (addr, 0)) == REG)
3631 base = XEXP (addr, 0);
3633 /* What we are saying here is that if the base
3634 REG is aligned properly, the compiler will make
3635 sure any REG based index upon it will be so
3637 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3638 offset = XEXP (addr, 1);
3640 offset = const0_rtx;
3643 else if (GET_CODE (addr) == REG)
3646 offset = const0_rtx;
3649 if (base != NULL_RTX)
3651 int regno = REGNO (base);
3653 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3655 /* Check if the compiler has recorded some information
3656 about the alignment of the base REG. If reload has
3657 completed, we already matched with proper alignments.
3658 If not running global_alloc, reload might give us
3659 an unaligned pointer to the local stack, though. */
3661 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3662 || (optimize && reload_completed))
3663 && (INTVAL (offset) & (desired - 1)) == 0)
3668 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3672 else if (! TARGET_UNALIGNED_DOUBLES
3673 || CONSTANT_P (addr)
3674 || GET_CODE (addr) == LO_SUM)
3676 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3677 is true, in which case we can only assume that an access is aligned if
3678 it is to a constant address, or the address involves a LO_SUM. */
3682 /* An obviously unaligned address. */
3687 /* Vectors to keep interesting information about registers where it can easily
3688 be looked up. We used to use the actual mode value as the bit number, but there
3689 are more than 32 modes now. Instead we use two tables: one indexed by
3690 hard register number, and one indexed by mode. */
3692 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3693 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3694 mapped into one sparc_mode_class mode. */
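/* Hedged example (not from the original sources): with the mapping set up in
   sparc_init_modes below, QImode/HImode/SImode all fall into S_MODE, DImode
   into D_MODE, SFmode into SF_MODE, DFmode into DF_MODE, and so on; a
   register/mode query then reduces to a single bit test along the lines of

        (hard_regno_mode_classes[REGNO] & sparc_mode_class[MODE]) != 0

   which is presumably how HARD_REGNO_MODE_OK consumes these tables.  */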
3696 enum sparc_mode_class {
3697 S_MODE, D_MODE, T_MODE, O_MODE,
3698 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3702 /* Modes for single-word and smaller quantities. */
3703 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3705 /* Modes for double-word and smaller quantities. */
3706 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3708 /* Modes for quad-word and smaller quantities. */
3709 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3711 /* Modes for 8-word and smaller quantities. */
3712 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3714 /* Modes for single-float quantities. We must allow any single word or
3715 smaller quantity. This is because the fix/float conversion instructions
3716 take integer inputs/outputs from the float registers. */
3717 #define SF_MODES (S_MODES)
3719 /* Modes for double-float and smaller quantities. */
3720 #define DF_MODES (D_MODES)
3722 /* Modes for quad-float and smaller quantities. */
3723 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
3725 /* Modes for quad-float pairs and smaller quantities. */
3726 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
3728 /* Modes for double-float only quantities. */
3729 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3731 /* Modes for quad-float and double-float only quantities. */
3732 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
3734 /* Modes for quad-float pairs and double-float only quantities. */
3735 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
3737 /* Modes for condition codes. */
3738 #define CC_MODES (1 << (int) CC_MODE)
3739 #define CCFP_MODES (1 << (int) CCFP_MODE)
3741 /* Value is 1 if register/mode pair is acceptable on sparc.
3742 The funny mixture of D and T modes is because integer operations
3743 do not specially operate on tetra quantities, so non-quad-aligned
3744 registers can hold quadword quantities (except %o4 and %i4 because
3745 they cross fixed registers). */
3747 /* This points to either the 32-bit or the 64-bit version. */
3748 const int *hard_regno_mode_classes;
3750 static const int hard_32bit_mode_classes[] = {
3751 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3752 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3753 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3754 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3756 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3757 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3758 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3759 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3761 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3762 and none can hold SFmode/SImode values. */
3763 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3764 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3765 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3766 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3769 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3775 static const int hard_64bit_mode_classes[] = {
3776 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3777 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3778 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3779 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3781 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3782 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3783 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3784 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3786 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3787 and none can hold SFmode/SImode values. */
3788 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3789 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3790 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3791 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3794 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3800 int sparc_mode_class [NUM_MACHINE_MODES];
3802 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3805 sparc_init_modes (void)
3809 for (i = 0; i < NUM_MACHINE_MODES; i++)
3811 switch (GET_MODE_CLASS (i))
3814 case MODE_PARTIAL_INT:
3815 case MODE_COMPLEX_INT:
3816 if (GET_MODE_SIZE (i) <= 4)
3817 sparc_mode_class[i] = 1 << (int) S_MODE;
3818 else if (GET_MODE_SIZE (i) == 8)
3819 sparc_mode_class[i] = 1 << (int) D_MODE;
3820 else if (GET_MODE_SIZE (i) == 16)
3821 sparc_mode_class[i] = 1 << (int) T_MODE;
3822 else if (GET_MODE_SIZE (i) == 32)
3823 sparc_mode_class[i] = 1 << (int) O_MODE;
3825 sparc_mode_class[i] = 0;
3827 case MODE_VECTOR_INT:
3828 if (GET_MODE_SIZE (i) <= 4)
3829 sparc_mode_class[i] = 1 << (int)SF_MODE;
3830 else if (GET_MODE_SIZE (i) == 8)
3831 sparc_mode_class[i] = 1 << (int)DF_MODE;
3834 case MODE_COMPLEX_FLOAT:
3835 if (GET_MODE_SIZE (i) <= 4)
3836 sparc_mode_class[i] = 1 << (int) SF_MODE;
3837 else if (GET_MODE_SIZE (i) == 8)
3838 sparc_mode_class[i] = 1 << (int) DF_MODE;
3839 else if (GET_MODE_SIZE (i) == 16)
3840 sparc_mode_class[i] = 1 << (int) TF_MODE;
3841 else if (GET_MODE_SIZE (i) == 32)
3842 sparc_mode_class[i] = 1 << (int) OF_MODE;
3844 sparc_mode_class[i] = 0;
3847 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3848 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3850 sparc_mode_class[i] = 1 << (int) CC_MODE;
3853 sparc_mode_class[i] = 0;
3859 hard_regno_mode_classes = hard_64bit_mode_classes;
3861 hard_regno_mode_classes = hard_32bit_mode_classes;
3863 /* Initialize the array used by REGNO_REG_CLASS. */
3864 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3866 if (i < 16 && TARGET_V8PLUS)
3867 sparc_regno_reg_class[i] = I64_REGS;
3868 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3869 sparc_regno_reg_class[i] = GENERAL_REGS;
3871 sparc_regno_reg_class[i] = FP_REGS;
3873 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3875 sparc_regno_reg_class[i] = FPCC_REGS;
3877 sparc_regno_reg_class[i] = NO_REGS;
3881 /* Compute the frame size required by the function. This function is called
3882 during the reload pass and also by sparc_expand_prologue. */
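/* Hedged worked example (not from the original sources): the rounding used
   below, (n + 7) & -8, rounds up to the next multiple of 8; e.g. 20 bytes of
   locals become 24.  The outgoing argument area is rounded the same way
   before SPARC_STACK_ALIGN is applied to the total.  */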
3885 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3887 int outgoing_args_size = (crtl->outgoing_args_size
3888 + REG_PARM_STACK_SPACE (current_function_decl));
3889 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3894 for (i = 0; i < 8; i++)
3895 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3900 for (i = 0; i < 8; i += 2)
3901 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3902 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3906 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3907 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3908 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3911 /* Set up values for use in prologue and epilogue. */
3912 num_gfregs = n_regs;
3917 && crtl->outgoing_args_size == 0)
3918 actual_fsize = apparent_fsize = 0;
3921 /* We subtract STARTING_FRAME_OFFSET; remember it's negative. */
3922 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3923 apparent_fsize += n_regs * 4;
3924 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3927 /* Make sure nothing can clobber our register windows.
3928 If a SAVE must be done, or there is a stack-local variable,
3929 the register window area must be allocated. */
3930 if (! leaf_function_p || size > 0)
3931 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3933 return SPARC_STACK_ALIGN (actual_fsize);
3936 /* Output any necessary .register pseudo-ops. */
3939 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3941 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3947 /* Check if %g[2367] were used without
3948 .register being printed for them already. */
3949 for (i = 2; i < 8; i++)
3951 if (df_regs_ever_live_p (i)
3952 && ! sparc_hard_reg_printed [i])
3954 sparc_hard_reg_printed [i] = 1;
3955 /* %g7 is used as the TLS base register; use #ignore
3956 for it instead of #scratch. */
3957 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3958 i == 7 ? "ignore" : "scratch");
3965 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3966 as needed. LOW should be double-word aligned for 32-bit registers.
3967 Return the new OFFSET. */
3970 #define SORR_RESTORE 1
3973 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3978 if (TARGET_ARCH64 && high <= 32)
3980 for (i = low; i < high; i++)
3982 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3984 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
3985 set_mem_alias_set (mem, sparc_sr_alias_set);
3986 if (action == SORR_SAVE)
3988 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
3989 RTX_FRAME_RELATED_P (insn) = 1;
3991 else /* action == SORR_RESTORE */
3992 emit_move_insn (gen_rtx_REG (DImode, i), mem);
3999 for (i = low; i < high; i += 2)
4001 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
4002 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
4003 enum machine_mode mode;
4008 mode = i < 32 ? DImode : DFmode;
4013 mode = i < 32 ? SImode : SFmode;
4018 mode = i < 32 ? SImode : SFmode;
4025 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
4026 set_mem_alias_set (mem, sparc_sr_alias_set);
4027 if (action == SORR_SAVE)
4029 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4030 RTX_FRAME_RELATED_P (insn) = 1;
4032 else /* action == SORR_RESTORE */
4033 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4035 /* Always preserve double-word alignment. */
4036 offset = (offset + 7) & -8;
4043 /* Emit code to save or restore call-saved registers, according to ACTION. */
4046 emit_save_or_restore_regs (int action)
4048 HOST_WIDE_INT offset;
4051 offset = frame_base_offset - apparent_fsize;
4053 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
4055 /* ??? This might be optimized a little as %g1 might already have a
4056 value close enough that a single add insn will do. */
4057 /* ??? Although, all of this is probably only a temporary fix
4058 because if %g1 can hold a function result, then
4059 sparc_expand_epilogue will lose (the result will be
4061 base = gen_rtx_REG (Pmode, 1);
4062 emit_move_insn (base, GEN_INT (offset));
4063 emit_insn (gen_rtx_SET (VOIDmode,
4065 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4069 base = frame_base_reg;
4071 offset = save_or_restore_regs (0, 8, base, offset, action);
4072 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
4075 /* Generate a save_register_window insn. */
4078 gen_save_register_window (rtx increment)
4081 return gen_save_register_windowdi (increment);
4083 return gen_save_register_windowsi (increment);
4086 /* Generate an increment for the stack pointer. */
4089 gen_stack_pointer_inc (rtx increment)
4091 return gen_rtx_SET (VOIDmode,
4093 gen_rtx_PLUS (Pmode,
4098 /* Generate a decrement for the stack pointer. */
4101 gen_stack_pointer_dec (rtx decrement)
4103 return gen_rtx_SET (VOIDmode,
4105 gen_rtx_MINUS (Pmode,
4110 /* Expand the function prologue. The prologue is responsible for reserving
4111 storage for the frame, saving the call-saved registers and loading the
4112 PIC register if needed. */
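/* Hedged sketch (illustrative, not from the original sources) of the typical
   results, assuming a small frame of 96 or 192 bytes:

        add     %sp, -96, %sp           ! leaf function: plain %sp adjustment
        save    %sp, -192, %sp          ! non-leaf: allocate and rotate window

   Frames between 4096 and 8192 bytes are carved out in two steps, and larger
   ones first load the displacement into %g1, matching the branches below.  */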
4115 sparc_expand_prologue (void)
4120 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4121 on the final value of the flag means deferring the prologue/epilogue
4122 expansion until just before the second scheduling pass, which is too
4123 late to emit multiple epilogues or return insns.
4125 Of course we are making the assumption that the value of the flag
4126 will not change between now and its final value. Of the three parts
4127 of the formula, only the last one can reasonably vary. Let's take a
4128 closer look, after assuming that the first two are set to true
4129 (otherwise the last value is effectively silenced).
4131 If only_leaf_regs_used returns false, the global predicate will also
4132 be false so the actual frame size calculated below will be positive.
4133 As a consequence, the save_register_window insn will be emitted in
4134 the instruction stream; now this insn explicitly references %fp
4135 which is not a leaf register so only_leaf_regs_used will always
4136 return false subsequently.
4138 If only_leaf_regs_used returns true, we hope that the subsequent
4139 optimization passes won't cause non-leaf registers to pop up. For
4140 example, the regrename pass has special provisions to not rename to
4141 non-leaf registers in a leaf function. */
4142 sparc_leaf_function_p
4143 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4145 /* Need to use actual_fsize, since we are also allocating
4146 space for our callee (and our own register save area). */
4148 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4150 /* Advertise that the data calculated just above are now valid. */
4151 sparc_prologue_data_valid_p = true;
4153 if (sparc_leaf_function_p)
4155 frame_base_reg = stack_pointer_rtx;
4156 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4160 frame_base_reg = hard_frame_pointer_rtx;
4161 frame_base_offset = SPARC_STACK_BIAS;
4164 if (actual_fsize == 0)
4166 else if (sparc_leaf_function_p)
4168 if (actual_fsize <= 4096)
4169 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4170 else if (actual_fsize <= 8192)
4172 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4173 /* %sp is still the CFA register. */
4174 RTX_FRAME_RELATED_P (insn) = 1;
4176 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4180 rtx reg = gen_rtx_REG (Pmode, 1);
4181 emit_move_insn (reg, GEN_INT (-actual_fsize));
4182 insn = emit_insn (gen_stack_pointer_inc (reg));
4183 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4184 gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4187 RTX_FRAME_RELATED_P (insn) = 1;
4191 if (actual_fsize <= 4096)
4192 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4193 else if (actual_fsize <= 8192)
4195 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4196 /* %sp is not the CFA register anymore. */
4197 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4201 rtx reg = gen_rtx_REG (Pmode, 1);
4202 emit_move_insn (reg, GEN_INT (-actual_fsize));
4203 insn = emit_insn (gen_save_register_window (reg));
4206 RTX_FRAME_RELATED_P (insn) = 1;
4207 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
4208 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4212 emit_save_or_restore_regs (SORR_SAVE);
4214 /* Load the PIC register if needed. */
4215 if (flag_pic && crtl->uses_pic_offset_table)
4216 load_pic_register (false);
4219 /* This function generates the assembly code for function entry, which boils
4220 down to emitting the necessary .register directives. */
4223 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4225 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4226 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4228 sparc_output_scratch_registers (file);
4231 /* Expand the function epilogue, either normal or part of a sibcall.
4232 We emit all the instructions except the return or the call. */
4235 sparc_expand_epilogue (void)
4238 emit_save_or_restore_regs (SORR_RESTORE);
4240 if (actual_fsize == 0)
4242 else if (sparc_leaf_function_p)
4244 if (actual_fsize <= 4096)
4245 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4246 else if (actual_fsize <= 8192)
4248 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4249 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4253 rtx reg = gen_rtx_REG (Pmode, 1);
4254 emit_move_insn (reg, GEN_INT (-actual_fsize));
4255 emit_insn (gen_stack_pointer_dec (reg));
4260 /* Return true if it is appropriate to emit `return' instructions in the
4261 body of a function. */
4264 sparc_can_use_return_insn_p (void)
4266 return sparc_prologue_data_valid_p
4267 && (actual_fsize == 0 || !sparc_leaf_function_p);
4270 /* This function generates the assembly code for function exit. */
4273 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4275 /* If code does not drop into the epilogue, we still have to output
4276 a dummy nop for the sake of sane backtraces. Otherwise, if the
4277 last two instructions of a function were "call foo; dslot;" this
4278 can make the return PC of foo (i.e. address of call instruction
4279 plus 8) point to the first instruction in the next function. */
4281 rtx insn, last_real_insn;
4283 insn = get_last_insn ();
4285 last_real_insn = prev_real_insn (insn);
4287 && GET_CODE (last_real_insn) == INSN
4288 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4289 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4291 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4292 fputs("\tnop\n", file);
4294 sparc_output_deferred_case_vectors ();
4297 /* Output a 'restore' instruction. */
4300 output_restore (rtx pat)
4306 fputs ("\t restore\n", asm_out_file);
4310 gcc_assert (GET_CODE (pat) == SET);
4312 operands[0] = SET_DEST (pat);
4313 pat = SET_SRC (pat);
4315 switch (GET_CODE (pat))
4318 operands[1] = XEXP (pat, 0);
4319 operands[2] = XEXP (pat, 1);
4320 output_asm_insn (" restore %r1, %2, %Y0", operands);
4323 operands[1] = XEXP (pat, 0);
4324 operands[2] = XEXP (pat, 1);
4325 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4328 operands[1] = XEXP (pat, 0);
4329 gcc_assert (XEXP (pat, 1) == const1_rtx);
4330 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4334 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4339 /* Output a return. */
4342 output_return (rtx insn)
4344 if (sparc_leaf_function_p)
4346 /* This is a leaf function so we don't have to bother restoring the
4347 register window, which frees us from dealing with the convoluted
4348 semantics of restore/return. We simply output the jump to the
4349 return address and the insn in the delay slot (if any). */
4351 gcc_assert (! crtl->calls_eh_return);
4353 return "jmp\t%%o7+%)%#";
4357 /* This is a regular function so we have to restore the register window.
4358 We may have a pending insn for the delay slot, which will be either
4359 combined with the 'restore' instruction or put in the delay slot of
4360 the 'return' instruction. */
4362 if (crtl->calls_eh_return)
4364 /* If the function uses __builtin_eh_return, the eh_return
4365 machinery occupies the delay slot. */
4366 gcc_assert (! final_sequence);
4368 if (! flag_delayed_branch)
4369 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4372 fputs ("\treturn\t%i7+8\n", asm_out_file);
4374 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4376 if (flag_delayed_branch)
4377 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4379 fputs ("\t nop\n", asm_out_file);
4381 else if (final_sequence)
4385 delay = NEXT_INSN (insn);
4388 pat = PATTERN (delay);
4390 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4392 epilogue_renumber (&pat, 0);
4393 return "return\t%%i7+%)%#";
4397 output_asm_insn ("jmp\t%%i7+%)", NULL);
4398 output_restore (pat);
4399 PATTERN (delay) = gen_blockage ();
4400 INSN_CODE (delay) = -1;
4405 /* The delay slot is empty. */
4407 return "return\t%%i7+%)\n\t nop";
4408 else if (flag_delayed_branch)
4409 return "jmp\t%%i7+%)\n\t restore";
4411 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4418 /* Output a sibling call. */
4421 output_sibcall (rtx insn, rtx call_operand)
4425 gcc_assert (flag_delayed_branch);
4427 operands[0] = call_operand;
4429 if (sparc_leaf_function_p)
4431 /* This is a leaf function so we don't have to bother restoring the
4432 register window. We simply output the jump to the function and
4433 the insn in the delay slot (if any). */
4435 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4438 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4441 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4442 it into a branch if possible. */
4443 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4448 /* This is a regular function so we have to restore the register window.
4449 We may have a pending insn for the delay slot, which will be combined
4450 with the 'restore' instruction. */
4452 output_asm_insn ("call\t%a0, 0", operands);
4456 rtx delay = NEXT_INSN (insn);
4459 output_restore (PATTERN (delay));
4461 PATTERN (delay) = gen_blockage ();
4462 INSN_CODE (delay) = -1;
4465 output_restore (NULL_RTX);
4471 /* Functions for handling argument passing.
4473 For 32-bit, the first 6 args are normally in registers and the rest are
4474 pushed. Any arg that starts within the first 6 words is at least
4475 partially passed in a register unless its data type forbids.
4477 For 64-bit, the argument registers are laid out as an array of 16 elements
4478 and arguments are added sequentially. The first 6 int args and up to the
4479 first 16 fp args (depending on size) are passed in regs.
4481      Slot    Stack   Integral   Float   Float in structure   Double   Long Double
4482      ----    -----   --------   -----   ------------------   ------   -----------
4483       15   [SP+248]              %f31       %f30,%f31         %d30
4484       14   [SP+240]              %f29       %f28,%f29         %d28       %q28
4485       13   [SP+232]              %f27       %f26,%f27         %d26
4486       12   [SP+224]              %f25       %f24,%f25         %d24       %q24
4487       11   [SP+216]              %f23       %f22,%f23         %d22
4488       10   [SP+208]              %f21       %f20,%f21         %d20       %q20
4489        9   [SP+200]              %f19       %f18,%f19         %d18
4490        8   [SP+192]              %f17       %f16,%f17         %d16       %q16
4491        7   [SP+184]              %f15       %f14,%f15         %d14
4492        6   [SP+176]              %f13       %f12,%f13         %d12       %q12
4493        5   [SP+168]     %o5      %f11       %f10,%f11         %d10
4494        4   [SP+160]     %o4       %f9        %f8,%f9           %d8        %q8
4495        3   [SP+152]     %o3       %f7        %f6,%f7           %d6
4496        2   [SP+144]     %o2       %f5        %f4,%f5           %d4        %q4
4497        1   [SP+136]     %o1       %f3        %f2,%f3           %d2
4498        0   [SP+128]     %o0       %f1        %f0,%f1           %d0        %q0
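      As an illustrative reading of the table (an editorial sketch, not part
      of the original ABI text): for a prototyped call such as
      void f (int i, double d, float s) with the FPU enabled, I occupies
      slot 0 and is passed in %o0, D occupies slot 1 and is passed in %d2,
      and S occupies slot 2 and is passed in %f5 (a single float is
      right-justified in its 8-byte slot, hence the odd register %f5 rather
      than %f4).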
4500 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4502 Integral arguments are always passed as 64-bit quantities appropriately
4505 Passing of floating point values is handled as follows.
4506 If a prototype is in scope:
4507 If the value is in a named argument (i.e. not a stdarg function or a
4508 value not part of the `...') then the value is passed in the appropriate
4510 If the value is part of the `...' and is passed in one of the first 6
4511 slots then the value is passed in the appropriate int reg.
4512 If the value is part of the `...' and is not passed in one of the first 6
4513 slots then the value is passed in memory.
4514 If a prototype is not in scope:
4515 If the value is one of the first 6 arguments the value is passed in the
4516 appropriate integer reg and the appropriate fp reg.
4517 If the value is not one of the first 6 arguments the value is passed in
4518 the appropriate fp reg and in memory.
4521 Summary of the calling conventions implemented by GCC on the SPARC:
4524                                 size      argument     return value
4526       small integer              <4       int. reg.      int. reg.
4527       word                        4       int. reg.      int. reg.
4528       double word                 8       int. reg.      int. reg.
4530       _Complex small integer     <8       int. reg.      int. reg.
4531       _Complex word               8       int. reg.      int. reg.
4532       _Complex double word       16        memory        int. reg.
4534       vector integer            <=8       int. reg.       FP reg.
4535       vector integer             >8        memory         memory
4537       float                       4       int. reg.       FP reg.
4538       double                      8       int. reg.       FP reg.
4539       long double                16        memory          memory
4541       _Complex float              8        memory         FP reg.
4542       _Complex double            16        memory         FP reg.
4543       _Complex long double       32        memory         FP reg.
4545       vector float              any        memory         memory
4547       aggregate                 any        memory         memory
4552                                 size      argument     return value
4554       small integer              <8       int. reg.      int. reg.
4555       word                        8       int. reg.      int. reg.
4556       double word                16       int. reg.      int. reg.
4558       _Complex small integer    <16       int. reg.      int. reg.
4559       _Complex word              16       int. reg.      int. reg.
4560       _Complex double word       32        memory        int. reg.
4562       vector integer           <=16        FP reg.        FP reg.
4563       vector integer       16<s<=32        memory         FP reg.
4564       vector integer            >32        memory         memory
4566       float                       4        FP reg.        FP reg.
4567       double                      8        FP reg.        FP reg.
4568       long double                16        FP reg.        FP reg.
4570       _Complex float              8        FP reg.        FP reg.
4571       _Complex double            16        FP reg.        FP reg.
4572       _Complex long double       32        memory         FP reg.
4574       vector float             <=16        FP reg.        FP reg.
4575       vector float         16<s<=32        memory         FP reg.
4576       vector float              >32        memory         memory
4578       aggregate                <=16         reg.           reg.
4579       aggregate            16<s<=32        memory          reg.
4580       aggregate                 >32        memory         memory
4584 Note #1: complex floating-point types follow the extended SPARC ABIs as
4585 implemented by the Sun compiler.
4587 Note #2: integral vector types follow the scalar floating-point types
4588 conventions to match what is implemented by the Sun VIS SDK.
4590 Note #3: floating-point vector types follow the aggregate types
4594 /* Maximum number of int regs for args. */
4595 #define SPARC_INT_ARG_MAX 6
4596 /* Maximum number of fp regs for args. */
4597 #define SPARC_FP_ARG_MAX 16
4599 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
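/* For instance (illustrative note): with UNITS_PER_WORD == 8 (64-bit),
   ROUND_ADVANCE (12) yields 2 slots; with UNITS_PER_WORD == 4 (32-bit),
   it yields 3.  */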
4601 /* Handle the INIT_CUMULATIVE_ARGS macro.
4602 Initialize a variable CUM of type CUMULATIVE_ARGS
4603 for a call to a function whose data type is FNTYPE.
4604 For a library call, FNTYPE is 0. */
4607 init_cumulative_args (struct sparc_args *cum, tree fntype,
4608 rtx libname ATTRIBUTE_UNUSED,
4609 tree fndecl ATTRIBUTE_UNUSED)
4612 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4613 cum->libcall_p = fntype == 0;
4616 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4617 When a prototype says `char' or `short', really pass an `int'. */
4620 sparc_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
4622 return TARGET_ARCH32 ? true : false;
4625 /* Handle promotion of pointer and integer arguments. */
4627 static enum machine_mode
4628 sparc_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
4629 enum machine_mode mode,
4630 int *punsignedp ATTRIBUTE_UNUSED,
4631 const_tree fntype ATTRIBUTE_UNUSED,
4632 int for_return ATTRIBUTE_UNUSED)
4634 if (POINTER_TYPE_P (type))
4636 *punsignedp = POINTERS_EXTEND_UNSIGNED;
4640 /* For TARGET_ARCH64 we need this, as we don't have instructions
4641 for arithmetic operations which do zero/sign extension at the same time,
4642 so without this we end up with a srl/sra after every assignment to a
4643 user variable, which means very bad code. */
4645 && GET_MODE_CLASS (mode) == MODE_INT
4646 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4652 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4655 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4657 return TARGET_ARCH64 ? true : false;
4660 /* Scan the record type TYPE and return the following predicates:
4661 - INTREGS_P: the record contains at least one field or sub-field
4662 that is eligible for promotion in integer registers.
4663 - FP_REGS_P: the record contains at least one field or sub-field
4664 that is eligible for promotion in floating-point registers.
4665 - PACKED_P: the record contains at least one field that is packed.
4667 Sub-fields are not taken into account for the PACKED_P predicate. */
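/* For example (illustrative): a record such as  struct { int i; float f; }
   sets both *INTREGS_P and *FPREGS_P, whereas declaring the float field
   with __attribute__ ((packed)) would additionally set *PACKED_P.  */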
4670 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4674 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4676 if (TREE_CODE (field) == FIELD_DECL)
4678 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4679 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4680 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4681 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4687 if (packed_p && DECL_PACKED (field))
4693 /* Compute the slot number to pass an argument in.
4694 Return the slot number or -1 if passing on the stack.
4696 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4697 the preceding args and about the function being called.
4698 MODE is the argument's machine mode.
4699 TYPE is the data type of the argument (as a tree).
4700 This is null for libcalls where that information may
4702 NAMED is nonzero if this argument is a named parameter
4703 (otherwise it is an extra parameter matching an ellipsis).
4704 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4705 *PREGNO records the register number to use if scalar type.
4706 *PPADDING records the amount of padding needed in words. */
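/* An illustrative example of the contract above (editorial sketch): on
   TARGET_ARCH64, a named long double starting at an odd slot is bumped to
   the next even slot with *PPADDING set to 1, since it requires 16-byte
   alignment; once all 16 FP argument slots are used, -1 is returned and
   the argument is passed on the stack.  */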
4709 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4710 tree type, int named, int incoming_p,
4711 int *pregno, int *ppadding)
4713 int regbase = (incoming_p
4714 ? SPARC_INCOMING_INT_ARG_FIRST
4715 : SPARC_OUTGOING_INT_ARG_FIRST);
4716 int slotno = cum->words;
4717 enum mode_class mclass;
4722 if (type && TREE_ADDRESSABLE (type))
4728 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4731 /* For SPARC64, objects requiring 16-byte alignment get it. */
4733 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4734 && (slotno & 1) != 0)
4735 slotno++, *ppadding = 1;
4737 mclass = GET_MODE_CLASS (mode);
4738 if (type && TREE_CODE (type) == VECTOR_TYPE)
4740 /* Vector types deserve special treatment because they are
4741 polymorphic wrt their mode, depending upon whether VIS
4742 instructions are enabled. */
4743 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4745 /* The SPARC port defines no floating-point vector modes. */
4746 gcc_assert (mode == BLKmode);
4750 /* Integral vector types should either have a vector
4751 mode or an integral mode, because we are guaranteed
4752 by pass_by_reference that their size is not greater
4753 than 16 bytes and TImode is 16-byte wide. */
4754 gcc_assert (mode != BLKmode);
4756 /* Vector integers are handled like floats according to
4758 mclass = MODE_FLOAT;
4765 case MODE_COMPLEX_FLOAT:
4766 case MODE_VECTOR_INT:
4767 if (TARGET_ARCH64 && TARGET_FPU && named)
4769 if (slotno >= SPARC_FP_ARG_MAX)
4771 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4772 /* Arguments filling only one single FP register are
4773 right-justified in the outer double FP register. */
4774 if (GET_MODE_SIZE (mode) <= 4)
4781 case MODE_COMPLEX_INT:
4782 if (slotno >= SPARC_INT_ARG_MAX)
4784 regno = regbase + slotno;
4788 if (mode == VOIDmode)
4789 /* MODE is VOIDmode when generating the actual call. */
4792 gcc_assert (mode == BLKmode);
4796 || (TREE_CODE (type) != VECTOR_TYPE
4797 && TREE_CODE (type) != RECORD_TYPE))
4799 if (slotno >= SPARC_INT_ARG_MAX)
4801 regno = regbase + slotno;
4803 else /* TARGET_ARCH64 && type */
4805 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4807 /* First see what kinds of registers we would need. */
4808 if (TREE_CODE (type) == VECTOR_TYPE)
4811 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4813 /* The ABI obviously doesn't specify how packed structures
4814 are passed. These are defined to be passed in int regs
4815 if possible, otherwise memory. */
4816 if (packed_p || !named)
4817 fpregs_p = 0, intregs_p = 1;
4819 /* If all arg slots are filled, then must pass on stack. */
4820 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4823 /* If there are only int args and all int arg slots are filled,
4824 then must pass on stack. */
4825 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4828 /* Note that even if all int arg slots are filled, fp members may
4829 still be passed in regs if such regs are available.
4830 *PREGNO isn't set because there may be more than one, it's up
4831 to the caller to compute them. */
4844 /* Handle recursive register counting for structure field layout. */
4846 struct function_arg_record_value_parms
4848 rtx ret; /* return expression being built. */
4849 int slotno; /* slot number of the argument. */
4850 int named; /* whether the argument is named. */
4851 int regbase; /* regno of the base register. */
4852 int stack; /* 1 if part of the argument is on the stack. */
4853 int intoffset; /* offset of the first pending integer field. */
4854 unsigned int nregs; /* number of words passed in registers. */
4857 static void function_arg_record_value_3
4858 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4859 static void function_arg_record_value_2
4860 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4861 static void function_arg_record_value_1
4862 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4863 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
4864 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4866 /* A subroutine of function_arg_record_value. Traverse the structure
4867 recursively and determine how many registers will be required. */
4870 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
4871 struct function_arg_record_value_parms *parms,
4876 /* We need to compute how many registers are needed so we can
4877 allocate the PARALLEL but before we can do that we need to know
4878 whether there are any packed fields. The ABI obviously doesn't
4879 specify how structures are passed in this case, so they are
4880 defined to be passed in int regs if possible, otherwise memory,
4881 regardless of whether there are fp values present. */
4884 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4886 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4893 /* Compute how many registers we need. */
4894 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4896 if (TREE_CODE (field) == FIELD_DECL)
4898 HOST_WIDE_INT bitpos = startbitpos;
4900 if (DECL_SIZE (field) != 0)
4902 if (integer_zerop (DECL_SIZE (field)))
4905 if (host_integerp (bit_position (field), 1))
4906 bitpos += int_bit_position (field);
4909 /* ??? FIXME: else assume zero offset. */
4911 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4912 function_arg_record_value_1 (TREE_TYPE (field),
4916 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4917 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4922 if (parms->intoffset != -1)
4924 unsigned int startbit, endbit;
4925 int intslots, this_slotno;
4927 startbit = parms->intoffset & -BITS_PER_WORD;
4928 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4930 intslots = (endbit - startbit) / BITS_PER_WORD;
4931 this_slotno = parms->slotno + parms->intoffset
4934 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4936 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4937 /* We need to pass this field on the stack. */
4941 parms->nregs += intslots;
4942 parms->intoffset = -1;
4945 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4946 If it wasn't true we wouldn't be here. */
4947 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4948 && DECL_MODE (field) == BLKmode)
4949 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4950 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4957 if (parms->intoffset == -1)
4958 parms->intoffset = bitpos;
4964 /* A subroutine of function_arg_record_value. Assign the bits of the
4965 structure between parms->intoffset and bitpos to integer registers. */
4968 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4969 struct function_arg_record_value_parms *parms)
4971 enum machine_mode mode;
4973 unsigned int startbit, endbit;
4974 int this_slotno, intslots, intoffset;
4977 if (parms->intoffset == -1)
4980 intoffset = parms->intoffset;
4981 parms->intoffset = -1;
4983 startbit = intoffset & -BITS_PER_WORD;
4984 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4985 intslots = (endbit - startbit) / BITS_PER_WORD;
4986 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4988 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4992 /* If this is the trailing part of a word, only load that much into
4993 the register. Otherwise load the whole register. Note that in
4994 the latter case we may pick up unwanted bits. It's not a problem
4995 at the moment but we may wish to revisit it. */
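      /* Illustrative example (editorial note): with BITS_PER_WORD == 64 and
	 intoffset == 32, the first iteration below picks a 32-bit mode so
	 only the trailing half of that word is loaded; later iterations
	 start on a word boundary and use full word_mode registers.  */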
4997 if (intoffset % BITS_PER_WORD != 0)
4998 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5003 intoffset /= BITS_PER_UNIT;
5006 regno = parms->regbase + this_slotno;
5007 reg = gen_rtx_REG (mode, regno);
5008 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5009 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5012 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5017 while (intslots > 0);
5020 /* A subroutine of function_arg_record_value. Traverse the structure
5021 recursively and assign bits to floating point registers. Track which
5022 bits in between need integer registers; invoke function_arg_record_value_3
5023 to make that happen. */
5026 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5027 struct function_arg_record_value_parms *parms,
5033 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5035 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5042 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5044 if (TREE_CODE (field) == FIELD_DECL)
5046 HOST_WIDE_INT bitpos = startbitpos;
5048 if (DECL_SIZE (field) != 0)
5050 if (integer_zerop (DECL_SIZE (field)))
5053 if (host_integerp (bit_position (field), 1))
5054 bitpos += int_bit_position (field);
5057 /* ??? FIXME: else assume zero offset. */
5059 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5060 function_arg_record_value_2 (TREE_TYPE (field),
5064 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5065 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5070 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5071 int regno, nregs, pos;
5072 enum machine_mode mode = DECL_MODE (field);
5075 function_arg_record_value_3 (bitpos, parms);
5077 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5080 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5081 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5083 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5085 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5091 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5092 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5094 reg = gen_rtx_REG (mode, regno);
5095 pos = bitpos / BITS_PER_UNIT;
5096 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5097 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5101 regno += GET_MODE_SIZE (mode) / 4;
5102 reg = gen_rtx_REG (mode, regno);
5103 pos += GET_MODE_SIZE (mode);
5104 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5105 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5111 if (parms->intoffset == -1)
5112 parms->intoffset = bitpos;
5118 /* Used by function_arg and function_value to implement the complex
5119 conventions of the 64-bit ABI for passing and returning structures.
5120 Return an expression valid as a return value for the two macros
5121 FUNCTION_ARG and FUNCTION_VALUE.
5123 TYPE is the data type of the argument (as a tree).
5124 This is null for libcalls where that information may
5126 MODE is the argument's machine mode.
5127 SLOTNO is the index number of the argument's slot in the parameter array.
5128 NAMED is nonzero if this argument is a named parameter
5129 (otherwise it is an extra parameter matching an ellipsis).
5130 REGBASE is the regno of the base register for the parameter array. */
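/* An illustrative example of the PARALLEL built below (editorial sketch,
   assuming TARGET_ARCH64, TARGET_FPU and a named argument): passing
   struct { int i; double d; } as the first outgoing argument yields roughly

     (parallel [(expr_list (reg:DI %o0) (const_int 0))
                (expr_list (reg:DF %d2) (const_int 8))])

   i.e. the integer word goes in %o0 and the double in %d2, matching the
   64-bit slot layout described earlier in this file.  */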
5133 function_arg_record_value (const_tree type, enum machine_mode mode,
5134 int slotno, int named, int regbase)
5136 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5137 struct function_arg_record_value_parms parms;
5140 parms.ret = NULL_RTX;
5141 parms.slotno = slotno;
5142 parms.named = named;
5143 parms.regbase = regbase;
5146 /* Compute how many registers we need. */
5148 parms.intoffset = 0;
5149 function_arg_record_value_1 (type, 0, &parms, false);
5151 /* Take into account pending integer fields. */
5152 if (parms.intoffset != -1)
5154 unsigned int startbit, endbit;
5155 int intslots, this_slotno;
5157 startbit = parms.intoffset & -BITS_PER_WORD;
5158 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5159 intslots = (endbit - startbit) / BITS_PER_WORD;
5160 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5162 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5164 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5165 /* We need to pass this field on the stack. */
5169 parms.nregs += intslots;
5171 nregs = parms.nregs;
5173 /* Allocate the vector and handle some annoying special cases. */
5176 /* ??? Empty structure has no value? Duh? */
5179 /* Though there's nothing really to store, return a word register
5180 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5181 leads to breakage due to the fact that there are zero bytes to
5183 return gen_rtx_REG (mode, regbase);
5187 /* ??? C++ has structures with no fields, and yet a size. Give up
5188 for now and pass everything back in integer registers. */
5189 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5191 if (nregs + slotno > SPARC_INT_ARG_MAX)
5192 nregs = SPARC_INT_ARG_MAX - slotno;
5194 gcc_assert (nregs != 0);
5196 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5198 /* If at least one field must be passed on the stack, generate
5199 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5200 also be passed on the stack. We can't do much better because the
5201 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5202 of structures for which the fields passed exclusively in registers
5203 are not at the beginning of the structure. */
5205 XVECEXP (parms.ret, 0, 0)
5206 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5208 /* Fill in the entries. */
5210 parms.intoffset = 0;
5211 function_arg_record_value_2 (type, 0, &parms, false);
5212 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5214 gcc_assert (parms.nregs == nregs);
5219 /* Used by function_arg and function_value to implement the conventions
5220 of the 64-bit ABI for passing and returning unions.
5221 Return an expression valid as a return value for the two macros
5222 FUNCTION_ARG and FUNCTION_VALUE.
5224 SIZE is the size in bytes of the union.
5225 MODE is the argument's machine mode.
5226 REGNO is the hard register the union will be passed in. */
5229 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5232 int nwords = ROUND_ADVANCE (size), i;
5235 /* See comment in previous function for empty structures. */
5237 return gen_rtx_REG (mode, regno);
5239 if (slotno == SPARC_INT_ARG_MAX - 1)
5242 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5244 for (i = 0; i < nwords; i++)
5246 /* Unions are passed left-justified. */
5247 XVECEXP (regs, 0, i)
5248 = gen_rtx_EXPR_LIST (VOIDmode,
5249 gen_rtx_REG (word_mode, regno),
5250 GEN_INT (UNITS_PER_WORD * i));
5257 /* Used by function_arg and function_value to implement the conventions
5258 for passing and returning large (BLKmode) vectors.
5259 Return an expression valid as a return value for the two macros
5260 FUNCTION_ARG and FUNCTION_VALUE.
5262 SIZE is the size in bytes of the vector (at least 8 bytes).
5263 REGNO is the FP hard register the vector will be passed in. */
5266 function_arg_vector_value (int size, int regno)
5268 int i, nregs = size / 8;
5271 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5273 for (i = 0; i < nregs; i++)
5275 XVECEXP (regs, 0, i)
5276 = gen_rtx_EXPR_LIST (VOIDmode,
5277 gen_rtx_REG (DImode, regno + 2*i),
5284 /* Handle the FUNCTION_ARG macro.
5285 Determine where to put an argument to a function.
5286 Value is zero to push the argument on the stack,
5287 or a hard register in which to store the argument.
5289 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5290 the preceding args and about the function being called.
5291 MODE is the argument's machine mode.
5292 TYPE is the data type of the argument (as a tree).
5293 This is null for libcalls where that information may
5295 NAMED is nonzero if this argument is a named parameter
5296 (otherwise it is an extra parameter matching an ellipsis).
5297 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5300 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5301 tree type, int named, int incoming_p)
5303 int regbase = (incoming_p
5304 ? SPARC_INCOMING_INT_ARG_FIRST
5305 : SPARC_OUTGOING_INT_ARG_FIRST);
5306 int slotno, regno, padding;
5307 enum mode_class mclass = GET_MODE_CLASS (mode);
5309 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5314 /* Vector types deserve special treatment because they are polymorphic wrt
5315 their mode, depending upon whether VIS instructions are enabled. */
5316 if (type && TREE_CODE (type) == VECTOR_TYPE)
5318 HOST_WIDE_INT size = int_size_in_bytes (type);
5319 gcc_assert ((TARGET_ARCH32 && size <= 8)
5320 || (TARGET_ARCH64 && size <= 16));
5322 if (mode == BLKmode)
5323 return function_arg_vector_value (size,
5324 SPARC_FP_ARG_FIRST + 2*slotno);
5326 mclass = MODE_FLOAT;
5330 return gen_rtx_REG (mode, regno);
5332 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5333 and are promoted to registers if possible. */
5334 if (type && TREE_CODE (type) == RECORD_TYPE)
5336 HOST_WIDE_INT size = int_size_in_bytes (type);
5337 gcc_assert (size <= 16);
5339 return function_arg_record_value (type, mode, slotno, named, regbase);
5342 /* Unions up to 16 bytes in size are passed in integer registers. */
5343 else if (type && TREE_CODE (type) == UNION_TYPE)
5345 HOST_WIDE_INT size = int_size_in_bytes (type);
5346 gcc_assert (size <= 16);
5348 return function_arg_union_value (size, mode, slotno, regno);
5351 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5352 but also have the slot allocated for them.
5353 If no prototype is in scope fp values in register slots get passed
5354 in two places, either fp regs and int regs or fp regs and memory. */
5355 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5356 && SPARC_FP_REG_P (regno))
5358 rtx reg = gen_rtx_REG (mode, regno);
5359 if (cum->prototype_p || cum->libcall_p)
5361 /* "* 2" because fp reg numbers are recorded in 4 byte
5364 /* ??? This will cause the value to be passed in the fp reg and
5365 in the stack. When a prototype exists we want to pass the
5366 value in the reg but reserve space on the stack. That's an
5367 optimization, and is deferred [for a bit]. */
5368 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5369 return gen_rtx_PARALLEL (mode,
5371 gen_rtx_EXPR_LIST (VOIDmode,
5372 NULL_RTX, const0_rtx),
5373 gen_rtx_EXPR_LIST (VOIDmode,
5377 /* ??? It seems that passing back a register even when past
5378 the area declared by REG_PARM_STACK_SPACE will allocate
5379 space appropriately, and will not copy the data onto the
5380 stack, exactly as we desire.
5382 This is due to locate_and_pad_parm being called in
5383 expand_call whenever reg_parm_stack_space > 0, which
5384 while beneficial to our example here, would seem to be
5385 in error from what had been intended. Ho hum... -- r~ */
5393 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5397 /* On incoming, we don't need to know that the value
5398 is passed in %f0 and %i0, and it confuses other parts
5399 causing needless spillage even on the simplest cases. */
5403 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5404 + (regno - SPARC_FP_ARG_FIRST) / 2);
5406 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5407 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5409 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5413 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5414 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5415 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5420 /* All other aggregate types are passed in an integer register in a mode
5421 corresponding to the size of the type. */
5422 else if (type && AGGREGATE_TYPE_P (type))
5424 HOST_WIDE_INT size = int_size_in_bytes (type);
5425 gcc_assert (size <= 16);
5427 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5430 return gen_rtx_REG (mode, regno);
5433 /* For an arg passed partly in registers and partly in memory,
5434 this is the number of bytes of registers used.
5435 For args passed entirely in registers or entirely in memory, zero.
5437 Any arg that starts in the first 6 regs but won't entirely fit in them
5438 needs partial registers on v8. On v9, structures with integer
5439 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5440 values that begin in the last fp reg [where "last fp reg" varies with the
5441 mode] will be split between that reg and memory. */
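/* An illustrative case (editorial sketch): under the 32-bit ABI, a DImode
   argument that starts in slot 5 overflows the 6 available slots, so the
   computation below returns (6 - 5) * UNITS_PER_WORD = 4 bytes passed in
   %o5, with the remaining 4 bytes on the stack.  */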
5444 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5445 tree type, bool named)
5447 int slotno, regno, padding;
5449 /* We pass 0 for incoming_p here, it doesn't matter. */
5450 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5457 if ((slotno + (mode == BLKmode
5458 ? ROUND_ADVANCE (int_size_in_bytes (type))
5459 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5460 > SPARC_INT_ARG_MAX)
5461 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5465 /* We are guaranteed by pass_by_reference that the size of the
5466 argument is not greater than 16 bytes, so we only need to return
5467 one word if the argument is partially passed in registers. */
5469 if (type && AGGREGATE_TYPE_P (type))
5471 int size = int_size_in_bytes (type);
5473 if (size > UNITS_PER_WORD
5474 && slotno == SPARC_INT_ARG_MAX - 1)
5475 return UNITS_PER_WORD;
5477 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5478 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5479 && ! (TARGET_FPU && named)))
5481 /* The complex types are passed as packed types. */
5482 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5483 && slotno == SPARC_INT_ARG_MAX - 1)
5484 return UNITS_PER_WORD;
5486 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5488 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5490 return UNITS_PER_WORD;
5497 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5498 Specify whether to pass the argument by reference. */
5501 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5502 enum machine_mode mode, const_tree type,
5503 bool named ATTRIBUTE_UNUSED)
5506 /* Original SPARC 32-bit ABI says that structures and unions,
5507 and quad-precision floats are passed by reference. For Pascal,
5508 also pass arrays by reference. All other base types are passed
5511 Extended ABI (as implemented by the Sun compiler) says that all
5512 complex floats are passed by reference. Pass complex integers
5513 in registers up to 8 bytes. More generally, enforce the 2-word
5514 cap for passing arguments in registers.
5516 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5517 integers are passed like floats of the same size, that is in
5518 registers up to 8 bytes. Pass all vector floats by reference
5519 like structures and unions. */
5520 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5522 /* Catch CDImode, TFmode, DCmode and TCmode. */
5523 || GET_MODE_SIZE (mode) > 8
5525 && TREE_CODE (type) == VECTOR_TYPE
5526 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5528 /* Original SPARC 64-bit ABI says that structures and unions
5529 smaller than 16 bytes are passed in registers, as well as
5530 all other base types.
5532 Extended ABI (as implemented by the Sun compiler) says that
5533 complex floats are passed in registers up to 16 bytes. Pass
5534 all complex integers in registers up to 16 bytes. More generally,
5535 enforce the 2-word cap for passing arguments in registers.
5537 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5538 integers are passed like floats of the same size, that is in
5539 registers (up to 16 bytes). Pass all vector floats like structure
5542 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5543 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5544 /* Catch CTImode and TCmode. */
5545 || GET_MODE_SIZE (mode) > 16);
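/* Illustrative consequences of the rules above (editorial note): under the
   32-bit ABI a long double (TFmode, 16 bytes) or a _Complex double (16
   bytes) is passed by reference, while under the 64-bit ABI both fit the
   16-byte cap and are passed by value in registers.  */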
5548 /* Handle the FUNCTION_ARG_ADVANCE macro.
5549 Update the data in CUM to advance over an argument
5550 of mode MODE and data type TYPE.
5551 TYPE is null for libcalls where that information may not be available. */
5554 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5555 tree type, int named)
5557 int slotno, regno, padding;
5559 /* We pass 0 for incoming_p here, it doesn't matter. */
5560 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5562 /* If register required leading padding, add it. */
5564 cum->words += padding;
5568 cum->words += (mode != BLKmode
5569 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5570 : ROUND_ADVANCE (int_size_in_bytes (type)));
5574 if (type && AGGREGATE_TYPE_P (type))
5576 int size = int_size_in_bytes (type);
5580 else if (size <= 16)
5582 else /* passed by reference */
5587 cum->words += (mode != BLKmode
5588 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5589 : ROUND_ADVANCE (int_size_in_bytes (type)));
5594 /* Handle the FUNCTION_ARG_PADDING macro.
5595 For the 64 bit ABI structs are always stored left shifted in their
5599 function_arg_padding (enum machine_mode mode, const_tree type)
5601 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5604 /* Fall back to the default. */
5605 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5608 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5609 Specify whether to return the return value in memory. */
5612 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5615 /* Original SPARC 32-bit ABI says that structures and unions,
5616 and quad-precision floats are returned in memory. All other
5617 base types are returned in registers.
5619 Extended ABI (as implemented by the Sun compiler) says that
5620 all complex floats are returned in registers (8 FP registers
5621 at most for '_Complex long double'). Return all complex integers
5622 in registers (4 at most for '_Complex long long').
5624 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5625 integers are returned like floats of the same size, that is in
5626 registers up to 8 bytes and in memory otherwise. Return all
5627 vector floats in memory like structures and unions; note that
5628 they always have BLKmode like the latter. */
5629 return (TYPE_MODE (type) == BLKmode
5630 || TYPE_MODE (type) == TFmode
5631 || (TREE_CODE (type) == VECTOR_TYPE
5632 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5634 /* Original SPARC 64-bit ABI says that structures and unions
5635 smaller than 32 bytes are returned in registers, as well as
5636 all other base types.
5638 Extended ABI (as implemented by the Sun compiler) says that all
5639 complex floats are returned in registers (8 FP registers at most
5640 for '_Complex long double'). Return all complex integers in
5641 registers (4 at most for '_Complex TItype').
5643 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5644 integers are returned like floats of the same size, that is in
5645 registers. Return all vector floats like structures and unions;
5646 note that they always have BLKmode like the latter. */
5647 return ((TYPE_MODE (type) == BLKmode
5648 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5651 /* Handle the TARGET_STRUCT_VALUE target hook.
5652 Return where to find the structure return value address. */
5655 sparc_struct_value_rtx (tree fndecl, int incoming)
5664 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5665 STRUCT_VALUE_OFFSET));
5667 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5668 STRUCT_VALUE_OFFSET));
5670 /* Only follow the SPARC ABI for fixed-size structure returns.
5671 Variable size structure returns are handled per the normal
5672 procedures in GCC. This is enabled by -mstd-struct-return. */
5674 && sparc_std_struct_return
5675 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5676 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5678 /* We must check and adjust the return address, as it is
5679 optional as to whether the return object is really
5681 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5682 rtx scratch = gen_reg_rtx (SImode);
5683 rtx endlab = gen_label_rtx ();
5685 /* Calculate the return object size */
5686 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5687 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5688 /* Construct a temporary return value */
5689 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5691 /* Implement SPARC 32-bit psABI callee returns struct checking
5694 Fetch the instruction where we will return to and see if
5695 it's an unimp instruction (the most significant 10 bits
5697 emit_move_insn (scratch, gen_rtx_MEM (SImode,
5698 plus_constant (ret_rtx, 8)));
5699 /* Assume the size is valid and pre-adjust */
5700 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5701 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5702 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5703 /* Assign stack temp:
5704 Write the address of the memory pointed to by temp_val into
5705 the memory pointed to by mem */
5706 emit_move_insn (mem, XEXP (temp_val, 0));
5707 emit_label (endlab);
5710 set_mem_alias_set (mem, struct_value_alias_set);
5715 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5716 For v9, function return values are subject to the same rules as arguments,
5717 except that up to 32 bytes may be returned in registers. */
5720 function_value (const_tree type, enum machine_mode mode, int incoming_p)
5722 /* Beware that the two values are swapped here wrt function_arg. */
5723 int regbase = (incoming_p
5724 ? SPARC_OUTGOING_INT_ARG_FIRST
5725 : SPARC_INCOMING_INT_ARG_FIRST);
5726 enum mode_class mclass = GET_MODE_CLASS (mode);
5729 /* Vector types deserve special treatment because they are polymorphic wrt
5730 their mode, depending upon whether VIS instructions are enabled. */
5731 if (type && TREE_CODE (type) == VECTOR_TYPE)
5733 HOST_WIDE_INT size = int_size_in_bytes (type);
5734 gcc_assert ((TARGET_ARCH32 && size <= 8)
5735 || (TARGET_ARCH64 && size <= 32));
5737 if (mode == BLKmode)
5738 return function_arg_vector_value (size,
5739 SPARC_FP_ARG_FIRST);
5741 mclass = MODE_FLOAT;
5744 if (TARGET_ARCH64 && type)
5746 /* Structures up to 32 bytes in size are returned in registers. */
5747 if (TREE_CODE (type) == RECORD_TYPE)
5749 HOST_WIDE_INT size = int_size_in_bytes (type);
5750 gcc_assert (size <= 32);
5752 return function_arg_record_value (type, mode, 0, 1, regbase);
5755 /* Unions up to 32 bytes in size are returned in integer registers. */
5756 else if (TREE_CODE (type) == UNION_TYPE)
5758 HOST_WIDE_INT size = int_size_in_bytes (type);
5759 gcc_assert (size <= 32);
5761 return function_arg_union_value (size, mode, 0, regbase);
5764 /* Objects that require it are returned in FP registers. */
5765 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5768 /* All other aggregate types are returned in an integer register in a
5769 mode corresponding to the size of the type. */
5770 else if (AGGREGATE_TYPE_P (type))
5772 /* All other aggregate types are passed in an integer register
5773 in a mode corresponding to the size of the type. */
5774 HOST_WIDE_INT size = int_size_in_bytes (type);
5775 gcc_assert (size <= 32);
5777 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5779 /* ??? We probably should have made the same ABI change in
5780 3.4.0 as the one we made for unions. The latter was
5781 required by the SCD though, while the former is not
5782 specified, so we favored compatibility and efficiency.
5784 Now we're stuck for aggregates larger than 16 bytes,
5785 because OImode vanished in the meantime. Let's not
5786 try to be unduly clever, and simply follow the ABI
5787 for unions in that case. */
5788 if (mode == BLKmode)
5789 return function_arg_union_value (size, mode, 0, regbase);
5794 /* This must match sparc_promote_function_mode.
5795 ??? Maybe 32-bit pointers should actually remain in Pmode? */
5796 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5800 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
5801 regno = SPARC_FP_ARG_FIRST;
5805 return gen_rtx_REG (mode, regno);
5808 /* Do what is necessary for `va_start'. We look at the current function
5809 to determine if stdarg or varargs is used and return the address of
5810 the first unnamed parameter. */
5813 sparc_builtin_saveregs (void)
5815 int first_reg = crtl->args.info.words;
5819 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5820 emit_move_insn (gen_rtx_MEM (word_mode,
5821 gen_rtx_PLUS (Pmode,
5823 GEN_INT (FIRST_PARM_OFFSET (0)
5826 gen_rtx_REG (word_mode,
5827 SPARC_INCOMING_INT_ARG_FIRST + regno));
5829 address = gen_rtx_PLUS (Pmode,
5831 GEN_INT (FIRST_PARM_OFFSET (0)
5832 + UNITS_PER_WORD * first_reg));
5837 /* Implement `va_start' for stdarg. */
5840 sparc_va_start (tree valist, rtx nextarg)
5842 nextarg = expand_builtin_saveregs ();
5843 std_expand_builtin_va_start (valist, nextarg);
5846 /* Implement `va_arg' for stdarg. */
5849 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
5852 HOST_WIDE_INT size, rsize, align;
5855 tree ptrtype = build_pointer_type (type);
5857 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5860 size = rsize = UNITS_PER_WORD;
5866 size = int_size_in_bytes (type);
5867 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5872 /* For SPARC64, objects requiring 16-byte alignment get it. */
5873 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5874 align = 2 * UNITS_PER_WORD;
5876 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5877 are left-justified in their slots. */
5878 if (AGGREGATE_TYPE_P (type))
5881 size = rsize = UNITS_PER_WORD;
5891 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5892 size_int (align - 1));
5893 incr = fold_convert (sizetype, incr);
5894 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
5896 incr = fold_convert (ptr_type_node, incr);
5899 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5902 if (BYTES_BIG_ENDIAN && size < rsize)
5903 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5904 size_int (rsize - size));
5908 addr = fold_convert (build_pointer_type (ptrtype), addr);
5909 addr = build_va_arg_indirect_ref (addr);
5912 /* If the address isn't aligned properly for the type, we need a temporary.
5913 FIXME: This is inefficient, usually we can do this in registers. */
5914 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
5916 tree tmp = create_tmp_var (type, "va_arg_tmp");
5917 tree dest_addr = build_fold_addr_expr (tmp);
5918 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
5919 3, dest_addr, addr, size_int (rsize));
5920 TREE_ADDRESSABLE (tmp) = 1;
5921 gimplify_and_add (copy, pre_p);
5926 addr = fold_convert (ptrtype, addr);
5929 = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
5930 gimplify_assign (valist, incr, post_p);
5932 return build_va_arg_indirect_ref (addr);
5935 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5936 Specify whether the vector mode is supported by the hardware. */
5939 sparc_vector_mode_supported_p (enum machine_mode mode)
5941 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
5944 /* Return the string to output an unconditional branch to LABEL, which is
5945 the operand number of the label.
5947 DEST is the destination insn (i.e. the label), INSN is the source. */
5950 output_ubranch (rtx dest, int label, rtx insn)
5952 static char string[64];
5953 bool v9_form = false;
5956 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5958 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5959 - INSN_ADDRESSES (INSN_UID (insn)));
5960 /* Leave some instructions for "slop". */
5961 if (delta >= -260000 && delta < 260000)
5966 strcpy (string, "ba%*,pt\t%%xcc, ");
5968 strcpy (string, "b%*\t");
5970 p = strchr (string, '\0');
5981 /* Return the string to output a conditional branch to LABEL, which is
5982 the operand number of the label. OP is the conditional expression.
5983 XEXP (OP, 0) is assumed to be a condition code register (integer or
5984 floating point) and its mode specifies what kind of comparison we made.
5986 DEST is the destination insn (i.e. the label), INSN is the source.
5988 REVERSED is nonzero if we should reverse the sense of the comparison.
5990 ANNUL is nonzero if we should generate an annulling branch. */
5993 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5996 static char string[64];
5997 enum rtx_code code = GET_CODE (op);
5998 rtx cc_reg = XEXP (op, 0);
5999 enum machine_mode mode = GET_MODE (cc_reg);
6000 const char *labelno, *branch;
6001 int spaces = 8, far;
6004 /* v9 branches are limited to +-1MB. If it is too far away,
6017 fbne,a,pn %fcc2, .LC29
6025 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6028 /* Reversal of FP compares takes care -- an ordered compare
6029 becomes an unordered compare and vice versa. */
6030 if (mode == CCFPmode || mode == CCFPEmode)
6031 code = reverse_condition_maybe_unordered (code);
6033 code = reverse_condition (code);
6036 /* Start by writing the branch condition. */
6037 if (mode == CCFPmode || mode == CCFPEmode)
6088 /* ??? !v9: FP branches cannot be preceded by another floating point
6089 insn. Because there is currently no concept of pre-delay slots,
6090 we can fix this only by always emitting a nop before a floating
6095 strcpy (string, "nop\n\t");
6096 strcat (string, branch);
6109 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6121 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6142 strcpy (string, branch);
6144 spaces -= strlen (branch);
6145 p = strchr (string, '\0');
6147 /* Now add the annulling, the label, and a possible noop. */
6160 if (! far && insn && INSN_ADDRESSES_SET_P ())
6162 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6163 - INSN_ADDRESSES (INSN_UID (insn)));
6164 /* Leave some instructions for "slop". */
6165 if (delta < -260000 || delta >= 260000)
6169 if (mode == CCFPmode || mode == CCFPEmode)
6171 static char v9_fcc_labelno[] = "%%fccX, ";
6172 /* Set the char indicating the number of the fcc reg to use. */
6173 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6174 labelno = v9_fcc_labelno;
6177 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6181 else if (mode == CCXmode || mode == CCX_NOOVmode)
6183 labelno = "%%xcc, ";
6188 labelno = "%%icc, ";
6193 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6196 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6209 strcpy (p, labelno);
6210 p = strchr (p, '\0');
6213 strcpy (p, ".+12\n\t nop\n\tb\t");
6214 /* Skip the next insn if requested or
6215 if we know that it will be a nop. */
6216 if (annul || ! final_sequence)
6230 /* Emit a library call comparison between floating point X and Y.
6231 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
6232 Return the new operator to be used in the comparison sequence.
6234 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6235 values as arguments instead of the TFmode registers themselves,
6236 that's why we cannot call emit_float_lib_cmp. */
6239 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6242 rtx slot0, slot1, result, tem, tem2, libfunc;
6243 enum machine_mode mode;
6244 enum rtx_code new_comparison;
6249 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
6253 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
6257 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
6261 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
6265 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
6269 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
6280 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
6293 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6294 emit_move_insn (slot0, x);
6301 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6302 emit_move_insn (slot1, y);
6305 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
6306 emit_library_call (libfunc, LCT_NORMAL,
6308 XEXP (slot0, 0), Pmode,
6309 XEXP (slot1, 0), Pmode);
6314 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
6315 emit_library_call (libfunc, LCT_NORMAL,
6317 x, TFmode, y, TFmode);
6322 /* Immediately move the result of the libcall into a pseudo
6323 register so reload doesn't clobber the value if it needs
6324 the return register for a spill reg. */
6325 result = gen_reg_rtx (mode);
6326 emit_move_insn (result, hard_libcall_value (mode, libfunc));
6331 return gen_rtx_NE (VOIDmode, result, const0_rtx);
6334 new_comparison = (comparison == UNORDERED ? EQ : NE);
6335 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
6338 new_comparison = (comparison == UNGT ? GT : NE);
6339 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
6341 return gen_rtx_NE (VOIDmode, result, const2_rtx);
6343 tem = gen_reg_rtx (mode);
6345 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6347 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6348 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
6351 tem = gen_reg_rtx (mode);
6353 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6355 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6356 tem2 = gen_reg_rtx (mode);
6358 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6360 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6361 new_comparison = (comparison == UNEQ ? EQ : NE);
6362 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
6368 /* Generate an unsigned DImode to FP conversion. This is the same code
6369 optabs would emit if we didn't have TFmode patterns. */
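/* A minimal C-level sketch of the sequence emitted below (editorial note,
   illustrative only; the function name is hypothetical):

     double floatunsdi (unsigned long long x)
     {
       if ((long long) x >= 0)
         return (double) (long long) x;              // fits a signed convert
       unsigned long long half = (x >> 1) | (x & 1); // keep the rounding bit
       double f = (double) (long long) half;
       return f + f;                                 // double the halved value
     }
*/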
6372 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6374 rtx neglab, donelab, i0, i1, f0, in, out;
6377 in = force_reg (DImode, operands[1]);
6378 neglab = gen_label_rtx ();
6379 donelab = gen_label_rtx ();
6380 i0 = gen_reg_rtx (DImode);
6381 i1 = gen_reg_rtx (DImode);
6382 f0 = gen_reg_rtx (mode);
6384 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6386 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6387 emit_jump_insn (gen_jump (donelab));
6390 emit_label (neglab);
6392 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6393 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6394 emit_insn (gen_iordi3 (i0, i0, i1));
6395 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6396 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6398 emit_label (donelab);
6401 /* Generate an FP to unsigned DImode conversion. This is the same code
6402 optabs would emit if we didn't have TFmode patterns. */
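/* A minimal C-level sketch of the sequence emitted below (editorial note,
   illustrative only; the function name is hypothetical):

     unsigned long long fixunsdi (double x)
     {
       const double limit = 9223372036854775808.0;   // 2^63
       if (x < limit)
         return (long long) x;                       // fits a signed convert
       return (unsigned long long) (long long) (x - limit)
              ^ (1ULL << 63);                        // restore the top bit
     }
*/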
6405 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6407 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6410 in = force_reg (mode, operands[1]);
6411 neglab = gen_label_rtx ();
6412 donelab = gen_label_rtx ();
6413 i0 = gen_reg_rtx (DImode);
6414 i1 = gen_reg_rtx (DImode);
6415 limit = gen_reg_rtx (mode);
6416 f0 = gen_reg_rtx (mode);
6418 emit_move_insn (limit,
6419 CONST_DOUBLE_FROM_REAL_VALUE (
6420 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6421 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6423 emit_insn (gen_rtx_SET (VOIDmode,
6425 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6426 emit_jump_insn (gen_jump (donelab));
6429 emit_label (neglab);
6431 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6432 emit_insn (gen_rtx_SET (VOIDmode,
6434 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6435 emit_insn (gen_movdi (i1, const1_rtx));
6436 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6437 emit_insn (gen_xordi3 (out, i0, i1));
6439 emit_label (donelab);
6442 /* Return the string to output a conditional branch to LABEL, testing
6443 register REG. LABEL is the operand number of the label; REG is the
6444 operand number of the reg. OP is the conditional expression. The mode
6445 of REG says what kind of comparison we made.
6447 DEST is the destination insn (i.e. the label), INSN is the source.
6449 REVERSED is nonzero if we should reverse the sense of the comparison.
6451 ANNUL is nonzero if we should generate an annulling branch. */
6454 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6455 int annul, rtx insn)
6457 static char string[64];
6458 enum rtx_code code = GET_CODE (op);
6459 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6464 /* Branches on a register are limited to +-128KB. If it is too far away,
6477 brgez,a,pn %o1, .LC29
6483 ba,pt %xcc, .LC29 */
6485 far = get_attr_length (insn) >= 3;
6487 /* If not floating-point or if EQ or NE, we can just reverse the code. */
6489 code = reverse_condition (code);
6491 /* Only 64 bit versions of these instructions exist. */
6492 gcc_assert (mode == DImode);
6494 /* Start by writing the branch condition. */
6499 strcpy (string, "brnz");
6503 strcpy (string, "brz");
6507 strcpy (string, "brgez");
6511 strcpy (string, "brlz");
6515 strcpy (string, "brlez");
6519 strcpy (string, "brgz");
6526 p = strchr (string, '\0');
6528 /* Now add the annulling, reg, label, and nop. */
6535 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6538 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6543 *p = p < string + 8 ? '\t' : ' ';
6551 int veryfar = 1, delta;
6553 if (INSN_ADDRESSES_SET_P ())
6555 delta = (INSN_ADDRESSES (INSN_UID (dest))
6556 - INSN_ADDRESSES (INSN_UID (insn)));
6557 /* Leave some instructions for "slop". */
6558 if (delta >= -260000 && delta < 260000)
6562 strcpy (p, ".+12\n\t nop\n\t");
6563 /* Skip the next insn if requested or
6564 if we know that it will be a nop. */
6565 if (annul || ! final_sequence)
6575 strcpy (p, "ba,pt\t%%xcc, ");
6589 /* Return 1, if any of the registers of the instruction are %l[0-7] or %o[0-7].
6590 Such instructions cannot be used in the delay slot of return insn on v9.
6591 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
6595 epilogue_renumber (register rtx *where, int test)
6597 register const char *fmt;
6599 register enum rtx_code code;
6604 code = GET_CODE (*where);
6609 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6611 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6612 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
6620 /* Do not replace the frame pointer with the stack pointer because
6621 it can cause the delayed instruction to load below the stack.
6622 This occurs when instructions like:
6624 (set (reg/i:SI 24 %i0)
6625 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6626 (const_int -20 [0xffffffec])) 0))
6628 are in the return delayed slot. */
6630 if (GET_CODE (XEXP (*where, 0)) == REG
6631 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6632 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6633 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6638 if (SPARC_STACK_BIAS
6639 && GET_CODE (XEXP (*where, 0)) == REG
6640 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6648 fmt = GET_RTX_FORMAT (code);
6650 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6655 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6656 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6659 else if (fmt[i] == 'e'
6660 && epilogue_renumber (&(XEXP (*where, i)), test))
6666 /* Leaf functions and non-leaf functions have different needs. */
6669 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6672 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6674 static const int *const reg_alloc_orders[] = {
6675 reg_leaf_alloc_order,
6676 reg_nonleaf_alloc_order};
6679 order_regs_for_local_alloc (void)
6681 static int last_order_nonleaf = 1;
6683 if (df_regs_ever_live_p (15) != last_order_nonleaf)
6685 last_order_nonleaf = !last_order_nonleaf;
6686 memcpy ((char *) reg_alloc_order,
6687 (const char *) reg_alloc_orders[last_order_nonleaf],
6688 FIRST_PSEUDO_REGISTER * sizeof (int));
6692 /* Return 1 if REG and MEM are legitimate enough to allow the various
6693 mem<-->reg splits to be run. */
6696 sparc_splitdi_legitimate (rtx reg, rtx mem)
6698 /* Punt if we are here by mistake. */
6699 gcc_assert (reload_completed);
6701 /* We must have an offsettable memory reference. */
6702 if (! offsettable_memref_p (mem))
6705 /* If we have legitimate args for ldd/std, we do not want
6706 the split to happen. */
6707 if ((REGNO (reg) % 2) == 0
6708 && mem_min_alignment (mem, 8))
6715 /* Return 1 if x and y are some kind of REG and they refer to
6716 different hard registers. This test is guaranteed to be
6717 run after reload. */
6720 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6722 if (GET_CODE (x) != REG)
6724 if (GET_CODE (y) != REG)
6726 if (REGNO (x) == REGNO (y))
6731 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6732 This makes them candidates for using ldd and std insns.
6734 Note reg1 and reg2 *must* be hard registers. */
6737 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6739 /* We might have been passed a SUBREG. */
6740 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6743 if (REGNO (reg1) % 2 != 0)
6746 /* Integer ldd is deprecated in SPARC V9. */
6747 if (TARGET_V9 && REGNO (reg1) < 32)
6750 return (REGNO (reg1) == REGNO (reg2) - 1);
6753 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
6756 This can only happen when addr1 and addr2, the addresses in mem1
6757 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6758 addr1 must also be aligned on a 64-bit boundary.
6760 Also, if dependent_reg_rtx is not null, it should not be used to
6761 compute the address for mem1, i.e. we cannot optimize a sequence
6773 But, note that the transformation from:
6778 is perfectly fine. Thus, the peephole2 patterns always pass us
6779 the destination register of the first load, never the second one.
6781 For stores we don't have a similar problem, so dependent_reg_rtx is NULL_RTX.
6785 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6789 HOST_WIDE_INT offset1;
6791 /* The mems cannot be volatile. */
6792 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6795 /* MEM1 should be aligned on a 64-bit boundary. */
6796 if (MEM_ALIGN (mem1) < 64)
6799 addr1 = XEXP (mem1, 0);
6800 addr2 = XEXP (mem2, 0);
6802 /* Extract a register number and offset (if used) from the first addr. */
6803 if (GET_CODE (addr1) == PLUS)
6805 /* If not a REG, return zero. */
6806 if (GET_CODE (XEXP (addr1, 0)) != REG)
6810 reg1 = REGNO (XEXP (addr1, 0));
6811 /* The offset must be constant! */
6812 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6814 offset1 = INTVAL (XEXP (addr1, 1));
6817 else if (GET_CODE (addr1) != REG)
6821 reg1 = REGNO (addr1);
6822 /* This was a simple (mem (reg)) expression. Offset is 0. */
6826 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
6827 if (GET_CODE (addr2) != PLUS)
6830 if (GET_CODE (XEXP (addr2, 0)) != REG
6831 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6834 if (reg1 != REGNO (XEXP (addr2, 0)))
6837 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6840 /* The first offset must be evenly divisible by 8 to ensure the
6841 address is 64-bit aligned. */
6842 if (offset1 % 8 != 0)
6845 /* The offset for the second addr must be 4 more than the first addr. */
6846 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6849 /* All the tests passed. addr1 and addr2 are valid for ldd and std insns. */
6854 /* Return 1 if reg is a pseudo, or is the first register in
6855 a hard register pair. This makes it suitable for use in
6856 ldd and std insns. */
6859 register_ok_for_ldd (rtx reg)
6861 /* We might have been passed a SUBREG. */
6865 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6866 return (REGNO (reg) % 2 == 0);
6871 /* Return 1 if OP is a memory whose address is known to be
6872 aligned to 8-byte boundary, or a pseudo during reload.
6873 This makes it suitable for use in ldd and std insns. */
6876 memory_ok_for_ldd (rtx op)
6880 /* In 64-bit mode, we assume that the address is word-aligned. */
6881 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
6884 if ((reload_in_progress || reload_completed)
6885 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
6888 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
6890 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
6899 /* Print operand X (an rtx) in assembler syntax to file FILE.
6900 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6901 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6904 print_operand (FILE *file, rtx x, int code)
6909 /* Output an insn in a delay slot. */
6911 sparc_indent_opcode = 1;
6913 fputs ("\n\t nop", file);
6916 /* Output an annul flag if there's nothing for the delay slot and we
6917 are optimizing. This is always used with '(' below.
6918 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6919 this is a dbx bug. So, we only do this when optimizing.
6920 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6921 Always emit a nop in case the next instruction is a branch. */
6922 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6926 /* Output a 'nop' if there's nothing for the delay slot and we are
6927 not optimizing. This is always used with '*' above. */
6928 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6929 fputs ("\n\t nop", file);
6930 else if (final_sequence)
6931 sparc_indent_opcode = 1;
6934 /* Output the right displacement from the saved PC on function return.
6935 The caller may have placed an "unimp" insn immediately after the call
6936 so we have to account for it. This insn is used in the 32-bit ABI
6937 when calling a function that returns a non zero-sized structure. The
6938 64-bit ABI doesn't have it. Be careful to have this test be the same
6939 as that for the call. The exception is when sparc_std_struct_return
6940 is enabled, the psABI is followed exactly and the adjustment is made
6941 by the code in sparc_struct_value_rtx. The call emitted is the same
6942 when sparc_std_struct_return is enabled. */
6944 && cfun->returns_struct
6945 && !sparc_std_struct_return
6946 && DECL_SIZE (DECL_RESULT (current_function_decl))
6947 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6949 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6955 /* Output the Embedded Medium/Anywhere code model base register. */
6956 fputs (EMBMEDANY_BASE_REG, file);
6959 /* Print some local dynamic TLS name. */
6960 assemble_name (file, get_some_local_dynamic_name ());
6964 /* Adjust the operand to take into account a RESTORE operation. */
6965 if (GET_CODE (x) == CONST_INT)
6967 else if (GET_CODE (x) != REG)
6968 output_operand_lossage ("invalid %%Y operand");
6969 else if (REGNO (x) < 8)
6970 fputs (reg_names[REGNO (x)], file);
6971 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6972 fputs (reg_names[REGNO (x)-16], file);
6974 output_operand_lossage ("invalid %%Y operand");
6977 /* Print out the low order register name of a register pair. */
6978 if (WORDS_BIG_ENDIAN)
6979 fputs (reg_names[REGNO (x)+1], file);
6981 fputs (reg_names[REGNO (x)], file);
6984 /* Print out the high order register name of a register pair. */
6985 if (WORDS_BIG_ENDIAN)
6986 fputs (reg_names[REGNO (x)], file);
6988 fputs (reg_names[REGNO (x)+1], file);
6991 /* Print out the second register name of a register pair or quad.
6992 I.e., R (%o0) => %o1. */
6993 fputs (reg_names[REGNO (x)+1], file);
6996 /* Print out the third register name of a register quad.
6997 I.e., S (%o0) => %o2. */
6998 fputs (reg_names[REGNO (x)+2], file);
7001 /* Print out the fourth register name of a register quad.
7002 I.e., T (%o0) => %o3. */
7003 fputs (reg_names[REGNO (x)+3], file);
7006 /* Print a condition code register. */
7007 if (REGNO (x) == SPARC_ICC_REG)
7009 /* We don't handle CC[X]_NOOVmode because they're not supposed to occur here. */
7011 if (GET_MODE (x) == CCmode)
7012 fputs ("%icc", file);
7013 else if (GET_MODE (x) == CCXmode)
7014 fputs ("%xcc", file);
7019 /* %fccN register */
7020 fputs (reg_names[REGNO (x)], file);
7023 /* Print the operand's address only. */
7024 output_address (XEXP (x, 0));
7027 /* In this case we need a register. Use %g0 if the
7028 operand is const0_rtx. */
7030 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7032 fputs ("%g0", file);
7039 switch (GET_CODE (x))
7041 case IOR: fputs ("or", file); break;
7042 case AND: fputs ("and", file); break;
7043 case XOR: fputs ("xor", file); break;
7044 default: output_operand_lossage ("invalid %%A operand");
7049 switch (GET_CODE (x))
7051 case IOR: fputs ("orn", file); break;
7052 case AND: fputs ("andn", file); break;
7053 case XOR: fputs ("xnor", file); break;
7054 default: output_operand_lossage ("invalid %%B operand");
7058 /* These are used by the conditional move instructions. */
7062 enum rtx_code rc = GET_CODE (x);
7066 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7067 if (mode == CCFPmode || mode == CCFPEmode)
7068 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7070 rc = reverse_condition (GET_CODE (x));
7074 case NE: fputs ("ne", file); break;
7075 case EQ: fputs ("e", file); break;
7076 case GE: fputs ("ge", file); break;
7077 case GT: fputs ("g", file); break;
7078 case LE: fputs ("le", file); break;
7079 case LT: fputs ("l", file); break;
7080 case GEU: fputs ("geu", file); break;
7081 case GTU: fputs ("gu", file); break;
7082 case LEU: fputs ("leu", file); break;
7083 case LTU: fputs ("lu", file); break;
7084 case LTGT: fputs ("lg", file); break;
7085 case UNORDERED: fputs ("u", file); break;
7086 case ORDERED: fputs ("o", file); break;
7087 case UNLT: fputs ("ul", file); break;
7088 case UNLE: fputs ("ule", file); break;
7089 case UNGT: fputs ("ug", file); break;
7090 case UNGE: fputs ("uge", file); break;
7091 case UNEQ: fputs ("ue", file); break;
7092 default: output_operand_lossage (code == 'c'
7093 ? "invalid %%c operand"
7094 : "invalid %%C operand");
7099 /* These are used by the movr instruction pattern. */
7103 enum rtx_code rc = (code == 'd'
7104 ? reverse_condition (GET_CODE (x))
7108 case NE: fputs ("ne", file); break;
7109 case EQ: fputs ("e", file); break;
7110 case GE: fputs ("gez", file); break;
7111 case LT: fputs ("lz", file); break;
7112 case LE: fputs ("lez", file); break;
7113 case GT: fputs ("gz", file); break;
7114 default: output_operand_lossage (code == 'd'
7115 ? "invalid %%d operand"
7116 : "invalid %%D operand");
7123 /* Print a sign-extended character. */
7124 int i = trunc_int_for_mode (INTVAL (x), QImode);
7125 fprintf (file, "%d", i);
7130 /* Operand must be a MEM; write its address. */
7131 if (GET_CODE (x) != MEM)
7132 output_operand_lossage ("invalid %%f operand");
7133 output_address (XEXP (x, 0));
7138 /* Print a sign-extended 32-bit value. */
7140 if (GET_CODE(x) == CONST_INT)
7142 else if (GET_CODE(x) == CONST_DOUBLE)
7143 i = CONST_DOUBLE_LOW (x);
7146 output_operand_lossage ("invalid %%s operand");
7149 i = trunc_int_for_mode (i, SImode);
7150 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7155 /* Do nothing special. */
7159 /* Undocumented flag. */
7160 output_operand_lossage ("invalid operand output code");
7163 if (GET_CODE (x) == REG)
7164 fputs (reg_names[REGNO (x)], file);
7165 else if (GET_CODE (x) == MEM)
7168 /* Poor Sun assembler doesn't understand absolute addressing. */
7169 if (CONSTANT_P (XEXP (x, 0)))
7170 fputs ("%g0+", file);
7171 output_address (XEXP (x, 0));
7174 else if (GET_CODE (x) == HIGH)
7176 fputs ("%hi(", file);
7177 output_addr_const (file, XEXP (x, 0));
7180 else if (GET_CODE (x) == LO_SUM)
7182 print_operand (file, XEXP (x, 0), 0);
7183 if (TARGET_CM_MEDMID)
7184 fputs ("+%l44(", file);
7186 fputs ("+%lo(", file);
7187 output_addr_const (file, XEXP (x, 1));
7190 else if (GET_CODE (x) == CONST_DOUBLE
7191 && (GET_MODE (x) == VOIDmode
7192 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7194 if (CONST_DOUBLE_HIGH (x) == 0)
7195 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7196 else if (CONST_DOUBLE_HIGH (x) == -1
7197 && CONST_DOUBLE_LOW (x) < 0)
7198 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7200 output_operand_lossage ("long long constant not a valid immediate operand");
7202 else if (GET_CODE (x) == CONST_DOUBLE)
7203 output_operand_lossage ("floating point constant not a valid immediate operand");
7204 else { output_addr_const (file, x); }
7207 /* Target hook for assembling integer objects. The sparc version has
7208 special handling for aligned DI-mode objects. */
7211 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7213 /* ??? We only output .xword's for symbols and only then in environments
7214 where the assembler can handle them. */
7215 if (aligned_p && size == 8
7216 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7220 assemble_integer_with_op ("\t.xword\t", x);
7225 assemble_aligned_integer (4, const0_rtx);
7226 assemble_aligned_integer (4, x);
7230 return default_assemble_integer (x, size, aligned_p);
7233 /* Return the value of a code used in the .proc pseudo-op that says
7234 what kind of result this function returns. For non-C types, we pick
7235 the closest C type. */
7237 #ifndef SHORT_TYPE_SIZE
7238 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7241 #ifndef INT_TYPE_SIZE
7242 #define INT_TYPE_SIZE BITS_PER_WORD
7245 #ifndef LONG_TYPE_SIZE
7246 #define LONG_TYPE_SIZE BITS_PER_WORD
7249 #ifndef LONG_LONG_TYPE_SIZE
7250 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7253 #ifndef FLOAT_TYPE_SIZE
7254 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7257 #ifndef DOUBLE_TYPE_SIZE
7258 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7261 #ifndef LONG_DOUBLE_TYPE_SIZE
7262 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7266 sparc_type_code (register tree type)
7268 register unsigned long qualifiers = 0;
7269 register unsigned shift;
7271 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7272 setting more, since some assemblers will give an error for this. Also,
7273 we must be careful to avoid shifts of 32 bits or more to avoid getting
7274 unpredictable results. */
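/* Each level of pointer, array or function derivation contributes a 2-bit code,
   packed into successive bit pairs starting at bit 6; the code for the base type
   is merged into the low bits by the return statements below. */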
7276 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7278 switch (TREE_CODE (type))
7284 qualifiers |= (3 << shift);
7289 qualifiers |= (2 << shift);
7293 case REFERENCE_TYPE:
7295 qualifiers |= (1 << shift);
7299 return (qualifiers | 8);
7302 case QUAL_UNION_TYPE:
7303 return (qualifiers | 9);
7306 return (qualifiers | 10);
7309 return (qualifiers | 16);
7312 /* If this is a range type, consider it to be the underlying type. */
7314 if (TREE_TYPE (type) != 0)
7317 /* Carefully distinguish all the standard types of C,
7318 without messing up if the language is not C. We do this by
7319 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7320 look at both the names and the above fields, but that's redundant.
7321 Any type whose size is between two C types will be considered
7322 to be the wider of the two types. Also, we do not have a
7323 special code to use for "long long", so anything wider than
7324 long is treated the same. Note that we can't distinguish
7325 between "int" and "long" in this code if they are the same
7326 size, but that's fine, since neither can the assembler. */
7328 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7329 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7331 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7332 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7334 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7335 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7338 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7341 /* If this is a range type, consider it to be the underlying type. */
7343 if (TREE_TYPE (type) != 0)
7346 /* Carefully distinguish all the standard types of C,
7347 without messing up if the language is not C. */
7349 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7350 return (qualifiers | 6);
7353 return (qualifiers | 7);
7355 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7356 /* ??? We need to distinguish between double and float complex types,
7357 but I don't know how yet because I can't reach this code from
7358 existing front-ends. */
7359 return (qualifiers | 7); /* Who knows? */
7362 case BOOLEAN_TYPE: /* Boolean truth value type. */
7363 case LANG_TYPE: /* ? */
7367 gcc_unreachable (); /* Not a type! */
7374 /* Nested function support. */
7376 /* Emit RTL insns to initialize the variable parts of a trampoline.
7377 FNADDR is an RTX for the address of the function's pure code.
7378 CXT is an RTX for the static chain value for the function.
7380 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7381 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7382 (to store insns). This is a bit excessive. Perhaps a different
7383 mechanism would be better here.
7385 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7388 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
7390 /* SPARC 32-bit trampoline:
7393 sethi %hi(static), %g2
7395 or %g2, %lo(static), %g2
7397 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7398 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7402 (adjust_address (m_tramp, SImode, 0),
7403 expand_binop (SImode, ior_optab,
7404 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7405 size_int (10), 0, 1),
7406 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7407 NULL_RTX, 1, OPTAB_DIRECT));
7410 (adjust_address (m_tramp, SImode, 4),
7411 expand_binop (SImode, ior_optab,
7412 expand_shift (RSHIFT_EXPR, SImode, cxt,
7413 size_int (10), 0, 1),
7414 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7415 NULL_RTX, 1, OPTAB_DIRECT));
7418 (adjust_address (m_tramp, SImode, 8),
7419 expand_binop (SImode, ior_optab,
7420 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7421 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7422 NULL_RTX, 1, OPTAB_DIRECT));
7425 (adjust_address (m_tramp, SImode, 12),
7426 expand_binop (SImode, ior_optab,
7427 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7428 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7429 NULL_RTX, 1, OPTAB_DIRECT));
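/* The constants above are the fixed parts of the four trampoline instructions:
   0x03000000 and 0x05000000 are the 'sethi %hi(...), %g1' and 'sethi %hi(...), %g2'
   templates with the upper 22 address bits OR'ed in, while 0x81c06000 and
   0x8410a000 are 'jmp %g1+%lo(...)' and 'or %g2, %lo(...), %g2' with the low
   10 address bits OR'ed into the immediate field. */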
7431 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7432 aligned on a 16 byte boundary so one flush clears it all. */
7433 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
7434 if (sparc_cpu != PROCESSOR_ULTRASPARC
7435 && sparc_cpu != PROCESSOR_ULTRASPARC3
7436 && sparc_cpu != PROCESSOR_NIAGARA
7437 && sparc_cpu != PROCESSOR_NIAGARA2)
7438 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
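/* The single-flush behavior described above is only guaranteed on the
   UltraSPARC-class cores; for the remaining CPUs a second flush is emitted
   here on the assumption that their flush granularity may be as small as
   8 bytes. */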
7440 /* Call __enable_execute_stack after writing onto the stack to make sure
7441 the stack address is accessible. */
7442 #ifdef ENABLE_EXECUTE_STACK
7443 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7444 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
7449 /* The 64-bit version is simpler because it makes more sense to load the
7450 values as "immediate" data out of the trampoline. It's also easier since
7451 we can read the PC without clobbering a register. */
7454 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
7456 /* SPARC 64-bit trampoline:
7465 emit_move_insn (adjust_address (m_tramp, SImode, 0),
7466 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7467 emit_move_insn (adjust_address (m_tramp, SImode, 4),
7468 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7469 emit_move_insn (adjust_address (m_tramp, SImode, 8),
7470 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7471 emit_move_insn (adjust_address (m_tramp, SImode, 12),
7472 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7473 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
7474 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
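/* The code words stored above are 'rd %pc, %g1', 'ldx [%g1+24], %g5', 'jmp %g5'
   and a delay-slot 'ldx [%g1+16], %g5': the trampoline locates itself via %pc,
   loads the target address from the data word at offset 24, jumps to it, and
   loads the static chain from offset 16 into %g5 in the delay slot. */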
7475 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
7477 if (sparc_cpu != PROCESSOR_ULTRASPARC
7478 && sparc_cpu != PROCESSOR_ULTRASPARC3
7479 && sparc_cpu != PROCESSOR_NIAGARA
7480 && sparc_cpu != PROCESSOR_NIAGARA2)
7481 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
7483 /* Call __enable_execute_stack after writing onto the stack to make sure
7484 the stack address is accessible. */
7485 #ifdef ENABLE_EXECUTE_STACK
7486 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7487 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
7491 /* Worker for TARGET_TRAMPOLINE_INIT. */
7494 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
7496 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
7497 cxt = force_reg (Pmode, cxt);
7499 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
7501 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
7504 /* Adjust the cost of a scheduling dependency. Return the new cost of
7505 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7508 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7510 enum attr_type insn_type;
7512 if (! recog_memoized (insn))
7515 insn_type = get_attr_type (insn);
7517 if (REG_NOTE_KIND (link) == 0)
7519 /* Data dependency; DEP_INSN writes a register that INSN reads some
7522 /* if a load, then the dependence must be on the memory address;
7523 add an extra "cycle". Note that the cost could be two cycles
7524 if the reg was written late in an instruction group; we cannot tell
7526 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7529 /* Get the delay only if the address of the store is the dependence. */
7530 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7532 rtx pat = PATTERN(insn);
7533 rtx dep_pat = PATTERN (dep_insn);
7535 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7536 return cost; /* This should not happen! */
7538 /* The dependency between the two instructions was on the data that
7539 is being stored. Assume that this implies that the address of the
7540 store is not dependent. */
7541 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7544 return cost + 3; /* An approximation. */
7547 /* A shift instruction cannot receive its data from an instruction
7548 in the same cycle; add a one cycle penalty. */
7549 if (insn_type == TYPE_SHIFT)
7550 return cost + 3; /* Split before cascade into shift. */
7554 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7555 INSN writes some cycles later. */
7557 /* These are only significant for the fpu; writing an fp reg before
7558 the fpu has finished with it stalls the processor. */
7560 /* Reusing an integer register causes no problems. */
7561 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7569 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7571 enum attr_type insn_type, dep_type;
7572 rtx pat = PATTERN(insn);
7573 rtx dep_pat = PATTERN (dep_insn);
7575 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7578 insn_type = get_attr_type (insn);
7579 dep_type = get_attr_type (dep_insn);
7581 switch (REG_NOTE_KIND (link))
7584 /* Data dependency; DEP_INSN writes a register that INSN reads some
7591 /* Get the delay iff the address of the store is the dependence. */
7592 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7595 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7602 /* If a load, then the dependence must be on the memory address. If
7603 the addresses aren't equal, then it might be a false dependency */
7604 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7606 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7607 || GET_CODE (SET_DEST (dep_pat)) != MEM
7608 || GET_CODE (SET_SRC (pat)) != MEM
7609 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7610 XEXP (SET_SRC (pat), 0)))
7618 /* Compare to branch latency is 0. There is no benefit from
7619 separating compare and branch. */
7620 if (dep_type == TYPE_COMPARE)
7622 /* Floating point compare to branch latency is less than
7623 compare to conditional move. */
7624 if (dep_type == TYPE_FPCMP)
7633 /* Anti-dependencies only penalize the fpu unit. */
7634 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7646 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
7650 case PROCESSOR_SUPERSPARC:
7651 cost = supersparc_adjust_cost (insn, link, dep, cost);
7653 case PROCESSOR_HYPERSPARC:
7654 case PROCESSOR_SPARCLITE86X:
7655 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7664 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7665 int sched_verbose ATTRIBUTE_UNUSED,
7666 int max_ready ATTRIBUTE_UNUSED)
7670 sparc_use_sched_lookahead (void)
7672 if (sparc_cpu == PROCESSOR_NIAGARA
7673 || sparc_cpu == PROCESSOR_NIAGARA2)
7675 if (sparc_cpu == PROCESSOR_ULTRASPARC
7676 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7678 if ((1 << sparc_cpu) &
7679 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7680 (1 << PROCESSOR_SPARCLITE86X)))
7686 sparc_issue_rate (void)
7690 case PROCESSOR_NIAGARA:
7691 case PROCESSOR_NIAGARA2:
7695 /* Assume V9 processors are capable of at least dual-issue. */
7697 case PROCESSOR_SUPERSPARC:
7699 case PROCESSOR_HYPERSPARC:
7700 case PROCESSOR_SPARCLITE86X:
7702 case PROCESSOR_ULTRASPARC:
7703 case PROCESSOR_ULTRASPARC3:
7709 set_extends (rtx insn)
7711 register rtx pat = PATTERN (insn);
7713 switch (GET_CODE (SET_SRC (pat)))
7715 /* Load and some shift instructions zero extend. */
7718 /* sethi clears the high bits */
7720 /* LO_SUM is used with sethi. sethi cleared the high
7721 bits and the values used with lo_sum are positive */
7723 /* Store flag stores 0 or 1 */
7733 rtx op0 = XEXP (SET_SRC (pat), 0);
7734 rtx op1 = XEXP (SET_SRC (pat), 1);
7735 if (GET_CODE (op1) == CONST_INT)
7736 return INTVAL (op1) >= 0;
7737 if (GET_CODE (op0) != REG)
7739 if (sparc_check_64 (op0, insn) == 1)
7741 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7746 rtx op0 = XEXP (SET_SRC (pat), 0);
7747 rtx op1 = XEXP (SET_SRC (pat), 1);
7748 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7750 if (GET_CODE (op1) == CONST_INT)
7751 return INTVAL (op1) >= 0;
7752 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7755 return GET_MODE (SET_SRC (pat)) == SImode;
7756 /* Positive integers leave the high bits zero. */
7758 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7760 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7763 return - (GET_MODE (SET_SRC (pat)) == SImode);
7765 return sparc_check_64 (SET_SRC (pat), insn);
7771 /* We _ought_ to have only one kind per function, but... */
7772 static GTY(()) rtx sparc_addr_diff_list;
7773 static GTY(()) rtx sparc_addr_list;
7776 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7778 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7780 sparc_addr_diff_list
7781 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7783 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7787 sparc_output_addr_vec (rtx vec)
7789 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7790 int idx, vlen = XVECLEN (body, 0);
7792 #ifdef ASM_OUTPUT_ADDR_VEC_START
7793 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7796 #ifdef ASM_OUTPUT_CASE_LABEL
7797 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7800 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7803 for (idx = 0; idx < vlen; idx++)
7805 ASM_OUTPUT_ADDR_VEC_ELT
7806 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7809 #ifdef ASM_OUTPUT_ADDR_VEC_END
7810 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7815 sparc_output_addr_diff_vec (rtx vec)
7817 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7818 rtx base = XEXP (XEXP (body, 0), 0);
7819 int idx, vlen = XVECLEN (body, 1);
7821 #ifdef ASM_OUTPUT_ADDR_VEC_START
7822 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7825 #ifdef ASM_OUTPUT_CASE_LABEL
7826 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7829 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7832 for (idx = 0; idx < vlen; idx++)
7834 ASM_OUTPUT_ADDR_DIFF_ELT
7837 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7838 CODE_LABEL_NUMBER (base));
7841 #ifdef ASM_OUTPUT_ADDR_VEC_END
7842 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7847 sparc_output_deferred_case_vectors (void)
7852 if (sparc_addr_list == NULL_RTX
7853 && sparc_addr_diff_list == NULL_RTX)
7856 /* Align to cache line in the function's code section. */
7857 switch_to_section (current_function_section ());
7859 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7861 ASM_OUTPUT_ALIGN (asm_out_file, align);
7863 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7864 sparc_output_addr_vec (XEXP (t, 0));
7865 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7866 sparc_output_addr_diff_vec (XEXP (t, 0));
7868 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7871 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7872 unknown. Return 1 if the high bits are zero, -1 if the register is sign-extended. */
7875 sparc_check_64 (rtx x, rtx insn)
7877 /* If a register is set only once it is safe to ignore insns this
7878 code does not know how to handle. The loop will either recognize
7879 the single set and return the correct value or fail to recognize it and return 0. */
7884 gcc_assert (GET_CODE (x) == REG);
7886 if (GET_MODE (x) == DImode)
7887 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
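/* When X is DImode, Y names the SImode register holding its low word (the
   following hard register, since words are stored big-endian), so that 32-bit
   sets of the low half are also recognized by the scan below. */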
7889 if (flag_expensive_optimizations
7890 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
7896 insn = get_last_insn_anywhere ();
7901 while ((insn = PREV_INSN (insn)))
7903 switch (GET_CODE (insn))
7916 rtx pat = PATTERN (insn);
7917 if (GET_CODE (pat) != SET)
7919 if (rtx_equal_p (x, SET_DEST (pat)))
7920 return set_extends (insn);
7921 if (y && rtx_equal_p (y, SET_DEST (pat)))
7922 return set_extends (insn);
7923 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7931 /* Returns assembly code to perform a DImode shift using
7932 a 64-bit global or out register on SPARC-V8+. */
7934 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7936 static char asm_code[60];
7938 /* The scratch register is only required when the destination
7939 register is not a 64-bit global or out register. */
7940 if (which_alternative != 2)
7941 operands[3] = operands[0];
7943 /* We can only shift by constants <= 63. */
7944 if (GET_CODE (operands[2]) == CONST_INT)
7945 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7947 if (GET_CODE (operands[1]) == CONST_INT)
7949 output_asm_insn ("mov\t%1, %3", operands);
7953 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7954 if (sparc_check_64 (operands[1], insn) <= 0)
7955 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7956 output_asm_insn ("or\t%L1, %3, %3", operands);
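/* At this point %3 holds the full 64-bit source value, either moved directly
   for a constant or assembled above from the two 32-bit halves; the strings
   returned below append the shift itself and split the 64-bit result back
   into the high and low destination words. */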
7959 strcpy(asm_code, opcode);
7961 if (which_alternative != 2)
7962 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7964 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
7967 /* Output rtl to increment the profiler label LABELNO
7968 for profiling a function entry. */
7971 sparc_profile_hook (int labelno)
7976 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7977 if (NO_PROFILE_COUNTERS)
7979 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
7983 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7984 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7985 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7989 #if defined (OBJECT_FORMAT_ELF) && !HAVE_GNU_AS
7991 sparc_elf_asm_named_section (const char *name, unsigned int flags,
7992 tree decl ATTRIBUTE_UNUSED)
7994 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7996 if (!(flags & SECTION_DEBUG))
7997 fputs (",#alloc", asm_out_file);
7998 if (flags & SECTION_WRITE)
7999 fputs (",#write", asm_out_file);
8000 if (flags & SECTION_TLS)
8001 fputs (",#tls", asm_out_file);
8002 if (flags & SECTION_CODE)
8003 fputs (",#execinstr", asm_out_file);
8005 /* ??? Handle SECTION_BSS. */
8007 fputc ('\n', asm_out_file);
8009 #endif /* OBJECT_FORMAT_ELF */
8011 /* We do not allow indirect calls to be optimized into sibling calls.
8013 We cannot use sibling calls when delayed branches are disabled
8014 because they will likely require the call delay slot to be filled.
8016 Also, on SPARC 32-bit we cannot emit a sibling call when the
8017 current function returns a structure. This is because the "unimp
8018 after call" convention would cause the callee to return to the
8019 wrong place. The generic code already disallows cases where the
8020 function being called returns a structure.
8022 It may seem strange how this last case could occur. Usually there
8023 is code after the call which jumps to epilogue code which dumps the
8024 return value into the struct return area. That ought to invalidate
8025 the sibling call right? Well, in the C++ case we can end up passing
8026 the pointer to the struct return area to a constructor (which returns
8027 void) and then nothing else happens. Such a sibling call would look
8028 valid without the added check here.
8030 VxWorks PIC PLT entries require the global pointer to be initialized
8031 on entry. We therefore can't emit sibling calls to them. */
8033 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8036 && flag_delayed_branch
8037 && (TARGET_ARCH64 || ! cfun->returns_struct)
8038 && !(TARGET_VXWORKS_RTP
8040 && !targetm.binds_local_p (decl)));
8043 /* libfunc renaming. */
8044 #include "config/gofast.h"
8047 sparc_init_libfuncs (void)
8051 /* Use the subroutines that Sun's library provides for integer
8052 multiply and divide. The `*' prevents an underscore from
8053 being prepended by the compiler. .umul is a little faster than .mul. */
8055 set_optab_libfunc (smul_optab, SImode, "*.umul");
8056 set_optab_libfunc (sdiv_optab, SImode, "*.div");
8057 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
8058 set_optab_libfunc (smod_optab, SImode, "*.rem");
8059 set_optab_libfunc (umod_optab, SImode, "*.urem");
8061 /* TFmode arithmetic. These names are part of the SPARC 32bit ABI. */
8062 set_optab_libfunc (add_optab, TFmode, "_Q_add");
8063 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
8064 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
8065 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
8066 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
8068 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
8069 is because with soft-float, the SFmode and DFmode sqrt
8070 instructions will be absent, and the compiler will notice and
8071 try to use the TFmode sqrt instruction for calls to the
8072 builtin function sqrt, but this fails. */
8074 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
8076 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
8077 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
8078 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
8079 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
8080 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
8081 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
8083 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
8084 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
8085 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
8086 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
8088 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
8089 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
8090 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
8091 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
8093 if (DITF_CONVERSION_LIBFUNCS)
8095 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
8096 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
8097 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
8098 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
8101 if (SUN_CONVERSION_LIBFUNCS)
8103 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
8104 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
8105 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
8106 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
8111 /* In the SPARC 64bit ABI, SImode multiply and divide functions
8112 do not exist in the library. Make sure the compiler does not
8113 emit calls to them by accident. (It should always use the
8114 hardware instructions.) */
8115 set_optab_libfunc (smul_optab, SImode, 0);
8116 set_optab_libfunc (sdiv_optab, SImode, 0);
8117 set_optab_libfunc (udiv_optab, SImode, 0);
8118 set_optab_libfunc (smod_optab, SImode, 0);
8119 set_optab_libfunc (umod_optab, SImode, 0);
8121 if (SUN_INTEGER_MULTIPLY_64)
8123 set_optab_libfunc (smul_optab, DImode, "__mul64");
8124 set_optab_libfunc (sdiv_optab, DImode, "__div64");
8125 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
8126 set_optab_libfunc (smod_optab, DImode, "__rem64");
8127 set_optab_libfunc (umod_optab, DImode, "__urem64");
8130 if (SUN_CONVERSION_LIBFUNCS)
8132 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
8133 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
8134 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
8135 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
8139 gofast_maybe_init_libfuncs ();
8142 #define def_builtin(NAME, CODE, TYPE) \
8143 add_builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
8146 /* Implement the TARGET_INIT_BUILTINS target hook.
8147 Create builtin functions for special SPARC instructions. */
8150 sparc_init_builtins (void)
8153 sparc_vis_init_builtins ();
8156 /* Create builtin functions for VIS 1.0 instructions. */
8159 sparc_vis_init_builtins (void)
8161 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
8162 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
8163 tree v4hi = build_vector_type (intHI_type_node, 4);
8164 tree v2hi = build_vector_type (intHI_type_node, 2);
8165 tree v2si = build_vector_type (intSI_type_node, 2);
8167 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
8168 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
8169 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
8170 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
8171 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
8172 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
8173 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
8174 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
8175 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
8176 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
8177 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
8178 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
8179 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
8181 intDI_type_node, 0);
8182 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
8184 intDI_type_node, 0);
8185 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
8187 intSI_type_node, 0);
8188 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
8190 intDI_type_node, 0);
8192 /* Packing and expanding vectors. */
8193 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
8194 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
8195 v8qi_ftype_v2si_v8qi);
8196 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
8198 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
8199 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
8200 v8qi_ftype_v4qi_v4qi);
8202 /* Multiplications. */
8203 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
8204 v4hi_ftype_v4qi_v4hi);
8205 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
8206 v4hi_ftype_v4qi_v2hi);
8207 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
8208 v4hi_ftype_v4qi_v2hi);
8209 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
8210 v4hi_ftype_v8qi_v4hi);
8211 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8212 v4hi_ftype_v8qi_v4hi);
8213 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8214 v2si_ftype_v4qi_v2hi);
8215 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8216 v2si_ftype_v4qi_v2hi);
8218 /* Data aligning. */
8219 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8220 v4hi_ftype_v4hi_v4hi);
8221 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8222 v8qi_ftype_v8qi_v8qi);
8223 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8224 v2si_ftype_v2si_v2si);
8225 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8228 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8231 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8234 /* Pixel distance. */
8235 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8236 di_ftype_v8qi_v8qi_di);
8239 /* Handle TARGET_EXPAND_BUILTIN target hook.
8240 Expand builtin functions for sparc intrinsics. */
8243 sparc_expand_builtin (tree exp, rtx target,
8244 rtx subtarget ATTRIBUTE_UNUSED,
8245 enum machine_mode tmode ATTRIBUTE_UNUSED,
8246 int ignore ATTRIBUTE_UNUSED)
8249 call_expr_arg_iterator iter;
8250 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8251 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8253 enum machine_mode mode[4];
8256 mode[0] = insn_data[icode].operand[0].mode;
8258 || GET_MODE (target) != mode[0]
8259 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
8260 op[0] = gen_reg_rtx (mode[0]);
8264 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8267 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8268 op[arg_count] = expand_normal (arg);
8270 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8272 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8278 pat = GEN_FCN (icode) (op[0], op[1]);
8281 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8284 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8299 sparc_vis_mul8x16 (int e8, int e16)
8301 return (e8 * e16 + 128) / 256;
8304 /* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
8305 by FNCODE. All of the elements in ELTS0 and ELTS1 lists must be integer
8306 constants. A tree list with the results of the multiplications is returned,
8307 and each element in the list is of INNER_TYPE. */
8310 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8312 tree n_elts = NULL_TREE;
8317 case CODE_FOR_fmul8x16_vis:
8318 for (; elts0 && elts1;
8319 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8322 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8323 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8324 n_elts = tree_cons (NULL_TREE,
8325 build_int_cst (inner_type, val),
8330 case CODE_FOR_fmul8x16au_vis:
8331 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8333 for (; elts0; elts0 = TREE_CHAIN (elts0))
8336 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8338 n_elts = tree_cons (NULL_TREE,
8339 build_int_cst (inner_type, val),
8344 case CODE_FOR_fmul8x16al_vis:
8345 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8347 for (; elts0; elts0 = TREE_CHAIN (elts0))
8350 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8352 n_elts = tree_cons (NULL_TREE,
8353 build_int_cst (inner_type, val),
8362 return nreverse (n_elts);
8365 /* Handle TARGET_FOLD_BUILTIN target hook.
8366 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8367 result of the function call is ignored. NULL_TREE is returned if the
8368 function could not be folded. */
8371 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8373 tree arg0, arg1, arg2;
8374 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8375 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
8378 && icode != CODE_FOR_alignaddrsi_vis
8379 && icode != CODE_FOR_alignaddrdi_vis)
8380 return fold_convert (rtype, integer_zero_node);
8384 case CODE_FOR_fexpand_vis:
8385 arg0 = TREE_VALUE (arglist);
8388 if (TREE_CODE (arg0) == VECTOR_CST)
8390 tree inner_type = TREE_TYPE (rtype);
8391 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8392 tree n_elts = NULL_TREE;
8394 for (; elts; elts = TREE_CHAIN (elts))
8396 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
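/* fexpand turns each unsigned 8-bit element into a 16-bit fixed-point value;
   the left shift by 4 places the byte in bits 4..11 of the result element. */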
8397 n_elts = tree_cons (NULL_TREE,
8398 build_int_cst (inner_type, val),
8401 return build_vector (rtype, nreverse (n_elts));
8405 case CODE_FOR_fmul8x16_vis:
8406 case CODE_FOR_fmul8x16au_vis:
8407 case CODE_FOR_fmul8x16al_vis:
8408 arg0 = TREE_VALUE (arglist);
8409 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8413 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8415 tree inner_type = TREE_TYPE (rtype);
8416 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8417 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8418 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
8421 return build_vector (rtype, n_elts);
8425 case CODE_FOR_fpmerge_vis:
8426 arg0 = TREE_VALUE (arglist);
8427 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8431 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8433 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8434 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8435 tree n_elts = NULL_TREE;
8437 for (; elts0 && elts1;
8438 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8440 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8441 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8444 return build_vector (rtype, nreverse (n_elts));
8448 case CODE_FOR_pdist_vis:
8449 arg0 = TREE_VALUE (arglist);
8450 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8451 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8456 if (TREE_CODE (arg0) == VECTOR_CST
8457 && TREE_CODE (arg1) == VECTOR_CST
8458 && TREE_CODE (arg2) == INTEGER_CST)
8461 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8462 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8463 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8464 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8466 for (; elts0 && elts1;
8467 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8469 unsigned HOST_WIDE_INT
8470 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8471 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8472 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8473 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8475 unsigned HOST_WIDE_INT l;
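/* Accumulate |elts0[i] - elts1[i]| into (low, high) using double-word
   arithmetic: negate one element, add the other, negate again if the
   difference came out negative, then add the absolute difference into the
   running total. */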
8478 overflow |= neg_double (low1, high1, &l, &h);
8479 overflow |= add_double (low0, high0, l, h, &l, &h);
8481 overflow |= neg_double (l, h, &l, &h);
8483 overflow |= add_double (low, high, l, h, &low, &high);
8486 gcc_assert (overflow == 0);
8488 return build_int_cst_wide (rtype, low, high);
8498 /* ??? This duplicates information provided to the compiler by the
8499 ??? scheduler description. Some day, teach genautomata to output
8500 ??? the latencies and then CSE will just use that. */
8503 sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
8504 bool speed ATTRIBUTE_UNUSED)
8506 enum machine_mode mode = GET_MODE (x);
8507 bool float_mode_p = FLOAT_MODE_P (mode);
8512 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8530 if (GET_MODE (x) == VOIDmode
8531 && ((CONST_DOUBLE_HIGH (x) == 0
8532 && CONST_DOUBLE_LOW (x) < 0x1000)
8533 || (CONST_DOUBLE_HIGH (x) == -1
8534 && CONST_DOUBLE_LOW (x) < 0
8535 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8542 /* If outer-code was a sign or zero extension, a cost
8543 of COSTS_N_INSNS (1) was already added in. This is
8544 why we are subtracting it back out. */
8545 if (outer_code == ZERO_EXTEND)
8547 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8549 else if (outer_code == SIGN_EXTEND)
8551 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8553 else if (float_mode_p)
8555 *total = sparc_costs->float_load;
8559 *total = sparc_costs->int_load;
8567 *total = sparc_costs->float_plusminus;
8569 *total = COSTS_N_INSNS (1);
8574 *total = sparc_costs->float_mul;
8575 else if (! TARGET_HARD_MUL)
8576 *total = COSTS_N_INSNS (25);
8582 if (sparc_costs->int_mul_bit_factor)
8586 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8588 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
8589 for (nbits = 0; value != 0; value &= value - 1)
8592 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8593 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8595 rtx x1 = XEXP (x, 1);
8596 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8597 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8599 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8601 for (; value2 != 0; value2 &= value2 - 1)
8609 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8610 bit_cost = COSTS_N_INSNS (bit_cost);
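/* bit_cost approximates the extra latency of an early-out multiplier: the
   more bits set in the constant operand, the more cycles are charged, scaled
   by the target's per-cycle multiplier bit factor. */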
8614 *total = sparc_costs->int_mulX + bit_cost;
8616 *total = sparc_costs->int_mul + bit_cost;
8623 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8633 *total = sparc_costs->float_div_df;
8635 *total = sparc_costs->float_div_sf;
8640 *total = sparc_costs->int_divX;
8642 *total = sparc_costs->int_div;
8649 *total = COSTS_N_INSNS (1);
8656 case UNSIGNED_FLOAT:
8660 case FLOAT_TRUNCATE:
8661 *total = sparc_costs->float_move;
8666 *total = sparc_costs->float_sqrt_df;
8668 *total = sparc_costs->float_sqrt_sf;
8673 *total = sparc_costs->float_cmp;
8675 *total = COSTS_N_INSNS (1);
8680 *total = sparc_costs->float_cmove;
8682 *total = sparc_costs->int_cmove;
8686 /* Handle the NAND vector patterns. */
8687 if (sparc_vector_mode_supported_p (GET_MODE (x))
8688 && GET_CODE (XEXP (x, 0)) == NOT
8689 && GET_CODE (XEXP (x, 1)) == NOT)
8691 *total = COSTS_N_INSNS (1);
8702 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
8703 This is achieved by means of a manual dynamic stack space allocation in
8704 the current frame. We make the assumption that SEQ doesn't contain any
8705 function calls, with the possible exception of calls to the PIC helper. */
8708 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8710 /* We must preserve the lowest 16 words for the register save area. */
8711 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
8712 /* We really need only 2 words of fresh stack space. */
8713 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
8716 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
8717 SPARC_STACK_BIAS + offset));
8719 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
8720 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8722 emit_insn (gen_rtx_SET (VOIDmode,
8723 adjust_address (slot, word_mode, UNITS_PER_WORD),
8727 emit_insn (gen_rtx_SET (VOIDmode,
8729 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8730 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8731 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
8734 /* Output the assembler code for a thunk function. THUNK_DECL is the
8735 declaration for the thunk function itself, FUNCTION is the decl for
8736 the target function. DELTA is an immediate constant offset to be
8737 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8738 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8741 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8742 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8745 rtx this_rtx, insn, funexp;
8746 unsigned int int_arg_first;
8748 reload_completed = 1;
8749 epilogue_completed = 1;
8751 emit_note (NOTE_INSN_PROLOGUE_END);
8753 if (flag_delayed_branch)
8755 /* We will emit a regular sibcall below, so we need to instruct
8756 output_sibcall that we are in a leaf function. */
8757 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8759 /* This will cause final.c to invoke leaf_renumber_regs so we
8760 must behave as if we were in a not-yet-leafified function. */
8761 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8765 /* We will emit the sibcall manually below, so we will need to
8766 manually spill non-leaf registers. */
8767 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8769 /* We really are in a leaf function. */
8770 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8773 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8774 returns a structure, the structure return pointer is there instead. */
8776 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8777 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
8779 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
8781 /* Add DELTA. When possible use a plain add, otherwise load it into
8782 a register first. */
8785 rtx delta_rtx = GEN_INT (delta);
8787 if (! SPARC_SIMM13_P (delta))
8789 rtx scratch = gen_rtx_REG (Pmode, 1);
8790 emit_move_insn (scratch, delta_rtx);
8791 delta_rtx = scratch;
8794 /* THIS_RTX += DELTA. */
8795 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
8798 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
8801 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8802 rtx scratch = gen_rtx_REG (Pmode, 1);
8804 gcc_assert (vcall_offset < 0);
8806 /* SCRATCH = *THIS_RTX. */
8807 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
8809 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8810 may not have any available scratch register at this point. */
8811 if (SPARC_SIMM13_P (vcall_offset))
8813 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8814 else if (! fixed_regs[5]
8815 /* The below sequence is made up of at least 2 insns,
8816 while the default method may need only one. */
8817 && vcall_offset < -8192)
8819 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8820 emit_move_insn (scratch2, vcall_offset_rtx);
8821 vcall_offset_rtx = scratch2;
8825 rtx increment = GEN_INT (-4096);
8827 /* VCALL_OFFSET is a negative number whose typical range can be
8828 estimated as -32768..0 in 32-bit mode. In almost all cases
8829 it is therefore cheaper to emit multiple add insns than
8830 spilling and loading the constant into a register (at least
8832 while (! SPARC_SIMM13_P (vcall_offset))
8834 emit_insn (gen_add2_insn (scratch, increment));
8835 vcall_offset += 4096;
8837 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
8840 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
8841 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8842 gen_rtx_PLUS (Pmode,
8844 vcall_offset_rtx)));
8846 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
8847 emit_insn (gen_add2_insn (this_rtx, scratch));
8850 /* Generate a tail call to the target function. */
8851 if (! TREE_USED (function))
8853 assemble_external (function);
8854 TREE_USED (function) = 1;
8856 funexp = XEXP (DECL_RTL (function), 0);
8858 if (flag_delayed_branch)
8860 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8861 insn = emit_call_insn (gen_sibcall (funexp));
8862 SIBLING_CALL_P (insn) = 1;
8866 /* The hoops we have to jump through in order to generate a sibcall
8867 without using delay slots... */
8868 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8872 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8873 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8875 /* Delay emitting the PIC helper function because it needs to
8876 change the section and we are emitting assembly code. */
8877 load_pic_register (true); /* clobbers %o7 */
8878 scratch = legitimize_pic_address (funexp, scratch);
8881 emit_and_preserve (seq, spill_reg, spill_reg2);
8883 else if (TARGET_ARCH32)
8885 emit_insn (gen_rtx_SET (VOIDmode,
8887 gen_rtx_HIGH (SImode, funexp)));
8888 emit_insn (gen_rtx_SET (VOIDmode,
8890 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8892 else /* TARGET_ARCH64 */
8894 switch (sparc_cmodel)
8898 /* The destination can serve as a temporary. */
8899 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8904 /* The destination cannot serve as a temporary. */
8905 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8907 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8910 emit_and_preserve (seq, spill_reg, 0);
8918 emit_jump_insn (gen_indirect_jump (scratch));
8923 /* Run just enough of rest_of_compilation to get the insns emitted.
8924 There's not really enough bulk here to make other passes such as
8925 instruction scheduling worth while. Note that use_thunk calls
8926 assemble_start_function and assemble_end_function. */
8927 insn = get_insns ();
8928 insn_locators_alloc ();
8929 shorten_branches (insn);
8930 final_start_function (insn, file, 1);
8931 final (insn, file, 1);
8932 final_end_function ();
8934 reload_completed = 0;
8935 epilogue_completed = 0;
8938 /* Return true if sparc_output_mi_thunk would be able to output the
8939 assembler code for the thunk function specified by the arguments
8940 it is passed, and false otherwise. */
8942 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
8943 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8944 HOST_WIDE_INT vcall_offset,
8945 const_tree function ATTRIBUTE_UNUSED)
8947 /* Bound the loop used in the default method above. */
8948 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8951 /* How to allocate a 'struct machine_function'. */
8953 static struct machine_function *
8954 sparc_init_machine_status (void)
8956 return GGC_CNEW (struct machine_function);
8959 /* Locate some local-dynamic symbol still in use by this function
8960 so that we can print its name in local-dynamic base patterns. */
8963 get_some_local_dynamic_name (void)
8967 if (cfun->machine->some_ld_name)
8968 return cfun->machine->some_ld_name;
8970 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8972 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8973 return cfun->machine->some_ld_name;
8979 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8984 && GET_CODE (x) == SYMBOL_REF
8985 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8987 cfun->machine->some_ld_name = XSTR (x, 0);

/* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
   This is called from dwarf2out.c to emit call frame instructions
   for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
static void
sparc_dwarf_handle_frame_unspec (const char *label,
                                 rtx pattern ATTRIBUTE_UNUSED,
                                 int index ATTRIBUTE_UNUSED)
{
  gcc_assert (index == UNSPECV_SAVEW);
  dwarf2out_window_save (label);
}
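
/* The assertion reflects that UNSPECV_SAVEW, emitted for the register
   window save in the prologue, is the only frame-related unspec this
   hook expects; dwarf2out_window_save records the corresponding
   DW_CFA_GNU_window_save opcode at LABEL.  */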

/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void
sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.word\t%r_tls_dtpoff32(", file);
      break;
    case 8:
      fputs ("\t.xword\t%r_tls_dtpoff64(", file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs (")", file);
}
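
/* For example, a 4-byte entry for a thread-local symbol foo (the name
   is purely illustrative) comes out in the assembly as

       .word   %r_tls_dtpoff32(foo)

   i.e. the offset of foo from the dynamic thread pointer base of its
   module.  */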

/* Do whatever processing is required at the end of a file.  */

static void
sparc_file_end (void)
{
  /* If we haven't emitted the special PIC helper function, do so now.  */
  if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
    emit_pic_helper ();

  if (NEED_INDICATE_EXEC_STACK)
    file_end_indicate_exec_stack ();
}

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
sparc_mangle_type (const_tree type)
{
  if (!TARGET_64BIT
      && TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
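
/* In the Itanium C++ ABI, "g" is the mangling used for __float128;
   returning it here gives the 128-bit long double a mangled name
   distinct from the 64-bit default, so objects built for the two ABIs
   cannot be mixed silently.  */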

/* Expand code to perform a 8 or 16-bit compare and swap by doing 32-bit
   compare and swap on the word containing the byte or half-word.  */

void
sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
{
  rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
  rtx addr = gen_reg_rtx (Pmode);
  rtx off = gen_reg_rtx (SImode);
  rtx oldv = gen_reg_rtx (SImode);
  rtx newv = gen_reg_rtx (SImode);
  rtx oldvalue = gen_reg_rtx (SImode);
  rtx newvalue = gen_reg_rtx (SImode);
  rtx res = gen_reg_rtx (SImode);
  rtx resv = gen_reg_rtx (SImode);
  rtx memsi, val, mask, end_label, loop_label, cc;

  emit_insn (gen_rtx_SET (VOIDmode, addr,
                          gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));

  if (Pmode != SImode)
    addr1 = gen_lowpart (SImode, addr1);
  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_AND (SImode, addr1, GEN_INT (3))));
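
  /* At this point ADDR holds the word-aligned address containing the
     byte or half-word, and OFF its byte offset (0-3) within that
     word.  */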

  memsi = gen_rtx_MEM (SImode, addr);
  set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
  MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);

  val = force_reg (SImode, memsi);

  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_XOR (SImode, off,
                                       GEN_INT (GET_MODE (mem) == QImode
                                                ? 3 : 2))));

  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
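
  /* SPARC is big-endian: XORing the byte offset with 3 (bytes) or 2
     (half-words) and then multiplying by 8 turns OFF into the left-shift
     count that positions the sub-word within the SImode word just
     loaded.  */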

  if (GET_MODE (mem) == QImode)
    mask = force_reg (SImode, GEN_INT (0xff));
  else
    mask = force_reg (SImode, GEN_INT (0xffff));

  emit_insn (gen_rtx_SET (VOIDmode, mask,
                          gen_rtx_ASHIFT (SImode, mask, off)));

  emit_insn (gen_rtx_SET (VOIDmode, val,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       val)));

  oldval = gen_lowpart (SImode, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_ASHIFT (SImode, oldval, off)));

  newval = gen_lowpart_common (SImode, newval);
  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_ASHIFT (SImode, newval, off)));

  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_AND (SImode, oldv, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_AND (SImode, newv, mask)));

  end_label = gen_label_rtx ();
  loop_label = gen_label_rtx ();
  emit_label (loop_label);

  emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
                          gen_rtx_IOR (SImode, oldv, val)));

  emit_insn (gen_rtx_SET (VOIDmode, newvalue,
                          gen_rtx_IOR (SImode, newv, val)));

  emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));

  emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
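
  /* If the word-wide CAS returned exactly the word we expected, the
     narrow compare-and-swap has succeeded and we leave the retry
     loop.  */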

  emit_insn (gen_rtx_SET (VOIDmode, resv,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       res)));

  cc = gen_compare_reg_1 (NE, resv, val);
  emit_insn (gen_rtx_SET (VOIDmode, val, resv));

  /* Use cbranchcc4 to separate the compare and branch!  */
  emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
                                  cc, const0_rtx, loop_label));

  emit_label (end_label);

  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_AND (SImode, res, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_LSHIFTRT (SImode, res, off)));

  emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
}
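
/* To make the retry logic above easier to follow, here is a rough
   C-level sketch of what the emitted RTL computes for the QImode case.
   It is purely illustrative and never compiled: the function and
   parameter names are invented, SHIFT stands for the byte-order
   adjusted value of OFF, and the word-wide compare-and-swap is written
   with the __sync_val_compare_and_swap builtin.  */
#if 0
static unsigned char
cas_byte_sketch (unsigned int *word, unsigned int shift,
                 unsigned char expected, unsigned char desired)
{
  unsigned int mask = 0xffu << shift;
  unsigned int oldv = ((unsigned int) expected << shift) & mask;
  unsigned int newv = ((unsigned int) desired << shift) & mask;
  unsigned int val = *word & ~mask;   /* the other bytes of the word */
  unsigned int observed;

  for (;;)
    {
      unsigned int expected_word = oldv | val;
      unsigned int new_word = newv | val;

      observed = __sync_val_compare_and_swap (word, expected_word, new_word);
      if (observed == expected_word)
        break;                        /* our byte was swapped in */
      if ((observed & ~mask) == val)
        break;                        /* our byte differed: CAS failed */
      val = observed & ~mask;         /* unrelated bytes changed: retry */
    }

  /* The caller compares this byte against EXPECTED to tell success from
     failure, which corresponds to how RESULT is produced above.  */
  return (unsigned char) ((observed & mask) >> shift);
}
#endif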

/* Implement TARGET_FRAME_POINTER_REQUIRED.  */

static bool
sparc_frame_pointer_required (void)
{
  return !(leaf_function_p () && only_leaf_regs_used ());
}
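
/* That is, the frame pointer may only be omitted for a leaf function
   that also confines itself to the registers a leaf routine may use, so
   it can execute entirely in its caller's register window and stack
   frame.  */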

/* The way this is structured, we can't eliminate SFP in favor of SP
   if the frame pointer is required: we want to use the SFP->HFP elimination
   in that case.  But the test in update_eliminables doesn't know we are
   assuming below that we only do the former elimination.  */

static bool
sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == HARD_FRAME_POINTER_REGNUM
          || !targetm.frame_pointer_required ());
}
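
/* Concretely: eliminating the soft frame pointer to the hard frame
   pointer is always allowed, whereas eliminating anything directly to
   the stack pointer is only allowed when no frame pointer is needed.  */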

#include "gt-sparc.h"