1 /* Subroutines for insn-output.c for SPARC.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
27 #include "coretypes.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "insn-codes.h"
35 #include "conditions.h"
37 #include "insn-attr.h"
44 #include "diagnostic-core.h"
50 #include "target-def.h"
51 #include "cfglayout.h"
53 #include "langhooks.h"
57 #include "dwarf2out.h"
static const
struct processor_costs cypress_costs = {
62 COSTS_N_INSNS (2), /* int load */
63 COSTS_N_INSNS (2), /* int signed load */
64 COSTS_N_INSNS (2), /* int zeroed load */
65 COSTS_N_INSNS (2), /* float load */
66 COSTS_N_INSNS (5), /* fmov, fneg, fabs */
67 COSTS_N_INSNS (5), /* fadd, fsub */
68 COSTS_N_INSNS (1), /* fcmp */
69 COSTS_N_INSNS (1), /* fmov, fmovr */
70 COSTS_N_INSNS (7), /* fmul */
71 COSTS_N_INSNS (37), /* fdivs */
72 COSTS_N_INSNS (37), /* fdivd */
73 COSTS_N_INSNS (63), /* fsqrts */
74 COSTS_N_INSNS (63), /* fsqrtd */
75 COSTS_N_INSNS (1), /* imul */
76 COSTS_N_INSNS (1), /* imulX */
77 0, /* imul bit factor */
78 COSTS_N_INSNS (1), /* idiv */
79 COSTS_N_INSNS (1), /* idivX */
80 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
86 COSTS_N_INSNS (1), /* int load */
87 COSTS_N_INSNS (1), /* int signed load */
88 COSTS_N_INSNS (1), /* int zeroed load */
89 COSTS_N_INSNS (0), /* float load */
90 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
91 COSTS_N_INSNS (3), /* fadd, fsub */
92 COSTS_N_INSNS (3), /* fcmp */
93 COSTS_N_INSNS (1), /* fmov, fmovr */
94 COSTS_N_INSNS (3), /* fmul */
95 COSTS_N_INSNS (6), /* fdivs */
96 COSTS_N_INSNS (9), /* fdivd */
97 COSTS_N_INSNS (12), /* fsqrts */
98 COSTS_N_INSNS (12), /* fsqrtd */
99 COSTS_N_INSNS (4), /* imul */
100 COSTS_N_INSNS (4), /* imulX */
101 0, /* imul bit factor */
102 COSTS_N_INSNS (4), /* idiv */
103 COSTS_N_INSNS (4), /* idivX */
104 COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
110 COSTS_N_INSNS (1), /* int load */
111 COSTS_N_INSNS (1), /* int signed load */
112 COSTS_N_INSNS (1), /* int zeroed load */
113 COSTS_N_INSNS (1), /* float load */
114 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
115 COSTS_N_INSNS (1), /* fadd, fsub */
116 COSTS_N_INSNS (1), /* fcmp */
117 COSTS_N_INSNS (1), /* fmov, fmovr */
118 COSTS_N_INSNS (1), /* fmul */
119 COSTS_N_INSNS (8), /* fdivs */
120 COSTS_N_INSNS (12), /* fdivd */
121 COSTS_N_INSNS (17), /* fsqrts */
122 COSTS_N_INSNS (17), /* fsqrtd */
123 COSTS_N_INSNS (17), /* imul */
124 COSTS_N_INSNS (17), /* imulX */
125 0, /* imul bit factor */
126 COSTS_N_INSNS (17), /* idiv */
127 COSTS_N_INSNS (17), /* idivX */
128 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
134 COSTS_N_INSNS (3), /* int load */
135 COSTS_N_INSNS (3), /* int signed load */
136 COSTS_N_INSNS (1), /* int zeroed load */
137 COSTS_N_INSNS (1), /* float load */
138 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
139 COSTS_N_INSNS (1), /* fadd, fsub */
140 COSTS_N_INSNS (1), /* fcmp */
141 COSTS_N_INSNS (1), /* fmov, fmovr */
142 COSTS_N_INSNS (1), /* fmul */
143 COSTS_N_INSNS (1), /* fdivs */
144 COSTS_N_INSNS (1), /* fdivd */
145 COSTS_N_INSNS (1), /* fsqrts */
146 COSTS_N_INSNS (1), /* fsqrtd */
147 COSTS_N_INSNS (5), /* imul */
148 COSTS_N_INSNS (5), /* imulX */
149 0, /* imul bit factor */
150 COSTS_N_INSNS (5), /* idiv */
151 COSTS_N_INSNS (5), /* idivX */
152 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
158 COSTS_N_INSNS (2), /* int load */
159 COSTS_N_INSNS (3), /* int signed load */
160 COSTS_N_INSNS (2), /* int zeroed load */
161 COSTS_N_INSNS (2), /* float load */
162 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
163 COSTS_N_INSNS (4), /* fadd, fsub */
164 COSTS_N_INSNS (1), /* fcmp */
165 COSTS_N_INSNS (2), /* fmov, fmovr */
166 COSTS_N_INSNS (4), /* fmul */
167 COSTS_N_INSNS (13), /* fdivs */
168 COSTS_N_INSNS (23), /* fdivd */
169 COSTS_N_INSNS (13), /* fsqrts */
170 COSTS_N_INSNS (23), /* fsqrtd */
171 COSTS_N_INSNS (4), /* imul */
172 COSTS_N_INSNS (4), /* imulX */
173 2, /* imul bit factor */
174 COSTS_N_INSNS (37), /* idiv */
175 COSTS_N_INSNS (68), /* idivX */
176 COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
182 COSTS_N_INSNS (2), /* int load */
183 COSTS_N_INSNS (3), /* int signed load */
184 COSTS_N_INSNS (3), /* int zeroed load */
185 COSTS_N_INSNS (2), /* float load */
186 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
187 COSTS_N_INSNS (4), /* fadd, fsub */
188 COSTS_N_INSNS (5), /* fcmp */
189 COSTS_N_INSNS (3), /* fmov, fmovr */
190 COSTS_N_INSNS (4), /* fmul */
191 COSTS_N_INSNS (17), /* fdivs */
192 COSTS_N_INSNS (20), /* fdivd */
193 COSTS_N_INSNS (20), /* fsqrts */
194 COSTS_N_INSNS (29), /* fsqrtd */
195 COSTS_N_INSNS (6), /* imul */
196 COSTS_N_INSNS (6), /* imulX */
197 0, /* imul bit factor */
198 COSTS_N_INSNS (40), /* idiv */
199 COSTS_N_INSNS (71), /* idivX */
200 COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
206 COSTS_N_INSNS (3), /* int load */
207 COSTS_N_INSNS (3), /* int signed load */
208 COSTS_N_INSNS (3), /* int zeroed load */
209 COSTS_N_INSNS (9), /* float load */
210 COSTS_N_INSNS (8), /* fmov, fneg, fabs */
211 COSTS_N_INSNS (8), /* fadd, fsub */
212 COSTS_N_INSNS (26), /* fcmp */
213 COSTS_N_INSNS (8), /* fmov, fmovr */
214 COSTS_N_INSNS (29), /* fmul */
215 COSTS_N_INSNS (54), /* fdivs */
216 COSTS_N_INSNS (83), /* fdivd */
217 COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
218 COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
219 COSTS_N_INSNS (11), /* imul */
220 COSTS_N_INSNS (11), /* imulX */
221 0, /* imul bit factor */
222 COSTS_N_INSNS (72), /* idiv */
223 COSTS_N_INSNS (72), /* idivX */
224 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
230 COSTS_N_INSNS (3), /* int load */
231 COSTS_N_INSNS (3), /* int signed load */
232 COSTS_N_INSNS (3), /* int zeroed load */
233 COSTS_N_INSNS (3), /* float load */
234 COSTS_N_INSNS (6), /* fmov, fneg, fabs */
235 COSTS_N_INSNS (6), /* fadd, fsub */
236 COSTS_N_INSNS (6), /* fcmp */
237 COSTS_N_INSNS (6), /* fmov, fmovr */
238 COSTS_N_INSNS (6), /* fmul */
239 COSTS_N_INSNS (19), /* fdivs */
240 COSTS_N_INSNS (33), /* fdivd */
241 COSTS_N_INSNS (19), /* fsqrts */
242 COSTS_N_INSNS (33), /* fsqrtd */
243 COSTS_N_INSNS (5), /* imul */
244 COSTS_N_INSNS (5), /* imulX */
245 0, /* imul bit factor */
246 COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
247 COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
248 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
252 const struct processor_costs *sparc_costs = &cypress_costs;
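/* As a rough illustration of how these tables are consumed: every entry is
   expressed with COSTS_N_INSNS, which (under its usual definition of about
   4 cost units per instruction) makes e.g. cypress_costs' fdivs value of
   COSTS_N_INSNS (37) mean "roughly 37 simple instructions".  sparc_rtx_costs
   reads these relative weights through the sparc_costs pointer below, and
   sparc_option_override repoints that pointer at the table matching the
   selected -mcpu/-mtune value; cypress_costs here is only the fallback.  */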
254 #ifdef HAVE_AS_RELAX_OPTION
255 /* If 'as' and 'ld' are relaxing tail call insns into branch always, use
256 "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out
   whether anything branches to a point between the sethi and the jmp.  */
259 #define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
265 /* Global variables for machine-dependent things. */
267 /* Size of frame. Need to know this to emit return insns from leaf procedures.
268 ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
269 reload pass. This is important as the value is later used for scheduling
270 (to see what can go in a delay slot).
271 APPARENT_FSIZE is the size of the stack less the register save area and less
272 the outgoing argument area. It is used when saving call preserved regs. */
273 static HOST_WIDE_INT apparent_fsize;
274 static HOST_WIDE_INT actual_fsize;
276 /* Number of live general or floating point registers needed to be
277 saved (as 4-byte quantities). */
278 static int num_gfregs;
280 /* The alias set for prologue/epilogue register save/restore. */
281 static GTY(()) alias_set_type sparc_sr_alias_set;
283 /* The alias set for the structure return value. */
284 static GTY(()) alias_set_type struct_value_alias_set;
286 /* Vector to say how input registers are mapped to output registers.
287 HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
288 eliminate it. You must use -fomit-frame-pointer to get that. */
289 char leaf_reg_remap[] =
290 { 0, 1, 2, 3, 4, 5, 6, 7,
291 -1, -1, -1, -1, -1, -1, 14, -1,
292 -1, -1, -1, -1, -1, -1, -1, -1,
293 8, 9, 10, 11, 12, 13, -1, 15,
295 32, 33, 34, 35, 36, 37, 38, 39,
296 40, 41, 42, 43, 44, 45, 46, 47,
297 48, 49, 50, 51, 52, 53, 54, 55,
298 56, 57, 58, 59, 60, 61, 62, 63,
299 64, 65, 66, 67, 68, 69, 70, 71,
300 72, 73, 74, 75, 76, 77, 78, 79,
301 80, 81, 82, 83, 84, 85, 86, 87,
302 88, 89, 90, 91, 92, 93, 94, 95,
303 96, 97, 98, 99, 100};
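/* Reading the table above: in a function that qualifies for the leaf
   optimization, incoming-argument registers are renamed to the caller's
   output registers, e.g. %i0 (hard reg 24) becomes %o0 (8), %i5 (29)
   becomes %o5 (13) and %i7 (31) becomes %o7 (15); %sp (14) and the global
   and float registers map to themselves, while the entries marked -1
   (%o0-%o5, %o7, the %l registers and %fp) must not occur at all.  */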
305 /* Vector, indexed by hard register number, which contains 1
306 for a register that is allowable in a candidate for leaf
307 function treatment. */
308 char sparc_leaf_regs[] =
309 { 1, 1, 1, 1, 1, 1, 1, 1,
310 0, 0, 0, 0, 0, 0, 1, 0,
311 0, 0, 0, 0, 0, 0, 0, 0,
312 1, 1, 1, 1, 1, 1, 0, 1,
313 1, 1, 1, 1, 1, 1, 1, 1,
314 1, 1, 1, 1, 1, 1, 1, 1,
315 1, 1, 1, 1, 1, 1, 1, 1,
316 1, 1, 1, 1, 1, 1, 1, 1,
317 1, 1, 1, 1, 1, 1, 1, 1,
318 1, 1, 1, 1, 1, 1, 1, 1,
319 1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};
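/* In other words (a reading of the table, not a definition): a leaf
   candidate may freely use the global registers, the incoming %i registers
   (later renamed via leaf_reg_remap above), %sp and all floating-point and
   condition-code registers, but not the %o or %l registers, nor %fp.  */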
struct GTY(()) machine_function
{
325 /* Some local-dynamic TLS symbol name. */
326 const char *some_ld_name;
328 /* True if the current function is leaf and uses only leaf regs,
329 so that the SPARC leaf function optimization can be applied.
330 Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};
338 #define sparc_leaf_function_p cfun->machine->leaf_function_p
339 #define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
341 /* Register we pretend to think the frame pointer is allocated to.
342 Normally, this is %fp, but if we are in a leaf procedure, this
343 is %sp+"something". We record "something" separately as it may
344 be too big for reg+constant addressing. */
345 static rtx frame_base_reg;
346 static HOST_WIDE_INT frame_base_offset;
348 /* 1 if the next opcode is to be specially indented. */
349 int sparc_indent_opcode = 0;
351 static bool sparc_handle_option (size_t, const char *, int);
352 static void sparc_option_override (void);
353 static void sparc_init_modes (void);
354 static void scan_record_type (const_tree, int *, int *, int *);
355 static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
356 const_tree, bool, bool, int *, int *);
358 static int supersparc_adjust_cost (rtx, rtx, rtx, int);
359 static int hypersparc_adjust_cost (rtx, rtx, rtx, int);
361 static void sparc_emit_set_const32 (rtx, rtx);
362 static void sparc_emit_set_const64 (rtx, rtx);
363 static void sparc_output_addr_vec (rtx);
364 static void sparc_output_addr_diff_vec (rtx);
365 static void sparc_output_deferred_case_vectors (void);
366 static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
367 static rtx sparc_builtin_saveregs (void);
368 static int epilogue_renumber (rtx *, int);
369 static bool sparc_assemble_integer (rtx, unsigned int, int);
370 static int set_extends (rtx);
371 static void load_pic_register (void);
372 static int save_or_restore_regs (int, int, rtx, int, int);
373 static void emit_save_or_restore_regs (int);
374 static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
375 static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
376 static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
377 tree) ATTRIBUTE_UNUSED;
378 static int sparc_adjust_cost (rtx, rtx, rtx, int);
379 static int sparc_issue_rate (void);
380 static void sparc_sched_init (FILE *, int, int);
381 static int sparc_use_sched_lookahead (void);
383 static void emit_soft_tfmode_libcall (const char *, int, rtx *);
384 static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
385 static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
386 static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
387 static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
389 static bool sparc_function_ok_for_sibcall (tree, tree);
390 static void sparc_init_libfuncs (void);
391 static void sparc_init_builtins (void);
392 static void sparc_vis_init_builtins (void);
393 static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
394 static tree sparc_fold_builtin (tree, int, tree *, bool);
395 static int sparc_vis_mul8x16 (int, int);
396 static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
397 static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
398 HOST_WIDE_INT, tree);
399 static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
400 HOST_WIDE_INT, const_tree);
401 static struct machine_function * sparc_init_machine_status (void);
402 static bool sparc_cannot_force_const_mem (rtx);
403 static rtx sparc_tls_get_addr (void);
404 static rtx sparc_tls_got (void);
405 static const char *get_some_local_dynamic_name (void);
406 static int get_some_local_dynamic_name_1 (rtx *, void *);
407 static bool sparc_rtx_costs (rtx, int, int, int *, bool);
408 static bool sparc_promote_prototypes (const_tree);
409 static rtx sparc_function_value (const_tree, const_tree, bool);
410 static rtx sparc_libcall_value (enum machine_mode, const_rtx);
411 static bool sparc_function_value_regno_p (const unsigned int);
412 static rtx sparc_struct_value_rtx (tree, int);
413 static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
414 int *, const_tree, int);
415 static bool sparc_return_in_memory (const_tree, const_tree);
416 static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
417 static void sparc_va_start (tree, rtx);
418 static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
419 static bool sparc_vector_mode_supported_p (enum machine_mode);
420 static bool sparc_tls_referenced_p (rtx);
421 static rtx sparc_legitimize_tls_address (rtx);
422 static rtx sparc_legitimize_pic_address (rtx, rtx);
423 static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
424 static bool sparc_mode_dependent_address_p (const_rtx);
425 static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
426 enum machine_mode, const_tree, bool);
427 static void sparc_function_arg_advance (CUMULATIVE_ARGS *,
428 enum machine_mode, const_tree, bool);
429 static rtx sparc_function_arg_1 (const CUMULATIVE_ARGS *,
430 enum machine_mode, const_tree, bool, bool);
431 static rtx sparc_function_arg (CUMULATIVE_ARGS *,
432 enum machine_mode, const_tree, bool);
433 static rtx sparc_function_incoming_arg (CUMULATIVE_ARGS *,
434 enum machine_mode, const_tree, bool);
435 static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
436 enum machine_mode, tree, bool);
437 static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
438 static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
439 static void sparc_file_end (void);
440 static bool sparc_frame_pointer_required (void);
441 static bool sparc_can_eliminate (const int, const int);
442 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
445 static void sparc_trampoline_init (rtx, tree, rtx);
446 static enum machine_mode sparc_preferred_simd_mode (enum machine_mode);
448 #ifdef SUBTARGET_ATTRIBUTE_TABLE
449 /* Table of valid machine attributes. */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
458 /* Option handling. */
461 enum cmodel sparc_cmodel;
463 char sparc_hard_reg_printed[8];
struct sparc_cpu_select sparc_select[] =
{
  /* switch	name,		tune	arch */
  { (char *)0,	"default",	1,	1 },
  { (char *)0,	"-mcpu=",	1,	1 },
  { (char *)0,	"-mtune=",	1,	0 },
  { 0, 0, 0, 0 }
};
474 /* CPU type. This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx. */
475 enum processor_type sparc_cpu;
/* Whether an FPU option was specified.  */
478 static bool fpu_option_set = false;
480 /* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
static const struct default_options sparc_option_optimization_table[] =
  {
    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
    { OPT_LEVELS_NONE, 0, NULL, 0 }
  };
487 /* Initialize the GCC target structure. */
489 /* The default is to use .half rather than .short for aligned HI objects. */
490 #undef TARGET_ASM_ALIGNED_HI_OP
491 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
493 #undef TARGET_ASM_UNALIGNED_HI_OP
494 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
495 #undef TARGET_ASM_UNALIGNED_SI_OP
496 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
497 #undef TARGET_ASM_UNALIGNED_DI_OP
498 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
500 /* The target hook has to handle DI-mode values. */
501 #undef TARGET_ASM_INTEGER
502 #define TARGET_ASM_INTEGER sparc_assemble_integer
504 #undef TARGET_ASM_FUNCTION_PROLOGUE
505 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
506 #undef TARGET_ASM_FUNCTION_EPILOGUE
507 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
509 #undef TARGET_SCHED_ADJUST_COST
510 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
511 #undef TARGET_SCHED_ISSUE_RATE
512 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
513 #undef TARGET_SCHED_INIT
514 #define TARGET_SCHED_INIT sparc_sched_init
515 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
516 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
518 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
519 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
521 #undef TARGET_INIT_LIBFUNCS
522 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
523 #undef TARGET_INIT_BUILTINS
524 #define TARGET_INIT_BUILTINS sparc_init_builtins
526 #undef TARGET_LEGITIMIZE_ADDRESS
527 #define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
528 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
529 #define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p
531 #undef TARGET_EXPAND_BUILTIN
532 #define TARGET_EXPAND_BUILTIN sparc_expand_builtin
533 #undef TARGET_FOLD_BUILTIN
534 #define TARGET_FOLD_BUILTIN sparc_fold_builtin
537 #undef TARGET_HAVE_TLS
538 #define TARGET_HAVE_TLS true
541 #undef TARGET_CANNOT_FORCE_CONST_MEM
542 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
544 #undef TARGET_ASM_OUTPUT_MI_THUNK
545 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
546 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
547 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
549 #undef TARGET_RTX_COSTS
550 #define TARGET_RTX_COSTS sparc_rtx_costs
551 #undef TARGET_ADDRESS_COST
552 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
554 #undef TARGET_PROMOTE_FUNCTION_MODE
555 #define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode
557 #undef TARGET_PROMOTE_PROTOTYPES
558 #define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes
560 #undef TARGET_FUNCTION_VALUE
561 #define TARGET_FUNCTION_VALUE sparc_function_value
562 #undef TARGET_LIBCALL_VALUE
563 #define TARGET_LIBCALL_VALUE sparc_libcall_value
564 #undef TARGET_FUNCTION_VALUE_REGNO_P
565 #define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p
567 #undef TARGET_STRUCT_VALUE_RTX
568 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
569 #undef TARGET_RETURN_IN_MEMORY
570 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
571 #undef TARGET_MUST_PASS_IN_STACK
572 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
573 #undef TARGET_PASS_BY_REFERENCE
574 #define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
575 #undef TARGET_ARG_PARTIAL_BYTES
576 #define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
577 #undef TARGET_FUNCTION_ARG_ADVANCE
578 #define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
579 #undef TARGET_FUNCTION_ARG
580 #define TARGET_FUNCTION_ARG sparc_function_arg
581 #undef TARGET_FUNCTION_INCOMING_ARG
582 #define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
584 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
585 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
586 #undef TARGET_STRICT_ARGUMENT_NAMING
587 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
589 #undef TARGET_EXPAND_BUILTIN_VA_START
590 #define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
591 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
592 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
594 #undef TARGET_VECTOR_MODE_SUPPORTED_P
595 #define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
597 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
598 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode
600 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
601 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec
603 #ifdef SUBTARGET_INSERT_ATTRIBUTES
604 #undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif
608 #ifdef SUBTARGET_ATTRIBUTE_TABLE
609 #undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif
613 #undef TARGET_RELAXED_ORDERING
614 #define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING
616 #undef TARGET_DEFAULT_TARGET_FLAGS
617 #define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
618 #undef TARGET_HANDLE_OPTION
619 #define TARGET_HANDLE_OPTION sparc_handle_option
620 #undef TARGET_OPTION_OVERRIDE
621 #define TARGET_OPTION_OVERRIDE sparc_option_override
622 #undef TARGET_OPTION_OPTIMIZATION_TABLE
623 #define TARGET_OPTION_OPTIMIZATION_TABLE sparc_option_optimization_table
625 #if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
626 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif
630 #undef TARGET_ASM_FILE_END
631 #define TARGET_ASM_FILE_END sparc_file_end
633 #undef TARGET_FRAME_POINTER_REQUIRED
634 #define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required
636 #undef TARGET_CAN_ELIMINATE
637 #define TARGET_CAN_ELIMINATE sparc_can_eliminate
639 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
640 #undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif
644 #undef TARGET_LEGITIMATE_ADDRESS_P
645 #define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p
647 #undef TARGET_TRAMPOLINE_INIT
648 #define TARGET_TRAMPOLINE_INIT sparc_trampoline_init
650 struct gcc_target targetm = TARGET_INITIALIZER;
652 /* Implement TARGET_HANDLE_OPTION. */
655 sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
660 case OPT_mhard_float:
661 case OPT_msoft_float:
662 fpu_option_set = true;
666 sparc_select[1].string = arg;
670 sparc_select[2].string = arg;
/* Validate and override various options, and do some machine dependent
   initialization.  */

static void
sparc_option_override (void)
{
683 static struct code_model {
684 const char *const name;
685 const enum cmodel value;
686 } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
689 { "medmid", CM_MEDMID },
690 { "medany", CM_MEDANY },
691 { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
694 const struct code_model *cmodel;
695 /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=. */
696 static struct cpu_default {
    const int cpu;
    const char *const name;
699 } const cpu_default[] = {
700 /* There must be one entry here for each TARGET_CPU value. */
701 { TARGET_CPU_sparc, "cypress" },
702 { TARGET_CPU_sparclet, "tsc701" },
703 { TARGET_CPU_sparclite, "f930" },
704 { TARGET_CPU_v8, "v8" },
705 { TARGET_CPU_hypersparc, "hypersparc" },
706 { TARGET_CPU_sparclite86x, "sparclite86x" },
707 { TARGET_CPU_supersparc, "supersparc" },
708 { TARGET_CPU_v9, "v9" },
709 { TARGET_CPU_ultrasparc, "ultrasparc" },
710 { TARGET_CPU_ultrasparc3, "ultrasparc3" },
711 { TARGET_CPU_niagara, "niagara" },
    { TARGET_CPU_niagara2, "niagara2" },
    { 0, 0 }
  };
715 const struct cpu_default *def;
716 /* Table of values for -m{cpu,tune}=. */
717 static struct cpu_table {
718 const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
722 } const cpu_table[] = {
723 { "v7", PROCESSOR_V7, MASK_ISA, 0 },
724 { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
725 { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
726 /* TI TMS390Z55 supersparc */
727 { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
728 { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
729 /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
730 The Fujitsu MB86934 is the recent sparclite chip, with an fpu. */
731 { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
732 { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
733 { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
736 { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
738 { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
739 { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
740 /* TI ultrasparc I, II, IIi */
741 { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
    /* Although insns using %y are deprecated, it is a clear win on current
       ultrasparcs.  */
744 |MASK_DEPRECATED_V8_INSNS},
745 /* TI ultrasparc III */
746 /* ??? Check if %y issue still holds true in ultra3. */
747 { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
749 { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    { "niagara2", PROCESSOR_NIAGARA2, MASK_ISA, MASK_V9},
    { 0, (enum processor_type) 0, 0, 0 }
  };
753 const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;
757 #ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
761 #ifndef SPARC_BI_ARCH
762 /* Check for unsupported architecture size. */
763 if (! TARGET_64BIT != DEFAULT_ARCH32_P)
764 error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif
768 /* We force all 64bit archs to use 128 bit long double */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }
775 /* Code model selection. */
776 sparc_cmodel = SPARC_DEFAULT_CMODEL;
780 sparc_cmodel = CM_32;
783 if (sparc_cmodel_string != NULL)
787 for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
788 if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
790 if (cmodel->name == NULL)
791 error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
793 sparc_cmodel = cmodel->value;
796 error ("-mcmodel= is not supported on 32 bit systems");
799 fpu = target_flags & MASK_FPU; /* save current -mfpu status */
801 /* Set the default CPU. */
802 for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
805 gcc_assert (def->name);
806 sparc_select[0].string = def->name;
808 for (sel = &sparc_select[0]; sel->name; ++sel)
812 for (cpu = &cpu_table[0]; cpu->name; ++cpu)
813 if (! strcmp (sel->string, cpu->name))
816 sparc_cpu = cpu->processor;
820 target_flags &= ~cpu->disable;
821 target_flags |= cpu->enable;
827 error ("bad value (%s) for %s switch", sel->string, sel->name);
831 /* If -mfpu or -mno-fpu was explicitly used, don't override with
832 the processor default. */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;
836 /* Don't allow -mvis if FPU is disabled. */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;
840 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }
849 /* Use the deprecated v8 insns for sparc64 in 32 bit mode. */
850 if (TARGET_V9 && TARGET_ARCH32)
851 target_flags |= MASK_DEPRECATED_V8_INSNS;
853 /* V8PLUS requires V9, makes no sense in 64 bit mode. */
854 if (! TARGET_V9 || TARGET_ARCH64)
855 target_flags &= ~MASK_V8PLUS;
857 /* Don't use stack biasing in 32 bit mode. */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;
861 /* Supply a default value for align_functions. */
862 if (align_functions == 0
863 && (sparc_cpu == PROCESSOR_ULTRASPARC
864 || sparc_cpu == PROCESSOR_ULTRASPARC3
865 || sparc_cpu == PROCESSOR_NIAGARA
866 || sparc_cpu == PROCESSOR_NIAGARA2))
867 align_functions = 32;
869 /* Validate PCC_STRUCT_RETURN. */
870 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
871 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
873 /* Only use .uaxword when compiling for a 64-bit target. */
  if (! TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;
  /* Do various machine dependent initializations.  */
  sparc_init_modes ();
880 /* Acquire unique alias sets for our private stuff. */
881 sparc_sr_alias_set = new_alias_set ();
882 struct_value_alias_set = new_alias_set ();
884 /* Set up function hooks. */
885 init_machine_status = sparc_init_machine_status;
890 case PROCESSOR_CYPRESS:
891 sparc_costs = &cypress_costs;
894 case PROCESSOR_SPARCLITE:
895 case PROCESSOR_SUPERSPARC:
896 sparc_costs = &supersparc_costs;
900 case PROCESSOR_HYPERSPARC:
901 case PROCESSOR_SPARCLITE86X:
902 sparc_costs = &hypersparc_costs;
904 case PROCESSOR_SPARCLET:
905 case PROCESSOR_TSC701:
906 sparc_costs = &sparclet_costs;
909 case PROCESSOR_ULTRASPARC:
910 sparc_costs = &ultrasparc_costs;
912 case PROCESSOR_ULTRASPARC3:
913 sparc_costs = &ultrasparc3_costs;
915 case PROCESSOR_NIAGARA:
916 sparc_costs = &niagara_costs;
918 case PROCESSOR_NIAGARA2:
919 sparc_costs = &niagara2_costs;
923 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
924 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
928 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
929 ((sparc_cpu == PROCESSOR_ULTRASPARC
930 || sparc_cpu == PROCESSOR_NIAGARA
931 || sparc_cpu == PROCESSOR_NIAGARA2)
			  ? 2
			  : (sparc_cpu == PROCESSOR_ULTRASPARC3
			     ? 8 : 3)),
935 global_options.x_param_values,
936 global_options_set.x_param_values);
937 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
938 ((sparc_cpu == PROCESSOR_ULTRASPARC
939 || sparc_cpu == PROCESSOR_ULTRASPARC3
940 || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2)
			  ? 64 : 32),
			 global_options.x_param_values,
			 global_options_set.x_param_values);
}
947 /* Miscellaneous utilities. */
949 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
950 or branch on register contents instructions. */
int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}
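/* These are exactly the conditions the V9 branch/move-on-register-contents
   instructions can test, since they only compare a register against zero:
   for instance "movrnz %o1, %o2, %o3" implements NE and "movrlez"
   implements LE, whereas unsigned conditions such as GEU have no
   register-contents form and must go through the condition codes.  */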
959 /* Nonzero if OP is a floating point constant which can
960 be loaded into an integer register using a single
961 sethi instruction. */
966 if (GET_CODE (op) == CONST_DOUBLE)
971 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
972 REAL_VALUE_TO_TARGET_SINGLE (r, i);
973 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
979 /* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */
986 if (GET_CODE (op) == CONST_DOUBLE)
991 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
992 REAL_VALUE_TO_TARGET_SINGLE (r, i);
993 return SPARC_SIMM13_P (i);
999 /* Nonzero if OP is a floating point constant which can
1000 be loaded into an integer register using a high/losum
1001 instruction sequence. */
1004 fp_high_losum_p (rtx op)
1006 /* The constraints calling this should only be in
1007 SFmode move insns, so any constant which cannot
1008 be moved using a single insn will do. */
1009 if (GET_CODE (op) == CONST_DOUBLE)
1014 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
1015 REAL_VALUE_TO_TARGET_SINGLE (r, i);
1016 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
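/* Worked example for the three predicates above (assuming IEEE single
   format): 1.5f has the image 0x3fc00000.  That value does not fit in a
   signed 13-bit immediate, but its low 10 bits are zero, so a single sethi
   produces it exactly: fp_sethi_p is true while fp_mov_p and
   fp_high_losum_p are false.  A constant like 1.1f (image 0x3f8ccccd),
   whose low-order bits are set, instead needs the high/losum sequence.  */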
1022 /* Expand a move instruction. Return true if all work is done. */
bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
1027 /* Handle sets of MEM first. */
1028 if (GET_CODE (operands[0]) == MEM)
1030 /* 0 is a register (or a pair of registers) on SPARC. */
1031 if (register_or_zero_operand (operands[1], mode))
1034 if (!reload_in_progress)
1036 operands[0] = validize_mem (operands[0]);
1037 operands[1] = force_reg (mode, operands[1]);
1041 /* Fixup TLS cases. */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
1044 && sparc_tls_referenced_p (operands [1]))
1046 operands[1] = sparc_legitimize_tls_address (operands[1]);
1050 /* Fixup PIC cases. */
1051 if (flag_pic && CONSTANT_P (operands[1]))
1053 if (pic_address_needs_scratch (operands[1]))
1054 operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);
1056 /* VxWorks does not impose a fixed gap between segments; the run-time
1057 gap can be different from the object-file gap. We therefore can't
1058 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
1059 are absolutely sure that X is in the same segment as the GOT.
1060 Unfortunately, the flexibility of linker scripts means that we
1061 can't be sure of that in general, so assume that _G_O_T_-relative
1062 accesses are never valid on VxWorks. */
1063 if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
1067 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
1073 gcc_assert (TARGET_ARCH64);
1074 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
1079 if (symbolic_operand (operands[1], mode))
	  operands[1]
	    = sparc_legitimize_pic_address (operands[1],
					    reload_in_progress
					    ? operands[0] : NULL_RTX);
1089 /* If we are trying to toss an integer constant into FP registers,
1090 or loading a FP or vector constant, force it into memory. */
1091 if (CONSTANT_P (operands[1])
1092 && REG_P (operands[0])
1093 && (SPARC_FP_REG_P (REGNO (operands[0]))
1094 || SCALAR_FLOAT_MODE_P (mode)
1095 || VECTOR_MODE_P (mode)))
1097 /* emit_group_store will send such bogosity to us when it is
1098 not storing directly into memory. So fix this up to avoid
1099 crashes in output_constant_pool. */
1100 if (operands [1] == const0_rtx)
1101 operands[1] = CONST0_RTX (mode);
1103 /* We can clear FP registers if TARGET_VIS, and always other regs. */
1104 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && const_zero_operand (operands[1], mode))
	return false;
1108 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
1109 /* We are able to build any SF constant in integer registers
1110 with at most 2 instructions. */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && (reload_completed || reload_in_progress))))
	return false;
1117 operands[1] = force_const_mem (mode, operands[1]);
1118 if (!reload_in_progress)
1119 operands[1] = validize_mem (operands[1]);
1123 /* Accept non-constants and valid constants unmodified. */
1124 if (!CONSTANT_P (operands[1])
1125 || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;
1132 /* All QImode constants require only one insn, so proceed. */
1137 sparc_emit_set_const32 (operands[0], operands[1]);
1141 /* input_operand should have filtered out 32-bit mode. */
1142 sparc_emit_set_const64 (operands[0], operands[1]);
1152 /* Load OP1, a 32-bit constant, into OP0, a register.
1153 We know it can't be done in one insn when we get
1154 here, the move expander guarantees this. */
static void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);
1167 if (GET_CODE (op1) == CONST_INT)
1169 gcc_assert (!small_int_operand (op1, mode)
1170 && !const_high_operand (op1, mode));
1172 /* Emit them as real moves instead of a HIGH/LO_SUM,
1173 this way CSE can see everything and reuse intermediate
1174 values if it wants. */
1175 emit_insn (gen_rtx_SET (VOIDmode, temp,
1176 GEN_INT (INTVAL (op1)
1177 & ~(HOST_WIDE_INT)0x3ff)));
1179 emit_insn (gen_rtx_SET (VOIDmode,
1181 gen_rtx_IOR (mode, temp,
1182 GEN_INT (INTVAL (op1) & 0x3ff))));
1186 /* A symbol, emit in the traditional way. */
1187 emit_insn (gen_rtx_SET (VOIDmode, temp,
1188 gen_rtx_HIGH (mode, op1)));
1189 emit_insn (gen_rtx_SET (VOIDmode,
1190 op0, gen_rtx_LO_SUM (mode, temp, op1)));
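/* Illustration of the CONST_INT path above: loading 0x12345678 first sets
   the scratch to 0x12345400 (the constant with its low 10 bits cleared,
   which the move pattern can emit as a sethi) and then IORs in the
   remaining 0x278, i.e. the classic sethi/or pair, but expressed as two
   plain sets so CSE can see and reuse the intermediate value.  */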
1194 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
1195 If TEMP is nonzero, we are forbidden to use any other scratch
1196 registers. Otherwise, we are allowed to generate them as needed.
1198 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
1199 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
static void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }
1213 /* SPARC-V9 code-model support. */
1214 switch (sparc_cmodel)
1217 /* The range spanned by all instructions in the object is less
1218 than 2^31 bytes (2GB) and the distance from any instruction
1219 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1220 than 2^31 bytes (2GB).
1222 The executable must be in the low 4TB of the virtual address
1225 sethi %hi(symbol), %temp1
1226 or %temp1, %lo(symbol), %reg */
1228 temp1 = temp; /* op0 is allowed. */
1230 temp1 = gen_reg_rtx (DImode);
1232 emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
1233 emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
1237 /* The range spanned by all instructions in the object is less
1238 than 2^31 bytes (2GB) and the distance from any instruction
1239 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1240 than 2^31 bytes (2GB).
1242 The executable must be in the low 16TB of the virtual address
1245 sethi %h44(symbol), %temp1
1246 or %temp1, %m44(symbol), %temp2
1247 sllx %temp2, 12, %temp3
1248 or %temp3, %l44(symbol), %reg */
1253 temp3 = temp; /* op0 is allowed. */
1257 temp1 = gen_reg_rtx (DImode);
1258 temp2 = gen_reg_rtx (DImode);
1259 temp3 = gen_reg_rtx (DImode);
1262 emit_insn (gen_seth44 (temp1, op1));
1263 emit_insn (gen_setm44 (temp2, temp1, op1));
1264 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1265 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
1266 emit_insn (gen_setl44 (op0, temp3, op1));
1270 /* The range spanned by all instructions in the object is less
1271 than 2^31 bytes (2GB) and the distance from any instruction
1272 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1273 than 2^31 bytes (2GB).
1275 The executable can be placed anywhere in the virtual address
1278 sethi %hh(symbol), %temp1
1279 sethi %lm(symbol), %temp2
1280 or %temp1, %hm(symbol), %temp3
1281 sllx %temp3, 32, %temp4
1282 or %temp4, %temp2, %temp5
1283 or %temp5, %lo(symbol), %reg */
1286 /* It is possible that one of the registers we got for operands[2]
1287 might coincide with that of operands[0] (which is why we made
1288 it TImode). Pick the other one to use as our scratch. */
1289 if (rtx_equal_p (temp, op0))
1291 gcc_assert (ti_temp);
1292 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1295 temp2 = temp; /* op0 is _not_ allowed, see above. */
1302 temp1 = gen_reg_rtx (DImode);
1303 temp2 = gen_reg_rtx (DImode);
1304 temp3 = gen_reg_rtx (DImode);
1305 temp4 = gen_reg_rtx (DImode);
1306 temp5 = gen_reg_rtx (DImode);
1309 emit_insn (gen_sethh (temp1, op1));
1310 emit_insn (gen_setlm (temp2, op1));
1311 emit_insn (gen_sethm (temp3, temp1, op1));
1312 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1313 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1314 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1315 gen_rtx_PLUS (DImode, temp4, temp2)));
1316 emit_insn (gen_setlo (op0, temp5, op1));
1320 /* Old old old backwards compatibility kruft here.
1321 Essentially it is MEDLOW with a fixed 64-bit
1322 virtual base added to all data segment addresses.
1323 Text-segment stuff is computed like MEDANY, we can't
1324 reuse the code above because the relocation knobs
1327 Data segment: sethi %hi(symbol), %temp1
1328 add %temp1, EMBMEDANY_BASE_REG, %temp2
1329 or %temp2, %lo(symbol), %reg */
1330 if (data_segment_operand (op1, GET_MODE (op1)))
1334 temp1 = temp; /* op0 is allowed. */
1339 temp1 = gen_reg_rtx (DImode);
1340 temp2 = gen_reg_rtx (DImode);
1343 emit_insn (gen_embmedany_sethi (temp1, op1));
1344 emit_insn (gen_embmedany_brsum (temp2, temp1));
1345 emit_insn (gen_embmedany_losum (op0, temp2, op1));
1348 /* Text segment: sethi %uhi(symbol), %temp1
1349 sethi %hi(symbol), %temp2
1350 or %temp1, %ulo(symbol), %temp3
1351 sllx %temp3, 32, %temp4
1352 or %temp4, %temp2, %temp5
1353 or %temp5, %lo(symbol), %reg */
1358 /* It is possible that one of the registers we got for operands[2]
1359 might coincide with that of operands[0] (which is why we made
1360 it TImode). Pick the other one to use as our scratch. */
1361 if (rtx_equal_p (temp, op0))
1363 gcc_assert (ti_temp);
1364 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1367 temp2 = temp; /* op0 is _not_ allowed, see above. */
1374 temp1 = gen_reg_rtx (DImode);
1375 temp2 = gen_reg_rtx (DImode);
1376 temp3 = gen_reg_rtx (DImode);
1377 temp4 = gen_reg_rtx (DImode);
1378 temp5 = gen_reg_rtx (DImode);
1381 emit_insn (gen_embmedany_textuhi (temp1, op1));
1382 emit_insn (gen_embmedany_texthi (temp2, op1));
1383 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
1384 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1385 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1386 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1387 gen_rtx_PLUS (DImode, temp4, temp2)));
1388 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
1397 #if HOST_BITS_PER_WIDE_INT == 32
static void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
1404 /* These avoid problems when cross compiling. If we do not
1405 go through all this hair then the optimizer will see
1406 invalid REG_EQUAL notes or in some cases none at all. */
1407 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
1408 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
1409 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
1410 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
1412 /* The optimizer is not to assume anything about exactly
1413 which bits are set for a HIGH, they are unspecified.
1414 Unfortunately this leads to many missed optimizations
1415 during CSE. We mask out the non-HIGH bits, and matches
1416 a plain movdi, to alleviate this problem. */
1418 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
1420 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
1424 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
1426 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
1430 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
1432 return gen_rtx_IOR (DImode, src, GEN_INT (val));
1436 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
1438 return gen_rtx_XOR (DImode, src, GEN_INT (val));
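/* For instance, gen_safe_HIGH64 (reg, 0x12345678) emits a plain set of
   0x12345400 rather than a HIGH rtx: the value is exact (its low 10 bits
   already cleared), so CSE can reason about it, yet it is still matchable
   by the normal move patterns.  The OR64/XOR64 helpers likewise keep the
   immediate visible as an ordinary CONST_INT.  */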
1441 /* Worker routines for 64-bit constant formation on arch64.
1442 One of the key things to be doing in these emissions is
1443 to create as many temp REGs as possible. This makes it
1444 possible for half-built constants to be used later when
1445 such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */
1449 static void sparc_emit_set_const64_quick1 (rtx, rtx,
1450 unsigned HOST_WIDE_INT, int);
1453 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
1454 unsigned HOST_WIDE_INT low_bits, int is_neg)
1456 unsigned HOST_WIDE_INT high_bits;
  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;
1463 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1466 emit_insn (gen_rtx_SET (VOIDmode, op0,
1467 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1471 /* If we are XOR'ing with -1, then we should emit a one's complement
1472 instead. This way the combiner will notice logical operations
1473 such as ANDN later on and substitute. */
1474 if ((low_bits & 0x3ff) == 0x3ff)
1476 emit_insn (gen_rtx_SET (VOIDmode, op0,
1477 gen_rtx_NOT (DImode, temp)));
1481 emit_insn (gen_rtx_SET (VOIDmode, op0,
1482 gen_safe_XOR64 (temp,
1483 (-(HOST_WIDE_INT)0x400
1484 | (low_bits & 0x3ff)))));
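/* A sketch of the is_neg case: for the constant 0xffffffff87654321 the
   caller passes low_bits = 0x87654321, so high_bits above becomes
   ~low_bits & 0xffffffff = 0x789abcde.  The sethi leaves
   0x00000000789abc00 in the scratch register, and XORing with
   (-0x400 | 0x321) = 0xffffffffffffff21 flips the upper word to all ones
   and the low word back to 0x87654321, producing the sign-extended
   constant in two instructions.  */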
1489 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1490 unsigned HOST_WIDE_INT, int);
1493 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1494 unsigned HOST_WIDE_INT high_bits,
			       unsigned HOST_WIDE_INT low_immediate,
			       int shift_count)
{
  rtx temp2 = op0;
1500 if ((high_bits & 0xfffffc00) != 0)
1502 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1503 if ((high_bits & ~0xfffffc00) != 0)
1504 emit_insn (gen_rtx_SET (VOIDmode, op0,
1505 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1511 emit_insn (gen_safe_SET64 (temp, high_bits));
1515 /* Now shift it up into place. */
1516 emit_insn (gen_rtx_SET (VOIDmode, op0,
1517 gen_rtx_ASHIFT (DImode, temp2,
1518 GEN_INT (shift_count))));
1520 /* If there is a low immediate part piece, finish up by
1521 putting that in as well. */
1522 if (low_immediate != 0)
1523 emit_insn (gen_rtx_SET (VOIDmode, op0,
1524 gen_safe_OR64 (op0, low_immediate)));
1527 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1528 unsigned HOST_WIDE_INT);
1530 /* Full 64-bit constant decomposition. Even though this is the
1531 'worst' case, we still optimize a few things away. */
1533 sparc_emit_set_const64_longway (rtx op0, rtx temp,
1534 unsigned HOST_WIDE_INT high_bits,
				unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);
1544 if ((high_bits & 0xfffffc00) != 0)
1546 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1547 if ((high_bits & ~0xfffffc00) != 0)
1548 emit_insn (gen_rtx_SET (VOIDmode,
1550 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1556 emit_insn (gen_safe_SET64 (temp, high_bits));
1560 if (!reload_in_progress && !reload_completed)
1562 rtx temp2 = gen_reg_rtx (DImode);
1563 rtx temp3 = gen_reg_rtx (DImode);
1564 rtx temp4 = gen_reg_rtx (DImode);
1566 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1567 gen_rtx_ASHIFT (DImode, sub_temp,
1570 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
1571 if ((low_bits & ~0xfffffc00) != 0)
1573 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1574 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
1575 emit_insn (gen_rtx_SET (VOIDmode, op0,
1576 gen_rtx_PLUS (DImode, temp4, temp3)));
1580 emit_insn (gen_rtx_SET (VOIDmode, op0,
1581 gen_rtx_PLUS (DImode, temp4, temp2)));
1586 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
1587 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
1588 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
1591 /* We are in the middle of reload, so this is really
1592 painful. However we do still make an attempt to
1593 avoid emitting truly stupid code. */
1594 if (low1 != const0_rtx)
1596 emit_insn (gen_rtx_SET (VOIDmode, op0,
1597 gen_rtx_ASHIFT (DImode, sub_temp,
1598 GEN_INT (to_shift))));
1599 emit_insn (gen_rtx_SET (VOIDmode, op0,
1600 gen_rtx_IOR (DImode, op0, low1)));
1608 if (low2 != const0_rtx)
1610 emit_insn (gen_rtx_SET (VOIDmode, op0,
1611 gen_rtx_ASHIFT (DImode, sub_temp,
1612 GEN_INT (to_shift))));
1613 emit_insn (gen_rtx_SET (VOIDmode, op0,
1614 gen_rtx_IOR (DImode, op0, low2)));
1622 emit_insn (gen_rtx_SET (VOIDmode, op0,
1623 gen_rtx_ASHIFT (DImode, sub_temp,
1624 GEN_INT (to_shift))));
1625 if (low3 != const0_rtx)
1626 emit_insn (gen_rtx_SET (VOIDmode, op0,
1627 gen_rtx_IOR (DImode, op0, low3)));
1632 /* Analyze a 64-bit constant for certain properties. */
1633 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
1634 unsigned HOST_WIDE_INT,
1635 int *, int *, int *);
1638 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
1639 unsigned HOST_WIDE_INT low_bits,
1640 int *hbsp, int *lbsp, int *abbasp)
1642 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
1645 lowest_bit_set = highest_bit_set = -1;
1649 if ((lowest_bit_set == -1)
1650 && ((low_bits >> i) & 1))
1652 if ((highest_bit_set == -1)
1653 && ((high_bits >> (32 - i - 1)) & 1))
1654 highest_bit_set = (64 - i - 1);
1657 && ((highest_bit_set == -1)
1658 || (lowest_bit_set == -1)));
1664 if ((lowest_bit_set == -1)
1665 && ((high_bits >> i) & 1))
1666 lowest_bit_set = i + 32;
1667 if ((highest_bit_set == -1)
1668 && ((low_bits >> (32 - i - 1)) & 1))
1669 highest_bit_set = 32 - i - 1;
1672 && ((highest_bit_set == -1)
1673 || (lowest_bit_set == -1)));
1675 /* If there are no bits set this should have gone out
1676 as one instruction! */
1677 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
1678 all_bits_between_are_set = 1;
1679 for (i = lowest_bit_set; i <= highest_bit_set; i++)
1683 if ((low_bits & (1 << i)) != 0)
1688 if ((high_bits & (1 << (i - 32))) != 0)
1691 all_bits_between_are_set = 0;
1694 *hbsp = highest_bit_set;
1695 *lbsp = lowest_bit_set;
1696 *abbasp = all_bits_between_are_set;
1699 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
1702 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
1703 unsigned HOST_WIDE_INT low_bits)
1705 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;
1711 analyze_64bit_constant (high_bits, low_bits,
1712 &highest_bit_set, &lowest_bit_set,
1713 &all_bits_between_are_set);
1715 if ((highest_bit_set == 63
1716 || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
1726 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
1727 unsigned HOST_WIDE_INT,
1730 static unsigned HOST_WIDE_INT
1731 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
1732 unsigned HOST_WIDE_INT low_bits,
1733 int lowest_bit_set, int shift)
1735 HOST_WIDE_INT hi, lo;
1737 if (lowest_bit_set < 32)
1739 lo = (low_bits >> lowest_bit_set) << shift;
1740 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
1745 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
1747 gcc_assert (! (hi & lo));
1751 /* Here we are sure to be arch64 and this is an integer constant
1752 being loaded into a register. Emit the most efficient
1753 insn sequence possible. Detection of all the 1-insn cases
1754 has been done already. */
1756 sparc_emit_set_const64 (rtx op0, rtx op1)
1758 unsigned HOST_WIDE_INT high_bits, low_bits;
1759 int lowest_bit_set, highest_bit_set;
1760 int all_bits_between_are_set;
1763 /* Sanity check that we know what we are working with. */
1764 gcc_assert (TARGET_ARCH64
1765 && (GET_CODE (op0) == SUBREG
1766 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
  if (reload_in_progress || reload_completed)
    temp = op0;

  if (GET_CODE (op1) != CONST_INT)
    {
      sparc_emit_set_symbolic_const64 (op0, op1, temp);
      return;
    }

  if (! temp)
    temp = gen_reg_rtx (DImode);
1780 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1781 low_bits = (INTVAL (op1) & 0xffffffff);
1783 /* low_bits bits 0 --> 31
1784 high_bits bits 32 --> 63 */
1786 analyze_64bit_constant (high_bits, low_bits,
1787 &highest_bit_set, &lowest_bit_set,
1788 &all_bits_between_are_set);
1790 /* First try for a 2-insn sequence. */
1792 /* These situations are preferred because the optimizer can
1793 * do more things with them:
1795 * sllx %reg, shift, %reg
1797 * srlx %reg, shift, %reg
1798 * 3) mov some_small_const, %reg
1799 * sllx %reg, shift, %reg
1801 if (((highest_bit_set == 63
1802 || lowest_bit_set == 0)
1803 && all_bits_between_are_set != 0)
1804 || ((highest_bit_set - lowest_bit_set) < 12))
1806 HOST_WIDE_INT the_const = -1;
1807 int shift = lowest_bit_set;
1809 if ((highest_bit_set != 63
1810 && lowest_bit_set != 0)
1811 || all_bits_between_are_set == 0)
1814 create_simple_focus_bits (high_bits, low_bits,
1817 else if (lowest_bit_set == 0)
1818 shift = -(63 - highest_bit_set);
1820 gcc_assert (SPARC_SIMM13_P (the_const));
1821 gcc_assert (shift != 0);
1823 emit_insn (gen_safe_SET64 (temp, the_const));
1825 emit_insn (gen_rtx_SET (VOIDmode,
1827 gen_rtx_ASHIFT (DImode,
1831 emit_insn (gen_rtx_SET (VOIDmode,
1833 gen_rtx_LSHIFTRT (DImode,
1835 GEN_INT (-shift))));
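/* Example of this 2-insn path: for the constant 0x000003fc00000000 the
   analysis above finds lowest_bit_set = 34 and highest_bit_set = 41, so the
   focused value is 0xff and the emitted sequence is simply
   "mov 0xff, %reg; sllx %reg, 34, %reg".  */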
1839 /* Now a range of 22 or less bits set somewhere.
1840 * 1) sethi %hi(focus_bits), %reg
1841 * sllx %reg, shift, %reg
1842 * 2) sethi %hi(focus_bits), %reg
1843 * srlx %reg, shift, %reg
1845 if ((highest_bit_set - lowest_bit_set) < 21)
1847 unsigned HOST_WIDE_INT focus_bits =
1848 create_simple_focus_bits (high_bits, low_bits,
1849 lowest_bit_set, 10);
1851 gcc_assert (SPARC_SETHI_P (focus_bits));
1852 gcc_assert (lowest_bit_set != 10);
1854 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1856 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1857 if (lowest_bit_set < 10)
1858 emit_insn (gen_rtx_SET (VOIDmode,
1860 gen_rtx_LSHIFTRT (DImode, temp,
1861 GEN_INT (10 - lowest_bit_set))));
1862 else if (lowest_bit_set > 10)
1863 emit_insn (gen_rtx_SET (VOIDmode,
1865 gen_rtx_ASHIFT (DImode, temp,
1866 GEN_INT (lowest_bit_set - 10))));
1870 /* 1) sethi %hi(low_bits), %reg
1871 * or %reg, %lo(low_bits), %reg
1872 * 2) sethi %hi(~low_bits), %reg
1873 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1876 || high_bits == 0xffffffff)
1878 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1879 (high_bits == 0xffffffff));
1883 /* Now, try 3-insn sequences. */
1885 /* 1) sethi %hi(high_bits), %reg
1886 * or %reg, %lo(high_bits), %reg
1887 * sllx %reg, 32, %reg
1891 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1895 /* We may be able to do something quick
1896 when the constant is negated, so try that. */
1897 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1898 (~low_bits) & 0xfffffc00))
1900 /* NOTE: The trailing bits get XOR'd so we need the
1901 non-negated bits, not the negated ones. */
1902 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1904 if ((((~high_bits) & 0xffffffff) == 0
1905 && ((~low_bits) & 0x80000000) == 0)
1906 || (((~high_bits) & 0xffffffff) == 0xffffffff
1907 && ((~low_bits) & 0x80000000) != 0))
1909 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1911 if ((SPARC_SETHI_P (fast_int)
1912 && (~high_bits & 0xffffffff) == 0)
1913 || SPARC_SIMM13_P (fast_int))
1914 emit_insn (gen_safe_SET64 (temp, fast_int));
1916 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1921 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1922 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
1923 sparc_emit_set_const64 (temp, negated_const);
1926 /* If we are XOR'ing with -1, then we should emit a one's complement
1927 instead. This way the combiner will notice logical operations
1928 such as ANDN later on and substitute. */
1929 if (trailing_bits == 0x3ff)
1931 emit_insn (gen_rtx_SET (VOIDmode, op0,
1932 gen_rtx_NOT (DImode, temp)));
1936 emit_insn (gen_rtx_SET (VOIDmode,
1938 gen_safe_XOR64 (temp,
1939 (-0x400 | trailing_bits))));
1944 /* 1) sethi %hi(xxx), %reg
1945 * or %reg, %lo(xxx), %reg
1946 * sllx %reg, yyy, %reg
1948 * ??? This is just a generalized version of the low_bits==0
1949 * thing above, FIXME...
1951 if ((highest_bit_set - lowest_bit_set) < 32)
1953 unsigned HOST_WIDE_INT focus_bits =
1954 create_simple_focus_bits (high_bits, low_bits,
1957 /* We can't get here in this state. */
1958 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1960 /* So what we know is that the set bits straddle the
1961 middle of the 64-bit word. */
1962 sparc_emit_set_const64_quick2 (op0, temp,
1968 /* 1) sethi %hi(high_bits), %reg
1969 * or %reg, %lo(high_bits), %reg
1970 * sllx %reg, 32, %reg
1971 * or %reg, low_bits, %reg
1973 if (SPARC_SIMM13_P(low_bits)
1974 && ((int)low_bits > 0))
1976 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
1980 /* The easiest way when all else fails, is full decomposition. */
1981 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1983 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
1985 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1986 return the mode to be used for the comparison. For floating-point,
1987 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1988 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1989 processing is needed. */
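/* Rough intuition for the NOOV modes: when the value being tested is itself
   the output of an addcc/subcc-style operation, the overflow bit no longer
   describes a comparison of the original operands, so only conditions that
   ignore V (EQ, NE and plain sign tests) remain usable; tagging the CC
   register with CC_NOOVmode / CCX_NOOVmode records that restriction for the
   insn patterns.  */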
1992 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1994 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2020 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2021 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2023 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2024 return CCX_NOOVmode;
2030 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2037 /* Emit the compare insn and return the CC reg for a CODE comparison
2038 with operands X and Y. */
2041 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2043 enum machine_mode mode;
2046 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2049 mode = SELECT_CC_MODE (code, x, y);
2051 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2052 fcc regs (cse can't tell they're really call clobbered regs and will
2053 remove a duplicate comparison even if there is an intervening function
2054 call - it will then try to reload the cc reg via an int reg which is why
2055 we need the movcc patterns). It is possible to provide the movcc
2056 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2057 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2058 to tell cse that CCFPE mode registers (even pseudos) are call
2061 /* ??? This is an experiment. Rather than making changes to cse which may
2062 or may not be easy/clean, we do our own cse. This is possible because
2063 we will generate hard registers. Cse knows they're call clobbered (it
2064 doesn't know the same thing about pseudos). If we guess wrong, no big
2065 deal, but if we win, great! */
2067 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2068 #if 1 /* experiment */
2071 /* We cycle through the registers to ensure they're all exercised. */
2072 static int next_fcc_reg = 0;
2073 /* Previous x,y for each fcc reg. */
2074 static rtx prev_args[4][2];
2076 /* Scan prev_args for x,y. */
2077 for (reg = 0; reg < 4; reg++)
2078 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2083 prev_args[reg][0] = x;
2084 prev_args[reg][1] = y;
2085 next_fcc_reg = (next_fcc_reg + 1) & 3;
2087 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2090 cc_reg = gen_reg_rtx (mode);
2091 #endif /* ! experiment */
2092 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2093 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2095 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2097 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
2098 will only result in an unrecognizable insn, so there is no point in asserting. */
2099 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2105 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2108 gen_compare_reg (rtx cmp)
2110 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2113 /* This function is used for v9 only.
2114 DEST is the target of the Scc insn.
2115 CODE is the code for an Scc's comparison.
2116 X and Y are the values we compare.
2118 This function is needed to turn
2121 (gt (reg:CCX 100 %icc)
2125 (gt:DI (reg:CCX 100 %icc)
2128 I.e. the instruction recognizer needs to see the mode of the comparison to
2129 find the right instruction. We could use "gt:DI" right in the
2130 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2133 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2136 && (GET_MODE (x) == DImode
2137 || GET_MODE (dest) == DImode))
2140 /* Try to use the movrCC insns. */
2142 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2144 && v9_regcmp_p (compare_code))
2149 /* Special case for op0 != 0. This can be done with one instruction if
2152 if (compare_code == NE
2153 && GET_MODE (dest) == DImode
2154 && rtx_equal_p (op0, dest))
2156 emit_insn (gen_rtx_SET (VOIDmode, dest,
2157 gen_rtx_IF_THEN_ELSE (DImode,
2158 gen_rtx_fmt_ee (compare_code, DImode,
2165 if (reg_overlap_mentioned_p (dest, op0))
2167 /* Handle the case where dest == x.
2168 We "early clobber" the result. */
2169 op0 = gen_reg_rtx (GET_MODE (x));
2170 emit_move_insn (op0, x);
2173 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2174 if (GET_MODE (op0) != DImode)
2176 temp = gen_reg_rtx (DImode);
2177 convert_move (temp, op0, 0);
2181 emit_insn (gen_rtx_SET (VOIDmode, dest,
2182 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2183 gen_rtx_fmt_ee (compare_code, DImode,
2191 x = gen_compare_reg_1 (compare_code, x, y);
2194 gcc_assert (GET_MODE (x) != CC_NOOVmode
2195 && GET_MODE (x) != CCX_NOOVmode);
2197 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2198 emit_insn (gen_rtx_SET (VOIDmode, dest,
2199 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2200 gen_rtx_fmt_ee (compare_code,
2201 GET_MODE (x), x, y),
2202 const1_rtx, dest)));
2208 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2209 without jumps using the addx/subx instructions. */
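/* A minimal illustrative sketch (register names are placeholders, not the
   exact emitted RTL): the branchless SLTU sequence relies on the carry bit
   set by the compare,

       subcc  %o0, %o1, %g0    ! cmp %o0, %o1 -- sets carry if %o0 < %o1
       addx   %g0, 0, %o2      ! %o2 = 0 + 0 + carry

   and SGEU can use "subx %g0, -1, %o2" to obtain the complement.  */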
2212 emit_scc_insn (rtx operands[])
2219 /* The quad-word fp compare library routines all return nonzero to indicate
2220 true, which is different from the equivalent libgcc routines, so we must
2221 handle them specially here. */
2222 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2224 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2225 GET_CODE (operands[1]));
2226 operands[2] = XEXP (operands[1], 0);
2227 operands[3] = XEXP (operands[1], 1);
2230 code = GET_CODE (operands[1]);
2234 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2235 more applications). The exception to this is "reg != 0" which can
2236 be done in one instruction on v9 (so we do it). */
2239 if (GET_MODE (x) == SImode)
2241 rtx pat = gen_seqsi_special (operands[0], x, y);
2245 else if (GET_MODE (x) == DImode)
2247 rtx pat = gen_seqdi_special (operands[0], x, y);
2255 if (GET_MODE (x) == SImode)
2257 rtx pat = gen_snesi_special (operands[0], x, y);
2261 else if (GET_MODE (x) == DImode)
2263 rtx pat = gen_snedi_special (operands[0], x, y);
2269 /* For the rest, on v9 we can use conditional moves. */
2273 if (gen_v9_scc (operands[0], code, x, y))
2277 /* We can do LTU and GEU using the addx/subx instructions too. And
2278 for GTU/LEU, if both operands are registers, swap them and fall
2279 back to the easy case. */
2280 if (code == GTU || code == LEU)
2282 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2283 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2288 code = swap_condition (code);
2292 if (code == LTU || code == GEU)
2294 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2295 gen_rtx_fmt_ee (code, SImode,
2296 gen_compare_reg_1 (code, x, y),
2301 /* Nope, do branches. */
2305 /* Emit a conditional jump insn for the v9 architecture using comparison code
2306 CODE and jump target LABEL.
2307 This function exists to take advantage of the v9 brxx insns. */
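/* For instance, a "reg:DI != 0" test that would otherwise need a compare
   plus a conditional branch can be emitted as a single register branch,
   roughly:

       brnz,pt  %o0, .Llabel
        nop

   (illustrative only; the actual output depends on the insn patterns and
   on delay slot scheduling).  */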
2310 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2312 emit_jump_insn (gen_rtx_SET (VOIDmode,
2314 gen_rtx_IF_THEN_ELSE (VOIDmode,
2315 gen_rtx_fmt_ee (code, GET_MODE (op0),
2317 gen_rtx_LABEL_REF (VOIDmode, label),
2322 emit_conditional_branch_insn (rtx operands[])
2324 /* The quad-word fp compare library routines all return nonzero to indicate
2325 true, which is different from the equivalent libgcc routines, so we must
2326 handle them specially here. */
2327 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2329 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2330 GET_CODE (operands[0]));
2331 operands[1] = XEXP (operands[0], 0);
2332 operands[2] = XEXP (operands[0], 1);
2335 if (TARGET_ARCH64 && operands[2] == const0_rtx
2336 && GET_CODE (operands[1]) == REG
2337 && GET_MODE (operands[1]) == DImode)
2339 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2343 operands[1] = gen_compare_reg (operands[0]);
2344 operands[2] = const0_rtx;
2345 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2346 operands[1], operands[2]);
2347 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2352 /* Generate a DFmode part of a hard TFmode register.
2353 REG is the TFmode hard register, LOW is 1 for the
2354 low 64bit of the register and 0 otherwise.
2357 gen_df_reg (rtx reg, int low)
2359 int regno = REGNO (reg);
2361 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2362 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2363 return gen_rtx_REG (DFmode, regno);
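/* For example (illustrative, relying on SPARC's big-endian word order):
   calling gen_df_reg on a TFmode value held in %f8..%f11 returns %f8 for
   the high DFmode half (LOW == 0) and %f10 for the low half (LOW == 1);
   for a TFmode value held in integer registers on 64-bit, the step
   between the two halves is a single register.  */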
2366 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2367 Unlike normal calls, TFmode operands are passed by reference. It is
2368 assumed that no more than 3 operands are required. */
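/* Illustrative sketch of the convention implemented below: for a binary
   TFmode operation the emitted call is roughly equivalent to the C call

       func (&result, &op1, &op2);

   with all TFmode values passed by reference, whereas a conversion whose
   result is not TFmode returns its value normally.  The exact symbol
   (e.g. one of the _Qp_* or _Q_* support routines) is chosen by the
   callers of this function.  */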
2371 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2373 rtx ret_slot = NULL, arg[3], func_sym;
2376 /* We only expect to be called for conversions, unary, and binary ops. */
2377 gcc_assert (nargs == 2 || nargs == 3);
2379 for (i = 0; i < nargs; ++i)
2381 rtx this_arg = operands[i];
2384 /* TFmode arguments and return values are passed by reference. */
2385 if (GET_MODE (this_arg) == TFmode)
2387 int force_stack_temp;
2389 force_stack_temp = 0;
2390 if (TARGET_BUGGY_QP_LIB && i == 0)
2391 force_stack_temp = 1;
2393 if (GET_CODE (this_arg) == MEM
2394 && ! force_stack_temp)
2395 this_arg = XEXP (this_arg, 0);
2396 else if (CONSTANT_P (this_arg)
2397 && ! force_stack_temp)
2399 this_slot = force_const_mem (TFmode, this_arg);
2400 this_arg = XEXP (this_slot, 0);
2404 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2406 /* Operand 0 is the return value. We'll copy it out later. */
2408 emit_move_insn (this_slot, this_arg);
2410 ret_slot = this_slot;
2412 this_arg = XEXP (this_slot, 0);
2419 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2421 if (GET_MODE (operands[0]) == TFmode)
2424 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2425 arg[0], GET_MODE (arg[0]),
2426 arg[1], GET_MODE (arg[1]));
2428 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2429 arg[0], GET_MODE (arg[0]),
2430 arg[1], GET_MODE (arg[1]),
2431 arg[2], GET_MODE (arg[2]));
2434 emit_move_insn (operands[0], ret_slot);
2440 gcc_assert (nargs == 2);
2442 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2443 GET_MODE (operands[0]), 1,
2444 arg[1], GET_MODE (arg[1]));
2446 if (ret != operands[0])
2447 emit_move_insn (operands[0], ret);
2451 /* Expand soft-float TFmode calls to sparc abi routines. */
2454 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2476 emit_soft_tfmode_libcall (func, 3, operands);
2480 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2484 gcc_assert (code == SQRT);
2487 emit_soft_tfmode_libcall (func, 2, operands);
2491 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2498 switch (GET_MODE (operands[1]))
2511 case FLOAT_TRUNCATE:
2512 switch (GET_MODE (operands[0]))
2526 switch (GET_MODE (operands[1]))
2531 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2541 case UNSIGNED_FLOAT:
2542 switch (GET_MODE (operands[1]))
2547 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2558 switch (GET_MODE (operands[0]))
2572 switch (GET_MODE (operands[0]))
2589 emit_soft_tfmode_libcall (func, 2, operands);
2592 /* Expand a hard-float tfmode operation. All arguments must be in
2596 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2600 if (GET_RTX_CLASS (code) == RTX_UNARY)
2602 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2603 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2607 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2608 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2609 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2610 operands[1], operands[2]);
2613 if (register_operand (operands[0], VOIDmode))
2616 dest = gen_reg_rtx (GET_MODE (operands[0]));
2618 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2620 if (dest != operands[0])
2621 emit_move_insn (operands[0], dest);
2625 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2627 if (TARGET_HARD_QUAD)
2628 emit_hard_tfmode_operation (code, operands);
2630 emit_soft_tfmode_binop (code, operands);
2634 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2636 if (TARGET_HARD_QUAD)
2637 emit_hard_tfmode_operation (code, operands);
2639 emit_soft_tfmode_unop (code, operands);
2643 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2645 if (TARGET_HARD_QUAD)
2646 emit_hard_tfmode_operation (code, operands);
2648 emit_soft_tfmode_cvt (code, operands);
2651 /* Return nonzero if a branch/jump/call instruction will be emitting
2652 a nop into its delay slot. */
2655 empty_delay_slot (rtx insn)
2659 /* If no previous instruction (should not happen), return true. */
2660 if (PREV_INSN (insn) == NULL)
2663 seq = NEXT_INSN (PREV_INSN (insn));
2664 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2670 /* Return nonzero if TRIAL can go into the call delay slot. */
2673 tls_call_delay (rtx trial)
2678 call __tls_get_addr, %tgd_call (foo)
2679 add %l7, %o0, %o0, %tgd_add (foo)
2680 while Sun as/ld does not. */
2681 if (TARGET_GNU_TLS || !TARGET_TLS)
2684 pat = PATTERN (trial);
2686 /* We must reject tgd_add{32|64}, i.e.
2687 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2688 and tldm_add{32|64}, i.e.
2689 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2691 if (GET_CODE (pat) == SET
2692 && GET_CODE (SET_SRC (pat)) == PLUS)
2694 rtx unspec = XEXP (SET_SRC (pat), 1);
2696 if (GET_CODE (unspec) == UNSPEC
2697 && (XINT (unspec, 1) == UNSPEC_TLSGD
2698 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2705 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2706 instruction. RETURN_P is true if the v9 variant 'return' is to be
2707 considered in the test too.
2709 TRIAL must be a SET whose destination is a REG appropriate for the
2710 'restore' instruction or, if RETURN_P is true, for the 'return'
2714 eligible_for_restore_insn (rtx trial, bool return_p)
2716 rtx pat = PATTERN (trial);
2717 rtx src = SET_SRC (pat);
2719 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2720 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2721 && arith_operand (src, GET_MODE (src)))
2724 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2726 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2729 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2730 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2731 && arith_double_operand (src, GET_MODE (src)))
2732 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2734 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2735 else if (! TARGET_FPU && register_operand (src, SFmode))
2738 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2739 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2742 /* If we have the 'return' instruction, anything that does not use
2743 local or output registers and can go into a delay slot wins. */
2744 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2745 && (get_attr_in_uncond_branch_delay (trial)
2746 == IN_UNCOND_BRANCH_DELAY_TRUE))
2749 /* The 'restore src1,src2,dest' pattern for SImode. */
2750 else if (GET_CODE (src) == PLUS
2751 && register_operand (XEXP (src, 0), SImode)
2752 && arith_operand (XEXP (src, 1), SImode))
2755 /* The 'restore src1,src2,dest' pattern for DImode. */
2756 else if (GET_CODE (src) == PLUS
2757 && register_operand (XEXP (src, 0), DImode)
2758 && arith_double_operand (XEXP (src, 1), DImode))
2761 /* The 'restore src1,%lo(src2),dest' pattern. */
2762 else if (GET_CODE (src) == LO_SUM
2763 && ! TARGET_CM_MEDMID
2764 && ((register_operand (XEXP (src, 0), SImode)
2765 && immediate_operand (XEXP (src, 1), SImode))
2767 && register_operand (XEXP (src, 0), DImode)
2768 && immediate_operand (XEXP (src, 1), DImode))))
2771 /* The 'restore src,src,dest' pattern. */
2772 else if (GET_CODE (src) == ASHIFT
2773 && (register_operand (XEXP (src, 0), SImode)
2774 || register_operand (XEXP (src, 0), DImode))
2775 && XEXP (src, 1) == const1_rtx)
2781 /* Return nonzero if TRIAL can go into the function return's
2785 eligible_for_return_delay (rtx trial)
2789 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2792 if (get_attr_length (trial) != 1)
2795 /* If there are any call-saved registers, we should scan TRIAL if it
2796 does not reference them. For now just make it easy. */
2800 /* If the function uses __builtin_eh_return, the eh_return machinery
2801 occupies the delay slot. */
2802 if (crtl->calls_eh_return)
2805 /* In the case of a true leaf function, anything can go into the slot. */
2806 if (sparc_leaf_function_p)
2807 return get_attr_in_uncond_branch_delay (trial)
2808 == IN_UNCOND_BRANCH_DELAY_TRUE;
2810 pat = PATTERN (trial);
2812 /* Otherwise, only operations which can be done in tandem with
2813 a `restore' or `return' insn can go into the delay slot. */
2814 if (GET_CODE (SET_DEST (pat)) != REG
2815 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2818 /* If this instruction sets up a floating-point register and we have a return
2819 instruction, it can probably go in. But restore will not work
2821 if (REGNO (SET_DEST (pat)) >= 32)
2823 && ! epilogue_renumber (&pat, 1)
2824 && (get_attr_in_uncond_branch_delay (trial)
2825 == IN_UNCOND_BRANCH_DELAY_TRUE));
2827 return eligible_for_restore_insn (trial, true);
2830 /* Return nonzero if TRIAL can go into the sibling call's
2834 eligible_for_sibcall_delay (rtx trial)
2838 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2841 if (get_attr_length (trial) != 1)
2844 pat = PATTERN (trial);
2846 if (sparc_leaf_function_p)
2848 /* If the tail call is done using the call instruction,
2849 we have to restore %o7 in the delay slot. */
2850 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2853 /* %g1 is used to build the function address. */
2854 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2860 /* Otherwise, only operations which can be done in tandem with
2861 a `restore' insn can go into the delay slot. */
2862 if (GET_CODE (SET_DEST (pat)) != REG
2863 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2864 || REGNO (SET_DEST (pat)) >= 32)
2867 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2869 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2872 return eligible_for_restore_insn (trial, false);
2876 short_branch (int uid1, int uid2)
2878 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2880 /* Leave a few words of "slop". */
2881 if (delta >= -1023 && delta <= 1022)
2887 /* Return nonzero if REG is not used after INSN.
2888 We assume REG is a reload reg, and therefore does
2889 not live past labels or calls or jumps. */
2891 reg_unused_after (rtx reg, rtx insn)
2893 enum rtx_code code, prev_code = UNKNOWN;
2895 while ((insn = NEXT_INSN (insn)))
2897 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2900 code = GET_CODE (insn);
2901 if (GET_CODE (insn) == CODE_LABEL)
2906 rtx set = single_set (insn);
2907 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2910 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2912 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2920 /* Determine if it's legal to put X into the constant pool. This
2921 is not possible if X contains the address of a symbol that is
2922 not constant (TLS) or not known at final link time (PIC). */
2925 sparc_cannot_force_const_mem (rtx x)
2927 switch (GET_CODE (x))
2932 /* Accept all non-symbolic constants. */
2936 /* Labels are OK iff we are non-PIC. */
2937 return flag_pic != 0;
2940 /* 'Naked' TLS symbol references are never OK,
2941 non-TLS symbols are OK iff we are non-PIC. */
2942 if (SYMBOL_REF_TLS_MODEL (x))
2945 return flag_pic != 0;
2948 return sparc_cannot_force_const_mem (XEXP (x, 0));
2951 return sparc_cannot_force_const_mem (XEXP (x, 0))
2952 || sparc_cannot_force_const_mem (XEXP (x, 1));
2961 static GTY(()) bool pic_helper_needed = false;
2962 static GTY(()) rtx pic_helper_symbol;
2963 static GTY(()) rtx global_offset_table;
2965 /* Ensure that we are not using patterns that are not OK with PIC. */
2973 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2974 && (GET_CODE (recog_data.operand[i]) != CONST
2975 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2976 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2977 == global_offset_table)
2978 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2986 /* Return true if X is an address which needs a temporary register when
2987 reloaded while generating PIC code. */
2990 pic_address_needs_scratch (rtx x)
2992 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
2993 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2994 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2995 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2996 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
3002 /* Determine if a given RTX is a valid constant. We already know this
3003 satisfies CONSTANT_P. */
3006 legitimate_constant_p (rtx x)
3008 switch (GET_CODE (x))
3012 if (sparc_tls_referenced_p (x))
3017 if (GET_MODE (x) == VOIDmode)
3020 /* Floating point constants are generally not ok.
3021 The only exception is 0.0 in VIS. */
3023 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
3024 && const_zero_operand (x, GET_MODE (x)))
3030 /* Vector constants are generally not ok.
3031 The only exception is 0 in VIS. */
3033 && const_zero_operand (x, GET_MODE (x)))
3045 /* Determine if a given RTX is a valid constant address. */
3048 constant_address_p (rtx x)
3050 switch (GET_CODE (x))
3058 if (flag_pic && pic_address_needs_scratch (x))
3060 return legitimate_constant_p (x);
3063 return !flag_pic && legitimate_constant_p (x);
3070 /* Nonzero if the constant value X is a legitimate general operand
3071 when generating PIC code. It is given that flag_pic is on and
3072 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3075 legitimate_pic_operand_p (rtx x)
3077 if (pic_address_needs_scratch (x))
3079 if (sparc_tls_referenced_p (x))
3084 /* Return nonzero if ADDR is a valid memory address.
3085 STRICT specifies whether strict register checking applies. */
3088 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3090 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3092 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3094 else if (GET_CODE (addr) == PLUS)
3096 rs1 = XEXP (addr, 0);
3097 rs2 = XEXP (addr, 1);
3099 /* Canonicalize. REG comes first; if there are no regs,
3100 LO_SUM comes first. */
3102 && GET_CODE (rs1) != SUBREG
3104 || GET_CODE (rs2) == SUBREG
3105 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3107 rs1 = XEXP (addr, 1);
3108 rs2 = XEXP (addr, 0);
3112 && rs1 == pic_offset_table_rtx
3114 && GET_CODE (rs2) != SUBREG
3115 && GET_CODE (rs2) != LO_SUM
3116 && GET_CODE (rs2) != MEM
3117 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
3118 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3119 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3121 || GET_CODE (rs1) == SUBREG)
3122 && RTX_OK_FOR_OFFSET_P (rs2)))
3127 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3128 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3130 /* We prohibit REG + REG for TFmode when there are no quad move insns
3131 and we consequently need to split. We do this because REG+REG
3132 is not an offsettable address. If we get the situation in reload
3133 where source and destination of a movtf pattern are both MEMs with
3134 REG+REG address, then only one of them gets converted to an
3135 offsettable address. */
3137 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3140 /* We prohibit REG + REG on ARCH32 if not optimizing for
3141 DFmode/DImode because then mem_min_alignment is likely to be zero
3142 after reload and the forced split would lack a matching splitter
3144 if (TARGET_ARCH32 && !optimize
3145 && (mode == DFmode || mode == DImode))
3148 else if (USE_AS_OFFSETABLE_LO10
3149 && GET_CODE (rs1) == LO_SUM
3151 && ! TARGET_CM_MEDMID
3152 && RTX_OK_FOR_OLO10_P (rs2))
3155 imm1 = XEXP (rs1, 1);
3156 rs1 = XEXP (rs1, 0);
3157 if (!CONSTANT_P (imm1)
3158 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3162 else if (GET_CODE (addr) == LO_SUM)
3164 rs1 = XEXP (addr, 0);
3165 imm1 = XEXP (addr, 1);
3167 if (!CONSTANT_P (imm1)
3168 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3171 /* We can't allow TFmode in 32-bit mode, because an offset greater
3172 than the alignment (8) may cause the LO_SUM to overflow. */
3173 if (mode == TFmode && TARGET_ARCH32)
3176 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3181 if (GET_CODE (rs1) == SUBREG)
3182 rs1 = SUBREG_REG (rs1);
3188 if (GET_CODE (rs2) == SUBREG)
3189 rs2 = SUBREG_REG (rs2);
3196 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3197 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3202 if ((REGNO (rs1) >= 32
3203 && REGNO (rs1) != FRAME_POINTER_REGNUM
3204 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3206 && (REGNO (rs2) >= 32
3207 && REGNO (rs2) != FRAME_POINTER_REGNUM
3208 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3214 /* Construct the SYMBOL_REF for the __tls_get_addr function. */
3216 static GTY(()) rtx sparc_tls_symbol;
3219 sparc_tls_get_addr (void)
3221 if (!sparc_tls_symbol)
3222 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3224 return sparc_tls_symbol;
3228 sparc_tls_got (void)
3233 crtl->uses_pic_offset_table = 1;
3234 return pic_offset_table_rtx;
3237 if (!global_offset_table)
3238 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3239 temp = gen_reg_rtx (Pmode);
3240 emit_move_insn (temp, global_offset_table);
3244 /* Return true if X contains a thread-local symbol. */
3247 sparc_tls_referenced_p (rtx x)
3249 if (!TARGET_HAVE_TLS)
3252 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3253 x = XEXP (XEXP (x, 0), 0);
3255 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3258 /* That's all we handle in sparc_legitimize_tls_address for now. */
3262 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3263 this (thread-local) address. */
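/* As a hedged example, the 32-bit global-dynamic model expands into a
   sequence along these lines (register choices are illustrative, TLS
   relocations shown as assembler operators):

       sethi  %tgd_hi22(sym), %o1
       add    %o1, %tgd_lo10(sym), %o1
       add    %l7, %o1, %o0, %tgd_add(sym)
       call   __tls_get_addr, %tgd_call(sym)
        nop

   The other models (local-dynamic, initial-exec, local-exec) use their own
   relocation operators and, for the exec models, the thread pointer in %g7.  */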
3266 sparc_legitimize_tls_address (rtx addr)
3268 rtx temp1, temp2, temp3, ret, o0, got, insn;
3270 gcc_assert (can_create_pseudo_p ());
3272 if (GET_CODE (addr) == SYMBOL_REF)
3273 switch (SYMBOL_REF_TLS_MODEL (addr))
3275 case TLS_MODEL_GLOBAL_DYNAMIC:
3277 temp1 = gen_reg_rtx (SImode);
3278 temp2 = gen_reg_rtx (SImode);
3279 ret = gen_reg_rtx (Pmode);
3280 o0 = gen_rtx_REG (Pmode, 8);
3281 got = sparc_tls_got ();
3282 emit_insn (gen_tgd_hi22 (temp1, addr));
3283 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3286 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3287 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3292 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3293 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3296 CALL_INSN_FUNCTION_USAGE (insn)
3297 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3298 CALL_INSN_FUNCTION_USAGE (insn));
3299 insn = get_insns ();
3301 emit_libcall_block (insn, ret, o0, addr);
3304 case TLS_MODEL_LOCAL_DYNAMIC:
3306 temp1 = gen_reg_rtx (SImode);
3307 temp2 = gen_reg_rtx (SImode);
3308 temp3 = gen_reg_rtx (Pmode);
3309 ret = gen_reg_rtx (Pmode);
3310 o0 = gen_rtx_REG (Pmode, 8);
3311 got = sparc_tls_got ();
3312 emit_insn (gen_tldm_hi22 (temp1));
3313 emit_insn (gen_tldm_lo10 (temp2, temp1));
3316 emit_insn (gen_tldm_add32 (o0, got, temp2));
3317 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3322 emit_insn (gen_tldm_add64 (o0, got, temp2));
3323 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3326 CALL_INSN_FUNCTION_USAGE (insn)
3327 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3328 CALL_INSN_FUNCTION_USAGE (insn));
3329 insn = get_insns ();
3331 emit_libcall_block (insn, temp3, o0,
3332 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3333 UNSPEC_TLSLD_BASE));
3334 temp1 = gen_reg_rtx (SImode);
3335 temp2 = gen_reg_rtx (SImode);
3336 emit_insn (gen_tldo_hix22 (temp1, addr));
3337 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3339 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3341 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3344 case TLS_MODEL_INITIAL_EXEC:
3345 temp1 = gen_reg_rtx (SImode);
3346 temp2 = gen_reg_rtx (SImode);
3347 temp3 = gen_reg_rtx (Pmode);
3348 got = sparc_tls_got ();
3349 emit_insn (gen_tie_hi22 (temp1, addr));
3350 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3352 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3354 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3357 ret = gen_reg_rtx (Pmode);
3359 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3362 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3366 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3369 case TLS_MODEL_LOCAL_EXEC:
3370 temp1 = gen_reg_rtx (Pmode);
3371 temp2 = gen_reg_rtx (Pmode);
3374 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3375 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3379 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3380 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3382 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3389 else if (GET_CODE (addr) == CONST)
3393 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
3395 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
3396 offset = XEXP (XEXP (addr, 0), 1);
3398 base = force_operand (base, NULL_RTX);
3399 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
3400 offset = force_reg (Pmode, offset);
3401 ret = gen_rtx_PLUS (Pmode, base, offset);
3405 gcc_unreachable (); /* for now ... */
3410 /* Legitimize PIC addresses. If the address is already position-independent,
3411 we return ORIG. Newly generated position-independent addresses go into a
3412 reg. This is REG if nonzero, otherwise we allocate register(s) as
3416 sparc_legitimize_pic_address (rtx orig, rtx reg)
3418 bool gotdata_op = false;
3420 if (GET_CODE (orig) == SYMBOL_REF
3421 /* See the comment in sparc_expand_move. */
3422 || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
3424 rtx pic_ref, address;
3429 gcc_assert (! reload_in_progress && ! reload_completed);
3430 reg = gen_reg_rtx (Pmode);
3435 /* If not during reload, allocate another temp reg here for loading
3436 in the address, so that these instructions can be optimized
3438 rtx temp_reg = ((reload_in_progress || reload_completed)
3439 ? reg : gen_reg_rtx (Pmode));
3441 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3442 won't get confused into thinking that these two instructions
3443 are loading in the true address of the symbol. If in the
3444 future a PIC rtx exists, that should be used instead. */
3447 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3448 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3452 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3453 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3461 crtl->uses_pic_offset_table = 1;
3465 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
3466 pic_offset_table_rtx,
3469 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
3470 pic_offset_table_rtx,
3475 pic_ref = gen_const_mem (Pmode,
3476 gen_rtx_PLUS (Pmode,
3477 pic_offset_table_rtx, address));
3478 insn = emit_move_insn (reg, pic_ref);
3480 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3482 set_unique_reg_note (insn, REG_EQUAL, orig);
3485 else if (GET_CODE (orig) == CONST)
3489 if (GET_CODE (XEXP (orig, 0)) == PLUS
3490 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3495 gcc_assert (! reload_in_progress && ! reload_completed);
3496 reg = gen_reg_rtx (Pmode);
3499 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3500 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
3501 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
3502 base == reg ? NULL_RTX : reg);
3504 if (GET_CODE (offset) == CONST_INT)
3506 if (SMALL_INT (offset))
3507 return plus_constant (base, INTVAL (offset));
3508 else if (! reload_in_progress && ! reload_completed)
3509 offset = force_reg (Pmode, offset);
3511 /* If we reach here, then something is seriously wrong. */
3514 return gen_rtx_PLUS (Pmode, base, offset);
3516 else if (GET_CODE (orig) == LABEL_REF)
3517 /* ??? Why do we do this? */
3518 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3519 the register is live instead, in case it is eliminated. */
3520 crtl->uses_pic_offset_table = 1;
3525 /* Try machine-dependent ways of modifying an illegitimate address X
3526 to be legitimate. If we find one, return the new, valid address.
3528 OLDX is the address as it was before break_out_memory_refs was called.
3529 In some cases it is useful to look at this to decide what needs to be done.
3531 MODE is the mode of the operand pointed to by X.
3533 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
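/* A hedged example of the transformation: an address such as

       (plus (reg %i0) (mult (reg %i1) (const_int 4)))

   is rewritten as REG+REG by forcing the multiplication into a scratch
   register, and a symbolic or out-of-range constant term is likewise
   copied into a register, so that the result fits the REG+REG or
   REG+SMALL_INT forms accepted by sparc_legitimate_address_p.  */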
3536 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3537 enum machine_mode mode)
3541 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3542 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3543 force_operand (XEXP (x, 0), NULL_RTX));
3544 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3545 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3546 force_operand (XEXP (x, 1), NULL_RTX));
3547 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3548 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3550 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3551 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3552 force_operand (XEXP (x, 1), NULL_RTX));
3554 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3557 if (sparc_tls_referenced_p (x))
3558 x = sparc_legitimize_tls_address (x);
3560 x = sparc_legitimize_pic_address (x, NULL_RTX);
3561 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3562 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3563 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3564 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3565 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3566 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3567 else if (GET_CODE (x) == SYMBOL_REF
3568 || GET_CODE (x) == CONST
3569 || GET_CODE (x) == LABEL_REF)
3570 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3575 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
3576 replace the input X, or the original X if no replacement is called for.
3577 The output parameter *WIN is 1 if the calling macro should goto WIN,
3580 For SPARC, we wish to handle addresses by splitting them into
3581 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
3582 This cuts the number of extra insns by one.
3584 Do nothing when generating PIC code and the address is a symbolic
3585 operand or requires a scratch register. */
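/* Sketch of the intended effect (illustrative assembly): instead of
   materializing a symbolic address in full and then loading through it,

       sethi  %hi(sym), %g1
       or     %g1, %lo(sym), %g1
       ld     [%g1], %o0

   the LO_SUM is kept inside the memory reference so reload can emit

       sethi  %hi(sym), %g1
       ld     [%g1 + %lo(sym)], %o0

   which saves one instruction per access.  */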
3588 sparc_legitimize_reload_address (rtx x, enum machine_mode mode,
3589 int opnum, int type,
3590 int ind_levels ATTRIBUTE_UNUSED, int *win)
3592 /* Decompose SImode constants into HIGH+LO_SUM. */
3594 && (mode != TFmode || TARGET_ARCH64)
3595 && GET_MODE (x) == SImode
3596 && GET_CODE (x) != LO_SUM
3597 && GET_CODE (x) != HIGH
3598 && sparc_cmodel <= CM_MEDLOW
3600 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
3602 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
3603 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3604 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3605 opnum, (enum reload_type)type);
3610 /* We have to recognize what we have already generated above. */
3611 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
3613 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3614 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3615 opnum, (enum reload_type)type);
3624 /* Return true if ADDR (a legitimate address expression)
3625 has an effect that depends on the machine mode it is used for.
3631 is not equivalent to
3633 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
3635 because [%l7+a+1] is interpreted as the address of (a+1). */
3639 sparc_mode_dependent_address_p (const_rtx addr)
3641 if (flag_pic && GET_CODE (addr) == PLUS)
3643 rtx op0 = XEXP (addr, 0);
3644 rtx op1 = XEXP (addr, 1);
3645 if (op0 == pic_offset_table_rtx
3646 && SYMBOLIC_CONST (op1))
3653 #ifdef HAVE_GAS_HIDDEN
3654 # define USE_HIDDEN_LINKONCE 1
3656 # define USE_HIDDEN_LINKONCE 0
3660 get_pc_thunk_name (char name[32], unsigned int regno)
3662 const char *pic_name = reg_names[regno];
3664 /* Skip the leading '%' as that cannot be used in a
3668 if (USE_HIDDEN_LINKONCE)
3669 sprintf (name, "__sparc_get_pc_thunk.%s", pic_name);
3671 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
3674 /* Emit code to load the PIC register. */
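/* On ELF targets this typically materializes the PIC register %l7 via a
   small PC-relative thunk, roughly (illustrative only):

       sethi  %hi(_GLOBAL_OFFSET_TABLE_-4), %l7
       call   __sparc_get_pc_thunk.l7
        add   %l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   where the thunk simply executes "jmp %o7+8; add %o7, %l7, %l7".  */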
3677 load_pic_register (void)
3679 int orig_flag_pic = flag_pic;
3681 if (TARGET_VXWORKS_RTP)
3683 emit_insn (gen_vxworks_load_got ());
3684 emit_use (pic_offset_table_rtx);
3688 /* If we haven't initialized the special PIC symbols, do so now. */
3689 if (!pic_helper_needed)
3693 pic_helper_needed = true;
3695 get_pc_thunk_name (name, REGNO (pic_offset_table_rtx));
3696 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
3698 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3703 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3704 pic_helper_symbol));
3706 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3707 pic_helper_symbol));
3708 flag_pic = orig_flag_pic;
3710 /* Need to emit this whether or not we obey regdecls,
3711 since setjmp/longjmp can cause life info to screw up.
3712 ??? In the case where we don't obey regdecls, this is not sufficient
3713 since we may not fall out the bottom. */
3714 emit_use (pic_offset_table_rtx);
3717 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3718 address of the call target. */
3721 sparc_emit_call_insn (rtx pat, rtx addr)
3725 insn = emit_call_insn (pat);
3727 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3728 if (TARGET_VXWORKS_RTP
3730 && GET_CODE (addr) == SYMBOL_REF
3731 && (SYMBOL_REF_DECL (addr)
3732 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3733 : !SYMBOL_REF_LOCAL_P (addr)))
3735 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3736 crtl->uses_pic_offset_table = 1;
3740 /* Return 1 if RTX is a MEM which is known to be aligned to at
3741 least a DESIRED byte boundary. */
3744 mem_min_alignment (rtx mem, int desired)
3746 rtx addr, base, offset;
3748 /* If it's not a MEM we can't accept it. */
3749 if (GET_CODE (mem) != MEM)
3753 if (!TARGET_UNALIGNED_DOUBLES
3754 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3757 /* ??? The rest of the function predates MEM_ALIGN so
3758 there is probably a bit of redundancy. */
3759 addr = XEXP (mem, 0);
3760 base = offset = NULL_RTX;
3761 if (GET_CODE (addr) == PLUS)
3763 if (GET_CODE (XEXP (addr, 0)) == REG)
3765 base = XEXP (addr, 0);
3767 /* What we are saying here is that if the base
3768 REG is aligned properly, the compiler will make
3769 sure any REG based index upon it will be so
3771 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3772 offset = XEXP (addr, 1);
3774 offset = const0_rtx;
3777 else if (GET_CODE (addr) == REG)
3780 offset = const0_rtx;
3783 if (base != NULL_RTX)
3785 int regno = REGNO (base);
3787 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3789 /* Check if the compiler has recorded some information
3790 about the alignment of the base REG. If reload has
3791 completed, we already matched with proper alignments.
3792 If not running global_alloc, reload might give us
3793 an unaligned pointer to the local stack, though. */
3795 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3796 || (optimize && reload_completed))
3797 && (INTVAL (offset) & (desired - 1)) == 0)
3802 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3806 else if (! TARGET_UNALIGNED_DOUBLES
3807 || CONSTANT_P (addr)
3808 || GET_CODE (addr) == LO_SUM)
3810 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3811 is true, in which case we can only assume that an access is aligned if
3812 it is to a constant address, or the address involves a LO_SUM. */
3816 /* An obviously unaligned address. */
3821 /* Vectors to keep interesting information about registers where it can easily
3822 be got. We used to use the actual mode value as the bit number, but there
3823 are more than 32 modes now. Instead we use two tables: one indexed by
3824 hard register number, and one indexed by mode. */
3826 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3827 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3828 mapped into one sparc_mode_class mode. */
3830 enum sparc_mode_class {
3831 S_MODE, D_MODE, T_MODE, O_MODE,
3832 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3836 /* Modes for single-word and smaller quantities. */
3837 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3839 /* Modes for double-word and smaller quantities. */
3840 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3842 /* Modes for quad-word and smaller quantities. */
3843 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3845 /* Modes for 8-word and smaller quantities. */
3846 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3848 /* Modes for single-float quantities. We must allow any single word or
3849 smaller quantity. This is because the fix/float conversion instructions
3850 take integer inputs/outputs from the float registers. */
3851 #define SF_MODES (S_MODES)
3853 /* Modes for double-float and smaller quantities. */
3854 #define DF_MODES (D_MODES)
3856 /* Modes for quad-float and smaller quantities. */
3857 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
3859 /* Modes for quad-float pairs and smaller quantities. */
3860 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
3862 /* Modes for double-float only quantities. */
3863 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3865 /* Modes for quad-float and double-float only quantities. */
3866 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
3868 /* Modes for quad-float pairs and double-float only quantities. */
3869 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
3871 /* Modes for condition codes. */
3872 #define CC_MODES (1 << (int) CC_MODE)
3873 #define CCFP_MODES (1 << (int) CCFP_MODE)
3875 /* Value is 1 if register/mode pair is acceptable on sparc.
3876 The funny mixture of D and T modes is because integer operations
3877 do not specially operate on tetra quantities, so non-quad-aligned
3878 registers can hold quadword quantities (except %o4 and %i4 because
3879 they cross fixed registers). */
3881 /* This points to either the 32 bit or the 64 bit version. */
3882 const int *hard_regno_mode_classes;
3884 static const int hard_32bit_mode_classes[] = {
3885 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3886 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3887 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3888 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3890 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3891 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3892 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3893 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3895 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3896 and none can hold SFmode/SImode values. */
3897 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3898 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3899 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3900 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3903 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3909 static const int hard_64bit_mode_classes[] = {
3910 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3911 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3912 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3913 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3915 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3916 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3917 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3918 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3920 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3921 and none can hold SFmode/SImode values. */
3922 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3923 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3924 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3925 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3928 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3934 int sparc_mode_class [NUM_MACHINE_MODES];
3936 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3939 sparc_init_modes (void)
3943 for (i = 0; i < NUM_MACHINE_MODES; i++)
3945 switch (GET_MODE_CLASS (i))
3948 case MODE_PARTIAL_INT:
3949 case MODE_COMPLEX_INT:
3950 if (GET_MODE_SIZE (i) <= 4)
3951 sparc_mode_class[i] = 1 << (int) S_MODE;
3952 else if (GET_MODE_SIZE (i) == 8)
3953 sparc_mode_class[i] = 1 << (int) D_MODE;
3954 else if (GET_MODE_SIZE (i) == 16)
3955 sparc_mode_class[i] = 1 << (int) T_MODE;
3956 else if (GET_MODE_SIZE (i) == 32)
3957 sparc_mode_class[i] = 1 << (int) O_MODE;
3959 sparc_mode_class[i] = 0;
3961 case MODE_VECTOR_INT:
3962 if (GET_MODE_SIZE (i) <= 4)
3963 sparc_mode_class[i] = 1 << (int)SF_MODE;
3964 else if (GET_MODE_SIZE (i) == 8)
3965 sparc_mode_class[i] = 1 << (int)DF_MODE;
3968 case MODE_COMPLEX_FLOAT:
3969 if (GET_MODE_SIZE (i) <= 4)
3970 sparc_mode_class[i] = 1 << (int) SF_MODE;
3971 else if (GET_MODE_SIZE (i) == 8)
3972 sparc_mode_class[i] = 1 << (int) DF_MODE;
3973 else if (GET_MODE_SIZE (i) == 16)
3974 sparc_mode_class[i] = 1 << (int) TF_MODE;
3975 else if (GET_MODE_SIZE (i) == 32)
3976 sparc_mode_class[i] = 1 << (int) OF_MODE;
3978 sparc_mode_class[i] = 0;
3981 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3982 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3984 sparc_mode_class[i] = 1 << (int) CC_MODE;
3987 sparc_mode_class[i] = 0;
3993 hard_regno_mode_classes = hard_64bit_mode_classes;
3995 hard_regno_mode_classes = hard_32bit_mode_classes;
3997 /* Initialize the array used by REGNO_REG_CLASS. */
3998 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4000 if (i < 16 && TARGET_V8PLUS)
4001 sparc_regno_reg_class[i] = I64_REGS;
4002 else if (i < 32 || i == FRAME_POINTER_REGNUM)
4003 sparc_regno_reg_class[i] = GENERAL_REGS;
4005 sparc_regno_reg_class[i] = FP_REGS;
4007 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
4009 sparc_regno_reg_class[i] = FPCC_REGS;
4011 sparc_regno_reg_class[i] = NO_REGS;
4015 /* Compute the frame size required by the function. This function is called
4016 during the reload pass and also by sparc_expand_prologue. */
4019 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
4021 int outgoing_args_size = (crtl->outgoing_args_size
4022 + REG_PARM_STACK_SPACE (current_function_decl));
4023 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
4028 for (i = 0; i < 8; i++)
4029 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4034 for (i = 0; i < 8; i += 2)
4035 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
4036 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
4040 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
4041 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
4042 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
4045 /* Set up values for use in prologue and epilogue. */
4046 num_gfregs = n_regs;
4051 && crtl->outgoing_args_size == 0)
4052 actual_fsize = apparent_fsize = 0;
4055 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
4056 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
4057 apparent_fsize += n_regs * 4;
4058 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
4061 /* Make sure nothing can clobber our register windows.
4062 If a SAVE must be done, or there is a stack-local variable,
4063 the register window area must be allocated. */
4064 if (! leaf_function_p || size > 0)
4065 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
4067 return SPARC_STACK_ALIGN (actual_fsize);
4070 /* Output any necessary .register pseudo-ops. */
4073 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
4075 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
4081 /* Check if %g[2367] were used without
4082 .register being printed for them already. */
4083 for (i = 2; i < 8; i++)
4085 if (df_regs_ever_live_p (i)
4086 && ! sparc_hard_reg_printed [i])
4088 sparc_hard_reg_printed [i] = 1;
4089 /* %g7 is used as the TLS base register, so use #ignore
4090 for it instead of #scratch. */
4091 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
4092 i == 7 ? "ignore" : "scratch");
4099 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
4101 #if PROBE_INTERVAL > 4096
4102 #error Cannot use indexed addressing mode for stack probing
4105 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
4106 inclusive. These are offsets from the current stack pointer.
4108 Note that we don't use the REG+REG addressing mode for the probes because
4109 of the stack bias in 64-bit mode. And it doesn't really buy us anything,
4110 so the advantages of having a single code path win here. */
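/* For instance (illustrative, 32-bit, ignoring the stack bias): with
   PROBE_INTERVAL == 4096, FIRST == 0 and SIZE == 12288 the code below emits
   stores of %g0 at %sp - 4096, %sp - 8192 and %sp - 12288, without a loop
   since SIZE <= 5 * PROBE_INTERVAL.  */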
4113 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
4115 rtx g1 = gen_rtx_REG (Pmode, 1);
4117 /* See if we have a constant small number of probes to generate. If so,
4118 that's the easy case. */
4119 if (size <= PROBE_INTERVAL)
4121 emit_move_insn (g1, GEN_INT (first));
4122 emit_insn (gen_rtx_SET (VOIDmode, g1,
4123 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4124 emit_stack_probe (plus_constant (g1, -size));
4127 /* The run-time loop is made up of 10 insns in the generic case while the
4128 compile-time loop is made up of 4+2*(n-2) insns for n # of intervals. */
4129 else if (size <= 5 * PROBE_INTERVAL)
4133 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
4134 emit_insn (gen_rtx_SET (VOIDmode, g1,
4135 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4136 emit_stack_probe (g1);
4138 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
4139 it exceeds SIZE. If only two probes are needed, this will not
4140 generate any code. Then probe at FIRST + SIZE. */
4141 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
4143 emit_insn (gen_rtx_SET (VOIDmode, g1,
4144 plus_constant (g1, -PROBE_INTERVAL)));
4145 emit_stack_probe (g1);
4148 emit_stack_probe (plus_constant (g1, (i - PROBE_INTERVAL) - size));
4151 /* Otherwise, do the same as above, but in a loop. Note that we must be
4152 extra careful with variables wrapping around because we might be at
4153 the very top (or the very bottom) of the address space and we have
4154 to be able to handle this case properly; in particular, we use an
4155 equality test for the loop condition. */
4158 HOST_WIDE_INT rounded_size;
4159 rtx g4 = gen_rtx_REG (Pmode, 4);
4161 emit_move_insn (g1, GEN_INT (first));
4164 /* Step 1: round SIZE to the previous multiple of the interval. */
4166 rounded_size = size & -PROBE_INTERVAL;
4167 emit_move_insn (g4, GEN_INT (rounded_size));
4170 /* Step 2: compute initial and final value of the loop counter. */
4172 /* TEST_ADDR = SP + FIRST. */
4173 emit_insn (gen_rtx_SET (VOIDmode, g1,
4174 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4176 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
4177 emit_insn (gen_rtx_SET (VOIDmode, g4, gen_rtx_MINUS (Pmode, g1, g4)));
4182 while (TEST_ADDR != LAST_ADDR)
4184 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
4188 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
4189 until it is equal to ROUNDED_SIZE. */
4192 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
4194 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
4197 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
4198 that SIZE is equal to ROUNDED_SIZE. */
4200 if (size != rounded_size)
4201 emit_stack_probe (plus_constant (g4, rounded_size - size));
4204 /* Make sure nothing is scheduled before we are done. */
4205 emit_insn (gen_blockage ());
4208 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
4209 absolute addresses. */
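/* The emitted loop looks roughly like this on 32-bit (illustrative; %g1 and
   %g4 are the registers passed in by the caller, PROBE_INTERVAL == 4096):

   .LPSRL0:
       cmp    %g1, %g4
       be     .LPSRE0
        add   %g1, -4096, %g1
       ba     .LPSRL0
        st    %g0, [%g1+0]
   .LPSRE0:
*/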
4212 output_probe_stack_range (rtx reg1, rtx reg2)
4214 static int labelno = 0;
4215 char loop_lab[32], end_lab[32];
4218 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
4219 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
4221 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
4223 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
4226 output_asm_insn ("cmp\t%0, %1", xops);
4228 fputs ("\tbe,pn\t%xcc,", asm_out_file);
4230 fputs ("\tbe\t", asm_out_file);
4231 assemble_name_raw (asm_out_file, end_lab);
4232 fputc ('\n', asm_out_file);
4234 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
4235 xops[1] = GEN_INT (-PROBE_INTERVAL);
4236 output_asm_insn (" add\t%0, %1, %0", xops);
4238 /* Probe at TEST_ADDR and branch. */
4240 fputs ("\tba,pt\t%xcc,", asm_out_file);
4242 fputs ("\tba\t", asm_out_file);
4243 assemble_name_raw (asm_out_file, loop_lab);
4244 fputc ('\n', asm_out_file);
4245 xops[1] = GEN_INT (SPARC_STACK_BIAS);
4246 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
4248 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
4253 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
4254 as needed. LOW should be double-word aligned for 32-bit registers.
4255 Return the new OFFSET. */
4258 #define SORR_RESTORE 1
4261 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
4266 if (TARGET_ARCH64 && high <= 32)
4268 for (i = low; i < high; i++)
4270 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4272 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
4273 set_mem_alias_set (mem, sparc_sr_alias_set);
4274 if (action == SORR_SAVE)
4276 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4277 RTX_FRAME_RELATED_P (insn) = 1;
4279 else /* action == SORR_RESTORE */
4280 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4287 for (i = low; i < high; i += 2)
4289 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
4290 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
4291 enum machine_mode mode;
4296 mode = i < 32 ? DImode : DFmode;
4301 mode = i < 32 ? SImode : SFmode;
4306 mode = i < 32 ? SImode : SFmode;
4313 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
4314 set_mem_alias_set (mem, sparc_sr_alias_set);
4315 if (action == SORR_SAVE)
4317 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4318 RTX_FRAME_RELATED_P (insn) = 1;
4320 else /* action == SORR_RESTORE */
4321 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4323 /* Always preserve double-word alignment. */
4324 offset = (offset + 7) & -8;
4331 /* Emit code to save call-saved registers. */
4334 emit_save_or_restore_regs (int action)
4336 HOST_WIDE_INT offset;
4339 offset = frame_base_offset - apparent_fsize;
4341 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
4343 /* ??? This might be optimized a little as %g1 might already have a
4344 value close enough that a single add insn will do. */
4345 /* ??? Although, all of this is probably only a temporary fix
4346 because if %g1 can hold a function result, then
4347 sparc_expand_epilogue will lose (the result will be
4349 base = gen_rtx_REG (Pmode, 1);
4350 emit_move_insn (base, GEN_INT (offset));
4351 emit_insn (gen_rtx_SET (VOIDmode,
4353 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4357 base = frame_base_reg;
4359 offset = save_or_restore_regs (0, 8, base, offset, action);
4360 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
4363 /* Generate a save_register_window insn. */
4366 gen_save_register_window (rtx increment)
4369 return gen_save_register_windowdi (increment);
4371 return gen_save_register_windowsi (increment);
4374 /* Generate an increment for the stack pointer. */
4377 gen_stack_pointer_inc (rtx increment)
4379 return gen_rtx_SET (VOIDmode,
4381 gen_rtx_PLUS (Pmode,
4386 /* Generate a decrement for the stack pointer. */
4389 gen_stack_pointer_dec (rtx decrement)
4391 return gen_rtx_SET (VOIDmode,
4393 gen_rtx_MINUS (Pmode,
4398 /* Expand the function prologue. The prologue is responsible for reserving
4399 storage for the frame, saving the call-saved registers and loading the
4400 PIC register if needed. */
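/* As a rough illustration, a small non-leaf 32-bit function ends up with a
   prologue of the form

       save   %sp, -96, %sp

   while a leaf function that still needs a frame merely decrements %sp
   (e.g. "add %sp, -96, %sp") and keeps using the caller's register window;
   the actual offsets come from sparc_compute_frame_size.  */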
4403 sparc_expand_prologue (void)
4408 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4409 on the final value of the flag means deferring the prologue/epilogue
4410 expansion until just before the second scheduling pass, which is too
4411 late to emit multiple epilogues or return insns.
4413 Of course we are making the assumption that the value of the flag
4414 will not change between now and its final value. Of the three parts
4415 of the formula, only the last one can reasonably vary. Let's take a
4416 closer look, after assuming that the first two are set to true
4417 (otherwise the last value is effectively silenced).
4419 If only_leaf_regs_used returns false, the global predicate will also
4420 be false so the actual frame size calculated below will be positive.
4421 As a consequence, the save_register_window insn will be emitted in
4422 the instruction stream; now this insn explicitly references %fp
4423 which is not a leaf register so only_leaf_regs_used will always
4424 return false subsequently.
4426 If only_leaf_regs_used returns true, we hope that the subsequent
4427 optimization passes won't cause non-leaf registers to pop up. For
4428 example, the regrename pass has special provisions to not rename to
4429 non-leaf registers in a leaf function. */
4430 sparc_leaf_function_p
4431 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4433 /* Need to use actual_fsize, since we are also allocating
4434 space for our callee (and our own register save area). */
4436 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4438 /* Advertise that the data calculated just above are now valid. */
4439 sparc_prologue_data_valid_p = true;
4441 if (flag_stack_usage)
4442 current_function_static_stack_size = actual_fsize;
4444 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && actual_fsize)
4445 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, actual_fsize);
4447 if (sparc_leaf_function_p)
4449 frame_base_reg = stack_pointer_rtx;
4450 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4454 frame_base_reg = hard_frame_pointer_rtx;
4455 frame_base_offset = SPARC_STACK_BIAS;
4458 if (actual_fsize == 0)
4460 else if (sparc_leaf_function_p)
4462 if (actual_fsize <= 4096)
4463 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4464 else if (actual_fsize <= 8192)
4466 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4467 /* %sp is still the CFA register. */
4468 RTX_FRAME_RELATED_P (insn) = 1;
4470 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4474 rtx reg = gen_rtx_REG (Pmode, 1);
4475 emit_move_insn (reg, GEN_INT (-actual_fsize));
4476 insn = emit_insn (gen_stack_pointer_inc (reg));
4477 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4478 gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4481 RTX_FRAME_RELATED_P (insn) = 1;
4485 if (actual_fsize <= 4096)
4486 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4487 else if (actual_fsize <= 8192)
4489 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4490 /* %sp is not the CFA register anymore. */
4491 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4495 rtx reg = gen_rtx_REG (Pmode, 1);
4496 emit_move_insn (reg, GEN_INT (-actual_fsize));
4497 insn = emit_insn (gen_save_register_window (reg));
4500 RTX_FRAME_RELATED_P (insn) = 1;
4501 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
4502 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4506 emit_save_or_restore_regs (SORR_SAVE);
4508 /* Load the PIC register if needed. */
4509 if (flag_pic && crtl->uses_pic_offset_table)
4510 load_pic_register ();
4513 /* This function generates the assembly code for function entry, which boils
4514 down to emitting the necessary .register directives. */
4517 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4519 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4520 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4522 sparc_output_scratch_registers (file);
4525 /* Expand the function epilogue, either normal or part of a sibcall.
4526 We emit all the instructions except the return or the call. */
4529 sparc_expand_epilogue (void)
4532 emit_save_or_restore_regs (SORR_RESTORE);
4534 if (actual_fsize == 0)
4536 else if (sparc_leaf_function_p)
4538 if (actual_fsize <= 4096)
4539 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4540 else if (actual_fsize <= 8192)
4542 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4543 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4547 rtx reg = gen_rtx_REG (Pmode, 1);
4548 emit_move_insn (reg, GEN_INT (-actual_fsize));
4549 emit_insn (gen_stack_pointer_dec (reg));
4554 /* Return true if it is appropriate to emit `return' instructions in the
4555 body of a function. */
4558 sparc_can_use_return_insn_p (void)
4560 return sparc_prologue_data_valid_p
4561 && (actual_fsize == 0 || !sparc_leaf_function_p);
4564 /* This function generates the assembly code for function exit. */
4567 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4569 /* If the last two instructions of a function are "call foo; dslot;"
4570 the return address might point to the first instruction in the next
4571 function and we have to output a dummy nop for the sake of sane
4572 backtraces in such cases. This is pointless for sibling calls since
4573 the return address is explicitly adjusted. */
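/* For example (a sketch, not actual compiler output): if a function ends
   with

     call  bar
      nop                          ! delay slot

   then %o7+8 points at the first instruction of the next function, so a
   trailing nop is emitted below to keep the computed return address
   inside the current function.  */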
4575 rtx insn, last_real_insn;
4577 insn = get_last_insn ();
4579 last_real_insn = prev_real_insn (insn);
4581 && GET_CODE (last_real_insn) == INSN
4582 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4583 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4586 && CALL_P (last_real_insn)
4587 && !SIBLING_CALL_P (last_real_insn))
4588 fputs("\tnop\n", file);
4590 sparc_output_deferred_case_vectors ();
4593 /* Output a 'restore' instruction. */
4596 output_restore (rtx pat)
4602 fputs ("\t restore\n", asm_out_file);
4606 gcc_assert (GET_CODE (pat) == SET);
4608 operands[0] = SET_DEST (pat);
4609 pat = SET_SRC (pat);
4611 switch (GET_CODE (pat))
4614 operands[1] = XEXP (pat, 0);
4615 operands[2] = XEXP (pat, 1);
4616 output_asm_insn (" restore %r1, %2, %Y0", operands);
4619 operands[1] = XEXP (pat, 0);
4620 operands[2] = XEXP (pat, 1);
4621 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4624 operands[1] = XEXP (pat, 0);
4625 gcc_assert (XEXP (pat, 1) == const1_rtx);
4626 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4630 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4635 /* Output a return. */
4638 output_return (rtx insn)
4640 if (sparc_leaf_function_p)
4642 /* This is a leaf function so we don't have to bother restoring the
4643 register window, which frees us from dealing with the convoluted
4644 semantics of restore/return. We simply output the jump to the
4645 return address and the insn in the delay slot (if any). */
4647 gcc_assert (! crtl->calls_eh_return);
4649 return "jmp\t%%o7+%)%#";
4653 /* This is a regular function so we have to restore the register window.
4654 We may have a pending insn for the delay slot, which will be either
4655 combined with the 'restore' instruction or put in the delay slot of
4656 the 'return' instruction. */
4658 if (crtl->calls_eh_return)
4660 /* If the function uses __builtin_eh_return, the eh_return
4661 machinery occupies the delay slot. */
4662 gcc_assert (! final_sequence);
4664 if (! flag_delayed_branch)
4665 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4668 fputs ("\treturn\t%i7+8\n", asm_out_file);
4670 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4672 if (flag_delayed_branch)
4673 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4675 fputs ("\t nop\n", asm_out_file);
4677 else if (final_sequence)
4681 delay = NEXT_INSN (insn);
4684 pat = PATTERN (delay);
4686 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4688 epilogue_renumber (&pat, 0);
4689 return "return\t%%i7+%)%#";
4693 output_asm_insn ("jmp\t%%i7+%)", NULL);
4694 output_restore (pat);
4695 PATTERN (delay) = gen_blockage ();
4696 INSN_CODE (delay) = -1;
4701 /* The delay slot is empty. */
4703 return "return\t%%i7+%)\n\t nop";
4704 else if (flag_delayed_branch)
4705 return "jmp\t%%i7+%)\n\t restore";
4707 return "restore\n\tjmp\t%%o7+%)\n\t nop";
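/* A sketch of what the strings above expand to (the %) operand is
   normally 8, or 12 when the caller follows the call with a struct-return
   unimp):

     jmp %o7+8  ; <delay insn|nop>        ! leaf function
     return %i7+8  ; <delay insn|nop>     ! non-leaf, V9 return/restore
     jmp %i7+8  ; restore                 ! non-leaf, restore in delay slot
     restore ; jmp %o7+8 ; nop            ! non-leaf, -fno-delayed-branch  */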
4714 /* Output a sibling call. */
4717 output_sibcall (rtx insn, rtx call_operand)
4721 gcc_assert (flag_delayed_branch);
4723 operands[0] = call_operand;
4725 if (sparc_leaf_function_p)
4727 /* This is a leaf function so we don't have to bother restoring the
4728 register window. We simply output the jump to the function and
4729 the insn in the delay slot (if any). */
4731 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4734 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4737 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4738 it into a branch if possible.  */
4739 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4744 /* This is a regular function so we have to restore the register window.
4745 We may have a pending insn for the delay slot, which will be combined
4746 with the 'restore' instruction. */
4748 output_asm_insn ("call\t%a0, 0", operands);
4752 rtx delay = NEXT_INSN (insn);
4755 output_restore (PATTERN (delay));
4757 PATTERN (delay) = gen_blockage ();
4758 INSN_CODE (delay) = -1;
4761 output_restore (NULL_RTX);
4767 /* Functions for handling argument passing.
4769 For 32-bit, the first 6 args are normally in registers and the rest are
4770 pushed. Any arg that starts within the first 6 words is at least
4771 partially passed in a register unless its data type forbids.
4773 For 64-bit, the argument registers are laid out as an array of 16 elements
4774 and arguments are added sequentially. The first 6 int args and up to the
4775 first 16 fp args (depending on size) are passed in regs.
4777 Slot Stack Integral Float Float in structure Double Long Double
4778 ---- ----- -------- ----- ------------------ ------ -----------
4779 15 [SP+248] %f31 %f30,%f31 %d30
4780 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4781 13 [SP+232] %f27 %f26,%f27 %d26
4782 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4783 11 [SP+216] %f23 %f22,%f23 %d22
4784 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4785 9 [SP+200] %f19 %f18,%f19 %d18
4786 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4787 7 [SP+184] %f15 %f14,%f15 %d14
4788 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4789 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4790 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4791 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4792 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4793 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4794 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4796 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4798 Integral arguments are always passed as 64-bit quantities, appropriately extended.
4801 Passing of floating point values is handled as follows.
4802 If a prototype is in scope:
4803 If the value is in a named argument (i.e. not a stdarg function or a
4804 value not part of the `...') then the value is passed in the appropriate fp reg.
4806 If the value is part of the `...' and is passed in one of the first 6
4807 slots then the value is passed in the appropriate int reg.
4808 If the value is part of the `...' and is not passed in one of the first 6
4809 slots then the value is passed in memory.
4810 If a prototype is not in scope:
4811 If the value is one of the first 6 arguments the value is passed in the
4812 appropriate integer reg and the appropriate fp reg.
4813 If the value is not one of the first 6 arguments the value is passed in
4814 the appropriate fp reg and in memory.
4817 Summary of the calling conventions implemented by GCC on the SPARC:
4820    32-bit ABI:              size      argument     return value
4822 small integer <4 int. reg. int. reg.
4823 word 4 int. reg. int. reg.
4824 double word 8 int. reg. int. reg.
4826 _Complex small integer <8 int. reg. int. reg.
4827 _Complex word 8 int. reg. int. reg.
4828 _Complex double word 16 memory int. reg.
4830 vector integer <=8 int. reg. FP reg.
4831 vector integer >8 memory memory
4833 float 4 int. reg. FP reg.
4834 double 8 int. reg. FP reg.
4835 long double 16 memory memory
4837 _Complex float 8 memory FP reg.
4838 _Complex double 16 memory FP reg.
4839 _Complex long double 32 memory FP reg.
4841 vector float any memory memory
4843 aggregate any memory memory
4848    64-bit ABI:              size      argument     return value
4850 small integer <8 int. reg. int. reg.
4851 word 8 int. reg. int. reg.
4852 double word 16 int. reg. int. reg.
4854 _Complex small integer <16 int. reg. int. reg.
4855 _Complex word 16 int. reg. int. reg.
4856 _Complex double word 32 memory int. reg.
4858 vector integer <=16 FP reg. FP reg.
4859 vector integer 16<s<=32 memory FP reg.
4860 vector integer >32 memory memory
4862 float 4 FP reg. FP reg.
4863 double 8 FP reg. FP reg.
4864 long double 16 FP reg. FP reg.
4866 _Complex float 8 FP reg. FP reg.
4867 _Complex double 16 FP reg. FP reg.
4868 _Complex long double 32 memory FP reg.
4870 vector float <=16 FP reg. FP reg.
4871 vector float 16<s<=32 memory FP reg.
4872 vector float >32 memory memory
4874 aggregate <=16 reg. reg.
4875 aggregate 16<s<=32 memory reg.
4876 aggregate >32 memory memory
4880 Note #1: complex floating-point types follow the extended SPARC ABIs as
4881 implemented by the Sun compiler.
4883 Note #2: integral vector types follow the scalar floating-point types
4884 conventions to match what is implemented by the Sun VIS SDK.
4886 Note #3: floating-point vector types follow the aggregate types conventions.  */
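/* A worked example (a sketch, not normative): under the 64-bit ABI a
   named, prototyped argument of type "struct { double d; int i; }"
   (16 bytes, starting in slot 0) is passed partly in FP and partly in
   integer registers: the double occupies slot 0 and goes in %d0, while
   the int lives in slot 1 and is passed in the corresponding integer
   register %o1 (the whole 8-byte slot is loaded, padding included).
   Under the 32-bit ABI the same struct would be passed by reference.  */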
4890 /* Maximum number of int regs for args. */
4891 #define SPARC_INT_ARG_MAX 6
4892 /* Maximum number of fp regs for args. */
4893 #define SPARC_FP_ARG_MAX 16
4895 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
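/* For example, with 8-byte words (64-bit ABI) ROUND_ADVANCE (12) == 2 and
   ROUND_ADVANCE (8) == 1, while with 4-byte words (32-bit ABI)
   ROUND_ADVANCE (12) == 3: the number of argument slots a value of SIZE
   bytes occupies, rounded up.  */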
4897 /* Handle the INIT_CUMULATIVE_ARGS macro.
4898 Initialize a variable CUM of type CUMULATIVE_ARGS
4899 for a call to a function whose data type is FNTYPE.
4900 For a library call, FNTYPE is 0. */
4903 init_cumulative_args (struct sparc_args *cum, tree fntype,
4904 rtx libname ATTRIBUTE_UNUSED,
4905 tree fndecl ATTRIBUTE_UNUSED)
4908 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4909 cum->libcall_p = fntype == 0;
4912 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4913 When a prototype says `char' or `short', really pass an `int'. */
4916 sparc_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
4918 return TARGET_ARCH32 ? true : false;
4921 /* Handle promotion of pointer and integer arguments. */
4923 static enum machine_mode
4924 sparc_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
4925 enum machine_mode mode,
4926 int *punsignedp ATTRIBUTE_UNUSED,
4927 const_tree fntype ATTRIBUTE_UNUSED,
4928 int for_return ATTRIBUTE_UNUSED)
4930 if (POINTER_TYPE_P (type))
4932 *punsignedp = POINTERS_EXTEND_UNSIGNED;
4936 /* For TARGET_ARCH64 we need this, as we don't have instructions
4937 for arithmetic operations which do zero/sign extension at the same time,
4938 so without this we end up with an srl/sra after every assignment to a
4939 user variable, which means very very bad code. */
4941 && GET_MODE_CLASS (mode) == MODE_INT
4942 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4948 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4951 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4953 return TARGET_ARCH64 ? true : false;
4956 /* Scan the record type TYPE and return the following predicates:
4957 - INTREGS_P: the record contains at least one field or sub-field
4958 that is eligible for promotion in integer registers.
4959 - FP_REGS_P: the record contains at least one field or sub-field
4960 that is eligible for promotion in floating-point registers.
4961 - PACKED_P: the record contains at least one field that is packed.
4963 Sub-fields are not taken into account for the PACKED_P predicate. */
4966 scan_record_type (const_tree type, int *intregs_p, int *fpregs_p,
4971 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
4973 if (TREE_CODE (field) == FIELD_DECL)
4975 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4976 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4977 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4978 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4984 if (packed_p && DECL_PACKED (field))
4990 /* Compute the slot number to pass an argument in.
4991 Return the slot number or -1 if passing on the stack.
4993 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4994 the preceding args and about the function being called.
4995 MODE is the argument's machine mode.
4996 TYPE is the data type of the argument (as a tree).
4997 This is null for libcalls where that information may not be available.
4999 NAMED is nonzero if this argument is a named parameter
5000 (otherwise it is an extra parameter matching an ellipsis).
5001 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
5002 *PREGNO records the register number to use if scalar type.
5003 *PPADDING records the amount of padding needed in words. */
5006 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
5007 const_tree type, bool named, bool incoming_p,
5008 int *pregno, int *ppadding)
5010 int regbase = (incoming_p
5011 ? SPARC_INCOMING_INT_ARG_FIRST
5012 : SPARC_OUTGOING_INT_ARG_FIRST);
5013 int slotno = cum->words;
5014 enum mode_class mclass;
5019 if (type && TREE_ADDRESSABLE (type))
5025 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
5028 /* For SPARC64, objects requiring 16-byte alignment get it. */
5030 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
5031 && (slotno & 1) != 0)
5032 slotno++, *ppadding = 1;
5034 mclass = GET_MODE_CLASS (mode);
5035 if (type && TREE_CODE (type) == VECTOR_TYPE)
5037 /* Vector types deserve special treatment because they are
5038 polymorphic wrt their mode, depending upon whether VIS
5039 instructions are enabled. */
5040 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5042 /* The SPARC port defines no floating-point vector modes. */
5043 gcc_assert (mode == BLKmode);
5047 /* Integral vector types should either have a vector
5048 mode or an integral mode, because we are guaranteed
5049 by pass_by_reference that their size is not greater
5050 than 16 bytes and TImode is 16-byte wide. */
5051 gcc_assert (mode != BLKmode);
5053 /* Vector integers are handled like floats according to the Sun VIS SDK.  */
5055 mclass = MODE_FLOAT;
5062 case MODE_COMPLEX_FLOAT:
5063 case MODE_VECTOR_INT:
5064 if (TARGET_ARCH64 && TARGET_FPU && named)
5066 if (slotno >= SPARC_FP_ARG_MAX)
5068 regno = SPARC_FP_ARG_FIRST + slotno * 2;
5069 /* Arguments filling only one single FP register are
5070 right-justified in the outer double FP register. */
5071 if (GET_MODE_SIZE (mode) <= 4)
5078 case MODE_COMPLEX_INT:
5079 if (slotno >= SPARC_INT_ARG_MAX)
5081 regno = regbase + slotno;
5085 if (mode == VOIDmode)
5086 /* MODE is VOIDmode when generating the actual call. */
5089 gcc_assert (mode == BLKmode);
5093 || (TREE_CODE (type) != VECTOR_TYPE
5094 && TREE_CODE (type) != RECORD_TYPE))
5096 if (slotno >= SPARC_INT_ARG_MAX)
5098 regno = regbase + slotno;
5100 else /* TARGET_ARCH64 && type */
5102 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
5104 /* First see what kinds of registers we would need. */
5105 if (TREE_CODE (type) == VECTOR_TYPE)
5108 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
5110 /* The ABI obviously doesn't specify how packed structures
5111 are passed. These are defined to be passed in int regs
5112 if possible, otherwise memory. */
5113 if (packed_p || !named)
5114 fpregs_p = 0, intregs_p = 1;
5116 /* If all arg slots are filled, then must pass on stack. */
5117 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
5120 /* If there are only int args and all int arg slots are filled,
5121 then must pass on stack. */
5122 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
5125 /* Note that even if all int arg slots are filled, fp members may
5126 still be passed in regs if such regs are available.
5127 *PREGNO isn't set because there may be more than one, it's up
5128 to the caller to compute them. */
5141 /* Handle recursive register counting for structure field layout. */
5143 struct function_arg_record_value_parms
5145 rtx ret; /* return expression being built. */
5146 int slotno; /* slot number of the argument. */
5147 int named; /* whether the argument is named. */
5148 int regbase; /* regno of the base register. */
5149 int stack; /* 1 if part of the argument is on the stack. */
5150 int intoffset; /* offset of the first pending integer field. */
5151 unsigned int nregs; /* number of words passed in registers. */
5154 static void function_arg_record_value_3
5155 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
5156 static void function_arg_record_value_2
5157 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5158 static void function_arg_record_value_1
5159 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5160 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
5161 static rtx function_arg_union_value (int, enum machine_mode, int, int);
5163 /* A subroutine of function_arg_record_value. Traverse the structure
5164 recursively and determine how many registers will be required. */
5167 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
5168 struct function_arg_record_value_parms *parms,
5173 /* We need to compute how many registers are needed so we can
5174 allocate the PARALLEL but before we can do that we need to know
5175 whether there are any packed fields. The ABI obviously doesn't
5176 specify how structures are passed in this case, so they are
5177 defined to be passed in int regs if possible, otherwise memory,
5178 regardless of whether there are fp values present. */
5181 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5183 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5190 /* Compute how many registers we need. */
5191 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5193 if (TREE_CODE (field) == FIELD_DECL)
5195 HOST_WIDE_INT bitpos = startbitpos;
5197 if (DECL_SIZE (field) != 0)
5199 if (integer_zerop (DECL_SIZE (field)))
5202 if (host_integerp (bit_position (field), 1))
5203 bitpos += int_bit_position (field);
5206 /* ??? FIXME: else assume zero offset. */
5208 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5209 function_arg_record_value_1 (TREE_TYPE (field),
5213 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5214 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5219 if (parms->intoffset != -1)
5221 unsigned int startbit, endbit;
5222 int intslots, this_slotno;
5224 startbit = parms->intoffset & -BITS_PER_WORD;
5225 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5227 intslots = (endbit - startbit) / BITS_PER_WORD;
5228 this_slotno = parms->slotno + parms->intoffset
5231 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5233 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5234 /* We need to pass this field on the stack. */
5238 parms->nregs += intslots;
5239 parms->intoffset = -1;
5242 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
5243 If it wasn't true we wouldn't be here. */
5244 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5245 && DECL_MODE (field) == BLKmode)
5246 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5247 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5254 if (parms->intoffset == -1)
5255 parms->intoffset = bitpos;
5261 /* A subroutine of function_arg_record_value. Assign the bits of the
5262 structure between parms->intoffset and bitpos to integer registers. */
5265 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
5266 struct function_arg_record_value_parms *parms)
5268 enum machine_mode mode;
5270 unsigned int startbit, endbit;
5271 int this_slotno, intslots, intoffset;
5274 if (parms->intoffset == -1)
5277 intoffset = parms->intoffset;
5278 parms->intoffset = -1;
5280 startbit = intoffset & -BITS_PER_WORD;
5281 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5282 intslots = (endbit - startbit) / BITS_PER_WORD;
5283 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5285 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5289 /* If this is the trailing part of a word, only load that much into
5290 the register. Otherwise load the whole register. Note that in
5291 the latter case we may pick up unwanted bits. It's not a problem
5292 at the moment but we may wish to revisit it.  */
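/* For instance (sketch): with 64-bit words, an integer field starting at
   bit 32 of its slot has intoffset % BITS_PER_WORD == 32, so only the
   remaining 32 bits are loaded (SImode); a field starting on a slot
   boundary gets a full word_mode register, padding included.  */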
5294 if (intoffset % BITS_PER_WORD != 0)
5295 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5300 intoffset /= BITS_PER_UNIT;
5303 regno = parms->regbase + this_slotno;
5304 reg = gen_rtx_REG (mode, regno);
5305 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5306 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5309 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5314 while (intslots > 0);
5317 /* A subroutine of function_arg_record_value. Traverse the structure
5318 recursively and assign bits to floating point registers. Track which
5319 bits in between need integer registers; invoke function_arg_record_value_3
5320 to make that happen. */
5323 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5324 struct function_arg_record_value_parms *parms,
5330 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5332 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5339 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5341 if (TREE_CODE (field) == FIELD_DECL)
5343 HOST_WIDE_INT bitpos = startbitpos;
5345 if (DECL_SIZE (field) != 0)
5347 if (integer_zerop (DECL_SIZE (field)))
5350 if (host_integerp (bit_position (field), 1))
5351 bitpos += int_bit_position (field);
5354 /* ??? FIXME: else assume zero offset. */
5356 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5357 function_arg_record_value_2 (TREE_TYPE (field),
5361 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5362 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5367 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5368 int regno, nregs, pos;
5369 enum machine_mode mode = DECL_MODE (field);
5372 function_arg_record_value_3 (bitpos, parms);
5374 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5377 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5378 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5380 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5382 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5388 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5389 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5391 reg = gen_rtx_REG (mode, regno);
5392 pos = bitpos / BITS_PER_UNIT;
5393 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5394 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5398 regno += GET_MODE_SIZE (mode) / 4;
5399 reg = gen_rtx_REG (mode, regno);
5400 pos += GET_MODE_SIZE (mode);
5401 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5402 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5408 if (parms->intoffset == -1)
5409 parms->intoffset = bitpos;
5415 /* Used by function_arg and sparc_function_value_1 to implement the complex
5416 conventions of the 64-bit ABI for passing and returning structures.
5417 Return an expression valid as a return value for the FUNCTION_ARG
5418 and TARGET_FUNCTION_VALUE.
5420 TYPE is the data type of the argument (as a tree).
5421 This is null for libcalls where that information may not be available.
5423 MODE is the argument's machine mode.
5424 SLOTNO is the index number of the argument's slot in the parameter array.
5425 NAMED is nonzero if this argument is a named parameter
5426 (otherwise it is an extra parameter matching an ellipsis).
5427 REGBASE is the regno of the base register for the parameter array. */
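/* The result is a PARALLEL pairing each hard register with the byte
   offset it covers.  A sketch for the 16-byte "struct { double d; int i; }"
   passed as an outgoing argument in slot 0:

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   When part of the structure must live on the stack, the first element
   is (expr_list (nil) (const_int 0)), as explained further below.  */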
5430 function_arg_record_value (const_tree type, enum machine_mode mode,
5431 int slotno, int named, int regbase)
5433 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5434 struct function_arg_record_value_parms parms;
5437 parms.ret = NULL_RTX;
5438 parms.slotno = slotno;
5439 parms.named = named;
5440 parms.regbase = regbase;
5443 /* Compute how many registers we need. */
5445 parms.intoffset = 0;
5446 function_arg_record_value_1 (type, 0, &parms, false);
5448 /* Take into account pending integer fields. */
5449 if (parms.intoffset != -1)
5451 unsigned int startbit, endbit;
5452 int intslots, this_slotno;
5454 startbit = parms.intoffset & -BITS_PER_WORD;
5455 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5456 intslots = (endbit - startbit) / BITS_PER_WORD;
5457 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5459 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5461 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5462 /* We need to pass this field on the stack. */
5466 parms.nregs += intslots;
5468 nregs = parms.nregs;
5470 /* Allocate the vector and handle some annoying special cases. */
5473 /* ??? Empty structure has no value? Duh? */
5476 /* Though there's nothing really to store, return a word register
5477 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5478 leads to breakage due to the fact that there are zero bytes to
5480 return gen_rtx_REG (mode, regbase);
5484 /* ??? C++ has structures with no fields, and yet a size. Give up
5485 for now and pass everything back in integer registers. */
5486 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5488 if (nregs + slotno > SPARC_INT_ARG_MAX)
5489 nregs = SPARC_INT_ARG_MAX - slotno;
5491 gcc_assert (nregs != 0);
5493 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5495 /* If at least one field must be passed on the stack, generate
5496 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5497 also be passed on the stack. We can't do much better because the
5498 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5499 of structures for which the fields passed exclusively in registers
5500 are not at the beginning of the structure. */
5502 XVECEXP (parms.ret, 0, 0)
5503 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5505 /* Fill in the entries. */
5507 parms.intoffset = 0;
5508 function_arg_record_value_2 (type, 0, &parms, false);
5509 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5511 gcc_assert (parms.nregs == nregs);
5516 /* Used by function_arg and sparc_function_value_1 to implement the conventions
5517 of the 64-bit ABI for passing and returning unions.
5518 Return an expression valid as a return value for the FUNCTION_ARG
5519 and TARGET_FUNCTION_VALUE.
5521 SIZE is the size in bytes of the union.
5522 MODE is the argument's machine mode.
5523 REGNO is the hard register the union will be passed in. */
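/* Sketch: a 16-byte union passed as an outgoing argument in slot 0 is
   spread left-justified over two consecutive integer registers:

     (parallel [(expr_list (reg:DI %o0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])  */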
5526 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5529 int nwords = ROUND_ADVANCE (size), i;
5532 /* See comment in previous function for empty structures. */
5534 return gen_rtx_REG (mode, regno);
5536 if (slotno == SPARC_INT_ARG_MAX - 1)
5539 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5541 for (i = 0; i < nwords; i++)
5543 /* Unions are passed left-justified. */
5544 XVECEXP (regs, 0, i)
5545 = gen_rtx_EXPR_LIST (VOIDmode,
5546 gen_rtx_REG (word_mode, regno),
5547 GEN_INT (UNITS_PER_WORD * i));
5554 /* Used by function_arg and sparc_function_value_1 to implement the conventions
5555 for passing and returning large (BLKmode) vectors.
5556 Return an expression valid as a return value for the FUNCTION_ARG
5557 and TARGET_FUNCTION_VALUE.
5559 SIZE is the size in bytes of the vector (at least 8 bytes).
5560 REGNO is the FP hard register the vector will be passed in. */
5563 function_arg_vector_value (int size, int regno)
5565 int i, nregs = size / 8;
5568 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5570 for (i = 0; i < nregs; i++)
5572 XVECEXP (regs, 0, i)
5573 = gen_rtx_EXPR_LIST (VOIDmode,
5574 gen_rtx_REG (DImode, regno + 2*i),
5581 /* Determine where to put an argument to a function.
5582 Value is zero to push the argument on the stack,
5583 or a hard register in which to store the argument.
5585 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5586 the preceding args and about the function being called.
5587 MODE is the argument's machine mode.
5588 TYPE is the data type of the argument (as a tree).
5589 This is null for libcalls where that information may not be available.
5591 NAMED is true if this argument is a named parameter
5592 (otherwise it is an extra parameter matching an ellipsis).
5593 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
5594 TARGET_FUNCTION_INCOMING_ARG. */
5597 sparc_function_arg_1 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
5598 const_tree type, bool named, bool incoming_p)
5600 int regbase = (incoming_p
5601 ? SPARC_INCOMING_INT_ARG_FIRST
5602 : SPARC_OUTGOING_INT_ARG_FIRST);
5603 int slotno, regno, padding;
5604 enum mode_class mclass = GET_MODE_CLASS (mode);
5606 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5611 /* Vector types deserve special treatment because they are polymorphic wrt
5612 their mode, depending upon whether VIS instructions are enabled. */
5613 if (type && TREE_CODE (type) == VECTOR_TYPE)
5615 HOST_WIDE_INT size = int_size_in_bytes (type);
5616 gcc_assert ((TARGET_ARCH32 && size <= 8)
5617 || (TARGET_ARCH64 && size <= 16));
5619 if (mode == BLKmode)
5620 return function_arg_vector_value (size,
5621 SPARC_FP_ARG_FIRST + 2*slotno);
5623 mclass = MODE_FLOAT;
5627 return gen_rtx_REG (mode, regno);
5629 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5630 and are promoted to registers if possible. */
5631 if (type && TREE_CODE (type) == RECORD_TYPE)
5633 HOST_WIDE_INT size = int_size_in_bytes (type);
5634 gcc_assert (size <= 16);
5636 return function_arg_record_value (type, mode, slotno, named, regbase);
5639 /* Unions up to 16 bytes in size are passed in integer registers. */
5640 else if (type && TREE_CODE (type) == UNION_TYPE)
5642 HOST_WIDE_INT size = int_size_in_bytes (type);
5643 gcc_assert (size <= 16);
5645 return function_arg_union_value (size, mode, slotno, regno);
5648 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5649 but also have the slot allocated for them.
5650 If no prototype is in scope fp values in register slots get passed
5651 in two places, either fp regs and int regs or fp regs and memory. */
5652 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5653 && SPARC_FP_REG_P (regno))
5655 rtx reg = gen_rtx_REG (mode, regno);
5656 if (cum->prototype_p || cum->libcall_p)
5658 /* "* 2" because fp reg numbers are recorded in 4 byte
5661 /* ??? This will cause the value to be passed in the fp reg and
5662 in the stack. When a prototype exists we want to pass the
5663 value in the reg but reserve space on the stack. That's an
5664 optimization, and is deferred [for a bit]. */
5665 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5666 return gen_rtx_PARALLEL (mode,
5668 gen_rtx_EXPR_LIST (VOIDmode,
5669 NULL_RTX, const0_rtx),
5670 gen_rtx_EXPR_LIST (VOIDmode,
5674 /* ??? It seems that passing back a register even when past
5675 the area declared by REG_PARM_STACK_SPACE will allocate
5676 space appropriately, and will not copy the data onto the
5677 stack, exactly as we desire.
5679 This is due to locate_and_pad_parm being called in
5680 expand_call whenever reg_parm_stack_space > 0, which
5681 while beneficial to our example here, would seem to be
5682 in error from what had been intended. Ho hum... -- r~ */
5690 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5694 /* On incoming, we don't need to know that the value
5695 is passed in %f0 and %i0, and it confuses other parts
5696 causing needless spillage even on the simplest cases. */
5700 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5701 + (regno - SPARC_FP_ARG_FIRST) / 2);
5703 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5704 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5706 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5710 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5711 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5712 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5717 /* All other aggregate types are passed in an integer register in a mode
5718 corresponding to the size of the type. */
5719 else if (type && AGGREGATE_TYPE_P (type))
5721 HOST_WIDE_INT size = int_size_in_bytes (type);
5722 gcc_assert (size <= 16);
5724 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5727 return gen_rtx_REG (mode, regno);
5730 /* Handle the TARGET_FUNCTION_ARG target hook. */
5733 sparc_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5734 const_tree type, bool named)
5736 return sparc_function_arg_1 (cum, mode, type, named, false);
5739 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
5742 sparc_function_incoming_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5743 const_tree type, bool named)
5745 return sparc_function_arg_1 (cum, mode, type, named, true);
5748 /* For an arg passed partly in registers and partly in memory,
5749 this is the number of bytes of registers used.
5750 For args passed entirely in registers or entirely in memory, zero.
5752 Any arg that starts in the first 6 regs but won't entirely fit in them
5753 needs partial registers on v8. On v9, structures with integer
5754 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5755 values that begin in the last fp reg [where "last fp reg" varies with the
5756 mode] will be split between that reg and memory. */
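/* Worked example (sketch): on V8, a DImode argument whose first word falls
   in slot 5 has slotno + ROUND_ADVANCE (8) == 7 > SPARC_INT_ARG_MAX, so
   the function below returns (6 - 5) * 4 == 4 bytes: the first half is
   passed in %o5 and the second half on the stack.  */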
5759 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5760 tree type, bool named)
5762 int slotno, regno, padding;
5764 /* We pass false for incoming_p here, it doesn't matter. */
5765 slotno = function_arg_slotno (cum, mode, type, named, false,
5773 if ((slotno + (mode == BLKmode
5774 ? ROUND_ADVANCE (int_size_in_bytes (type))
5775 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5776 > SPARC_INT_ARG_MAX)
5777 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5781 /* We are guaranteed by pass_by_reference that the size of the
5782 argument is not greater than 16 bytes, so we only need to return
5783 one word if the argument is partially passed in registers. */
5785 if (type && AGGREGATE_TYPE_P (type))
5787 int size = int_size_in_bytes (type);
5789 if (size > UNITS_PER_WORD
5790 && slotno == SPARC_INT_ARG_MAX - 1)
5791 return UNITS_PER_WORD;
5793 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5794 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5795 && ! (TARGET_FPU && named)))
5797 /* The complex types are passed as packed types. */
5798 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5799 && slotno == SPARC_INT_ARG_MAX - 1)
5800 return UNITS_PER_WORD;
5802 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5804 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5806 return UNITS_PER_WORD;
5813 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5814 Specify whether to pass the argument by reference. */
5817 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5818 enum machine_mode mode, const_tree type,
5819 bool named ATTRIBUTE_UNUSED)
5822 /* Original SPARC 32-bit ABI says that structures and unions,
5823 and quad-precision floats are passed by reference. For Pascal,
5824 also pass arrays by reference. All other base types are passed
5827 Extended ABI (as implemented by the Sun compiler) says that all
5828 complex floats are passed by reference. Pass complex integers
5829 in registers up to 8 bytes. More generally, enforce the 2-word
5830 cap for passing arguments in registers.
5832 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5833 integers are passed like floats of the same size, that is in
5834 registers up to 8 bytes. Pass all vector floats by reference
5835 like structure and unions. */
5836 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5838 /* Catch CDImode, TFmode, DCmode and TCmode. */
5839 || GET_MODE_SIZE (mode) > 8
5841 && TREE_CODE (type) == VECTOR_TYPE
5842 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5844 /* Original SPARC 64-bit ABI says that structures and unions
5845 smaller than 16 bytes are passed in registers, as well as
5846 all other base types.
5848 Extended ABI (as implemented by the Sun compiler) says that
5849 complex floats are passed in registers up to 16 bytes. Pass
5850 all complex integers in registers up to 16 bytes. More generally,
5851 enforce the 2-word cap for passing arguments in registers.
5853 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5854 integers are passed like floats of the same size, that is in
5855 registers (up to 16 bytes).  Pass all vector floats like structure and unions.  */
5858 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5859 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5860 /* Catch CTImode and TCmode. */
5861 || GET_MODE_SIZE (mode) > 16);
5864 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
5865 Update the data in CUM to advance over an argument
5866 of mode MODE and data type TYPE.
5867 TYPE is null for libcalls where that information may not be available. */
5870 sparc_function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5871 const_tree type, bool named)
5875 /* We pass false for incoming_p here, it doesn't matter. */
5876 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
5878 /* If argument requires leading padding, add it. */
5879 cum->words += padding;
5883 cum->words += (mode != BLKmode
5884 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5885 : ROUND_ADVANCE (int_size_in_bytes (type)));
5889 if (type && AGGREGATE_TYPE_P (type))
5891 int size = int_size_in_bytes (type);
5895 else if (size <= 16)
5897 else /* passed by reference */
5902 cum->words += (mode != BLKmode
5903 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5904 : ROUND_ADVANCE (int_size_in_bytes (type)));
5909 /* Handle the FUNCTION_ARG_PADDING macro.
5910 For the 64 bit ABI structs are always stored left shifted in their
5914 function_arg_padding (enum machine_mode mode, const_tree type)
5916 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5919 /* Fall back to the default. */
5920 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5923 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5924 Specify whether to return the return value in memory. */
5927 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5930 /* Original SPARC 32-bit ABI says that structures and unions,
5931 and quad-precision floats are returned in memory. All other
5932 base types are returned in registers.
5934 Extended ABI (as implemented by the Sun compiler) says that
5935 all complex floats are returned in registers (8 FP registers
5936 at most for '_Complex long double'). Return all complex integers
5937 in registers (4 at most for '_Complex long long').
5939 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5940 integers are returned like floats of the same size, that is in
5941 registers up to 8 bytes and in memory otherwise. Return all
5942 vector floats in memory like structure and unions; note that
5943 they always have BLKmode like the latter. */
5944 return (TYPE_MODE (type) == BLKmode
5945 || TYPE_MODE (type) == TFmode
5946 || (TREE_CODE (type) == VECTOR_TYPE
5947 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5949 /* Original SPARC 64-bit ABI says that structures and unions
5950 smaller than 32 bytes are returned in registers, as well as
5951 all other base types.
5953 Extended ABI (as implemented by the Sun compiler) says that all
5954 complex floats are returned in registers (8 FP registers at most
5955 for '_Complex long double'). Return all complex integers in
5956 registers (4 at most for '_Complex TItype').
5958 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5959 integers are returned like floats of the same size, that is in
5960 registers. Return all vector floats like structure and unions;
5961 note that they always have BLKmode like the latter. */
5962 return ((TYPE_MODE (type) == BLKmode
5963 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5966 /* Handle the TARGET_STRUCT_VALUE target hook.
5967 Return where to find the structure return value address. */
5970 sparc_struct_value_rtx (tree fndecl, int incoming)
5979 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5980 STRUCT_VALUE_OFFSET));
5982 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5983 STRUCT_VALUE_OFFSET));
5985 /* Only follow the SPARC ABI for fixed-size structure returns.
5986 Variable size structure returns are handled per the normal
5987 procedures in GCC.  This is enabled by -mstd-struct-return.  */
5989 && sparc_std_struct_return
5990 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5991 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5993 /* We must check and adjust the return address, as it is
5994 optional as to whether the return object is really provided or not.  */
5996 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5997 rtx scratch = gen_reg_rtx (SImode);
5998 rtx endlab = gen_label_rtx ();
6000 /* Calculate the return object size */
6001 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
6002 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
6003 /* Construct a temporary return value */
6004 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
6006 /* Implement the SPARC 32-bit psABI struct-return checking requirements:
6009 Fetch the instruction where we will return to and see if
6010 it's an unimp instruction (the most significant 10 bits will be zero).  */
6012 emit_move_insn (scratch, gen_rtx_MEM (SImode,
6013 plus_constant (ret_rtx, 8)));
6014 /* Assume the size is valid and pre-adjust */
6015 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
6016 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
6017 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
6018 /* Assign stack temp:
6019 Write the address of the memory pointed to by temp_val into
6020 the memory pointed to by mem */
6021 emit_move_insn (mem, XEXP (temp_val, 0));
6022 emit_label (endlab);
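/* For reference, the caller-side sequence the check above looks for (a
   sketch of the 32-bit psABI struct-return convention) is:

     call  foo
      nop                  ! delay slot
     unimp 8               ! low 12 bits hold the expected struct size

   A callee that actually stores the struct returns to %o7+12, skipping
   the unimp; the code above pre-adjusts the return address and backs the
   adjustment out if no matching unimp is found.  */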
6025 set_mem_alias_set (mem, struct_value_alias_set);
6030 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
6031 For v9, function return values are subject to the same rules as arguments,
6032 except that up to 32 bytes may be returned in registers. */
6035 sparc_function_value_1 (const_tree type, enum machine_mode mode,
6038 /* Beware that the two values are swapped here wrt function_arg. */
6039 int regbase = (outgoing
6040 ? SPARC_INCOMING_INT_ARG_FIRST
6041 : SPARC_OUTGOING_INT_ARG_FIRST);
6042 enum mode_class mclass = GET_MODE_CLASS (mode);
6045 /* Vector types deserve special treatment because they are polymorphic wrt
6046 their mode, depending upon whether VIS instructions are enabled. */
6047 if (type && TREE_CODE (type) == VECTOR_TYPE)
6049 HOST_WIDE_INT size = int_size_in_bytes (type);
6050 gcc_assert ((TARGET_ARCH32 && size <= 8)
6051 || (TARGET_ARCH64 && size <= 32));
6053 if (mode == BLKmode)
6054 return function_arg_vector_value (size,
6055 SPARC_FP_ARG_FIRST);
6057 mclass = MODE_FLOAT;
6060 if (TARGET_ARCH64 && type)
6062 /* Structures up to 32 bytes in size are returned in registers. */
6063 if (TREE_CODE (type) == RECORD_TYPE)
6065 HOST_WIDE_INT size = int_size_in_bytes (type);
6066 gcc_assert (size <= 32);
6068 return function_arg_record_value (type, mode, 0, 1, regbase);
6071 /* Unions up to 32 bytes in size are returned in integer registers. */
6072 else if (TREE_CODE (type) == UNION_TYPE)
6074 HOST_WIDE_INT size = int_size_in_bytes (type);
6075 gcc_assert (size <= 32);
6077 return function_arg_union_value (size, mode, 0, regbase);
6080 /* Objects that require it are returned in FP registers. */
6081 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6084 /* All other aggregate types are returned in an integer register in a
6085 mode corresponding to the size of the type. */
6086 else if (AGGREGATE_TYPE_P (type))
6088 /* All other aggregate types are passed in an integer register
6089 in a mode corresponding to the size of the type. */
6090 HOST_WIDE_INT size = int_size_in_bytes (type);
6091 gcc_assert (size <= 32);
6093 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6095 /* ??? We probably should have made the same ABI change in
6096 3.4.0 as the one we made for unions. The latter was
6097 required by the SCD though, while the former is not
6098 specified, so we favored compatibility and efficiency.
6100 Now we're stuck for aggregates larger than 16 bytes,
6101 because OImode vanished in the meantime. Let's not
6102 try to be unduly clever, and simply follow the ABI
6103 for unions in that case. */
6104 if (mode == BLKmode)
6105 return function_arg_union_value (size, mode, 0, regbase);
6110 /* This must match sparc_promote_function_mode.
6111 ??? Maybe 32-bit pointers should actually remain in Pmode? */
6112 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6116 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
6117 regno = SPARC_FP_ARG_FIRST;
6121 return gen_rtx_REG (mode, regno);
6124 /* Handle TARGET_FUNCTION_VALUE.
6126 On SPARC the value is found in the first "output" register, but the called
6127 function leaves it in the first "input" register. */
6130 sparc_function_value (const_tree valtype,
6131 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
6134 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
6137 /* Handle TARGET_LIBCALL_VALUE. */
6140 sparc_libcall_value (enum machine_mode mode,
6141 const_rtx fun ATTRIBUTE_UNUSED)
6143 return sparc_function_value_1 (NULL_TREE, mode, false);
6146 /* Handle FUNCTION_VALUE_REGNO_P.
6147 On SPARC, the first "output" reg is used for integer values, and
6148 the first floating point register is used for floating point values. */
6151 sparc_function_value_regno_p (const unsigned int regno)
6153 return (regno == 8 || regno == 32);
6156 /* Do what is necessary for `va_start'. We look at the current function
6157 to determine if stdarg or varargs is used and return the address of
6158 the first unnamed parameter. */
6161 sparc_builtin_saveregs (void)
6163 int first_reg = crtl->args.info.words;
6167 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
6168 emit_move_insn (gen_rtx_MEM (word_mode,
6169 gen_rtx_PLUS (Pmode,
6171 GEN_INT (FIRST_PARM_OFFSET (0)
6174 gen_rtx_REG (word_mode,
6175 SPARC_INCOMING_INT_ARG_FIRST + regno));
6177 address = gen_rtx_PLUS (Pmode,
6179 GEN_INT (FIRST_PARM_OFFSET (0)
6180 + UNITS_PER_WORD * first_reg));
6185 /* Implement `va_start' for stdarg. */
6188 sparc_va_start (tree valist, rtx nextarg)
6190 nextarg = expand_builtin_saveregs ();
6191 std_expand_builtin_va_start (valist, nextarg);
6194 /* Implement `va_arg' for stdarg. */
6197 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6200 HOST_WIDE_INT size, rsize, align;
6203 tree ptrtype = build_pointer_type (type);
6205 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6208 size = rsize = UNITS_PER_WORD;
6214 size = int_size_in_bytes (type);
6215 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6220 /* For SPARC64, objects requiring 16-byte alignment get it. */
6221 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
6222 align = 2 * UNITS_PER_WORD;
6224 /* SPARC-V9 ABI states that structures up to 16 bytes in size
6225 are left-justified in their slots. */
6226 if (AGGREGATE_TYPE_P (type))
6229 size = rsize = UNITS_PER_WORD;
6239 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
6240 size_int (align - 1));
6241 incr = fold_convert (sizetype, incr);
6242 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
6244 incr = fold_convert (ptr_type_node, incr);
6247 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
6250 if (BYTES_BIG_ENDIAN && size < rsize)
6251 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
6252 size_int (rsize - size));
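/* E.g. (sketch): under the 64-bit ABI an int argument (size 4, rsize 8)
   sits in the last 4 bytes of its big-endian slot, so the address is
   bumped by rsize - size == 4 before the load.  */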
6256 addr = fold_convert (build_pointer_type (ptrtype), addr);
6257 addr = build_va_arg_indirect_ref (addr);
6260 /* If the address isn't aligned properly for the type, we need a temporary.
6261 FIXME: This is inefficient, usually we can do this in registers. */
6262 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
6264 tree tmp = create_tmp_var (type, "va_arg_tmp");
6265 tree dest_addr = build_fold_addr_expr (tmp);
6266 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
6267 3, dest_addr, addr, size_int (rsize));
6268 TREE_ADDRESSABLE (tmp) = 1;
6269 gimplify_and_add (copy, pre_p);
6274 addr = fold_convert (ptrtype, addr);
6277 = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
6278 gimplify_assign (valist, incr, post_p);
6280 return build_va_arg_indirect_ref (addr);
6283 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
6284 Specify whether the vector mode is supported by the hardware. */
6287 sparc_vector_mode_supported_p (enum machine_mode mode)
6289 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
6292 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
6294 static enum machine_mode
6295 sparc_preferred_simd_mode (enum machine_mode mode)
6313 /* Return the string to output an unconditional branch to LABEL, which is
6314 the operand number of the label.
6316 DEST is the destination insn (i.e. the label), INSN is the source. */
6319 output_ubranch (rtx dest, int label, rtx insn)
6321 static char string[64];
6322 bool v9_form = false;
6325 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
6327 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6328 - INSN_ADDRESSES (INSN_UID (insn)));
6329 /* Leave some instructions for "slop". */
6330 if (delta >= -260000 && delta < 260000)
6335 strcpy (string, "ba%*,pt\t%%xcc, ");
6337 strcpy (string, "b%*\t");
6339 p = strchr (string, '\0');
6350 /* Return the string to output a conditional branch to LABEL, which is
6351 the operand number of the label. OP is the conditional expression.
6352 XEXP (OP, 0) is assumed to be a condition code register (integer or
6353 floating point) and its mode specifies what kind of comparison we made.
6355 DEST is the destination insn (i.e. the label), INSN is the source.
6357 REVERSED is nonzero if we should reverse the sense of the comparison.
6359 ANNUL is nonzero if we should generate an annulling branch. */
6362 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
6365 static char string[64];
6366 enum rtx_code code = GET_CODE (op);
6367 rtx cc_reg = XEXP (op, 0);
6368 enum machine_mode mode = GET_MODE (cc_reg);
6369 const char *labelno, *branch;
6370 int spaces = 8, far;
6373 /* v9 branches are limited to +-1MB.  If the target is too far away, a branch
6386 such as "fbne,a,pn %fcc2, .LC29" is rewritten as the inverted branch around an unconditional "ba" to .LC29.  */
6394 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6397 /* Reversal of FP compares takes care -- an ordered compare
6398 becomes an unordered compare and vice versa. */
6399 if (mode == CCFPmode || mode == CCFPEmode)
6400 code = reverse_condition_maybe_unordered (code);
6402 code = reverse_condition (code);
6405 /* Start by writing the branch condition. */
6406 if (mode == CCFPmode || mode == CCFPEmode)
6457 /* ??? !v9: FP branches cannot be preceded by another floating point
6458 insn. Because there is currently no concept of pre-delay slots,
6459 we can fix this only by always emitting a nop before a floating point branch.  */
6464 strcpy (string, "nop\n\t");
6465 strcat (string, branch);
6478 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6490 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6511 strcpy (string, branch);
6513 spaces -= strlen (branch);
6514 p = strchr (string, '\0');
6516 /* Now add the annulling, the label, and a possible noop. */
6529 if (! far && insn && INSN_ADDRESSES_SET_P ())
6531 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6532 - INSN_ADDRESSES (INSN_UID (insn)));
6533 /* Leave some instructions for "slop". */
6534 if (delta < -260000 || delta >= 260000)
6538 if (mode == CCFPmode || mode == CCFPEmode)
6540 static char v9_fcc_labelno[] = "%%fccX, ";
6541 /* Set the char indicating the number of the fcc reg to use. */
6542 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6543 labelno = v9_fcc_labelno;
6546 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6550 else if (mode == CCXmode || mode == CCX_NOOVmode)
6552 labelno = "%%xcc, ";
6557 labelno = "%%icc, ";
6562 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6565 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6578 strcpy (p, labelno);
6579 p = strchr (p, '\0');
6582 strcpy (p, ".+12\n\t nop\n\tb\t");
6583 /* Skip the next insn if requested or
6584 if we know that it will be a nop. */
6585 if (annul || ! final_sequence)
6599 /* Emit a library call comparison between floating point X and Y.
6600 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
6601 Return the new operator to be used in the comparison sequence.
6603 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6604 values as arguments instead of the TFmode registers themselves,
6605 that's why we cannot call emit_float_lib_cmp. */
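/* For reference: the comparison helpers selected below conventionally
   return 0 for "equal", 1 for "less", 2 for "greater" and 3 for
   "unordered"; the tests against small constants further down assume
   that encoding, e.g. UNORDERED becomes "result == 3" and UNGT becomes
   "result > 1" (greater or unordered).  */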
6608 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6611 rtx slot0, slot1, result, tem, tem2, libfunc;
6612 enum machine_mode mode;
6613 enum rtx_code new_comparison;
6618 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
6622 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
6626 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
6630 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
6634 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
6638 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
6649 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
6662 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6663 emit_move_insn (slot0, x);
6670 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6671 emit_move_insn (slot1, y);
6674 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
6675 emit_library_call (libfunc, LCT_NORMAL,
6677 XEXP (slot0, 0), Pmode,
6678 XEXP (slot1, 0), Pmode);
6683 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
6684 emit_library_call (libfunc, LCT_NORMAL,
6686 x, TFmode, y, TFmode);
6691 /* Immediately move the result of the libcall into a pseudo
6692 register so reload doesn't clobber the value if it needs
6693 the return register for a spill reg. */
6694 result = gen_reg_rtx (mode);
6695 emit_move_insn (result, hard_libcall_value (mode, libfunc));
6700 return gen_rtx_NE (VOIDmode, result, const0_rtx);
6703 new_comparison = (comparison == UNORDERED ? EQ : NE);
6704 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
6707 new_comparison = (comparison == UNGT ? GT : NE);
6708 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
6710 return gen_rtx_NE (VOIDmode, result, const2_rtx);
6712 tem = gen_reg_rtx (mode);
6714 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6716 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6717 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
6720 tem = gen_reg_rtx (mode);
6722 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6724 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6725 tem2 = gen_reg_rtx (mode);
6727 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6729 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6730 new_comparison = (comparison == UNEQ ? EQ : NE);
6731 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
6737 /* Generate an unsigned DImode to FP conversion. This is the same code
6738 optabs would emit if we didn't have TFmode patterns. */
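/* A scalar sketch of the sequence emitted here, for illustration only
   (FLOAT stands for the target floating-point mode):

     if ((signed long long) x >= 0)
       result = (FLOAT) x;
     else
       {
         unsigned long long half = (x >> 1) | (x & 1);
         result = (FLOAT) half + (FLOAT) half;
       }

   Halving brings the value into signed range; folding the discarded
   low bit back in keeps the final rounding correct.  */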
6741 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6743 rtx neglab, donelab, i0, i1, f0, in, out;
6746 in = force_reg (DImode, operands[1]);
6747 neglab = gen_label_rtx ();
6748 donelab = gen_label_rtx ();
6749 i0 = gen_reg_rtx (DImode);
6750 i1 = gen_reg_rtx (DImode);
6751 f0 = gen_reg_rtx (mode);
6753 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6755 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6756 emit_jump_insn (gen_jump (donelab));
6759 emit_label (neglab);
6761 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6762 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6763 emit_insn (gen_iordi3 (i0, i0, i1));
6764 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6765 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6767 emit_label (donelab);
6770 /* Generate an FP to unsigned DImode conversion. This is the same code
6771 optabs would emit if we didn't have TFmode patterns. */
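/* A scalar sketch of the sequence emitted here, for illustration only:

     if (x < 9223372036854775808.0)            that is, x < 2**63
       result = (long long) x;
     else
       result = (long long) (x - 9223372036854775808.0)
                ^ ((unsigned long long) 1 << 63);
 */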
6774 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6776 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6779 in = force_reg (mode, operands[1]);
6780 neglab = gen_label_rtx ();
6781 donelab = gen_label_rtx ();
6782 i0 = gen_reg_rtx (DImode);
6783 i1 = gen_reg_rtx (DImode);
6784 limit = gen_reg_rtx (mode);
6785 f0 = gen_reg_rtx (mode);
6787 emit_move_insn (limit,
6788 CONST_DOUBLE_FROM_REAL_VALUE (
6789 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6790 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6792 emit_insn (gen_rtx_SET (VOIDmode,
6794 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6795 emit_jump_insn (gen_jump (donelab));
6798 emit_label (neglab);
6800 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6801 emit_insn (gen_rtx_SET (VOIDmode,
6803 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6804 emit_insn (gen_movdi (i1, const1_rtx));
6805 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6806 emit_insn (gen_xordi3 (out, i0, i1));
6808 emit_label (donelab);
6811 /* Return the string to output a conditional branch to LABEL, testing
6812 register REG. LABEL is the operand number of the label; REG is the
6813 operand number of the reg. OP is the conditional expression. The mode
6814 of REG says what kind of comparison we made.
6816 DEST is the destination insn (i.e. the label), INSN is the source.
6818 REVERSED is nonzero if we should reverse the sense of the comparison.
6820 ANNUL is nonzero if we should generate an annulling branch. */
6823 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6824 int annul, rtx insn)
6826 static char string[64];
6827 enum rtx_code code = GET_CODE (op);
6828 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6833 /* Branch-on-register instructions are limited to +-128KB. If it is too far away,
6846 brgez,a,pn %o1, .LC29
6852 ba,pt %xcc, .LC29 */
6854 far = get_attr_length (insn) >= 3;
6856 /* If not floating-point or if EQ or NE, we can just reverse the code. */
6858 code = reverse_condition (code);
6860 /* Only 64 bit versions of these instructions exist. */
6861 gcc_assert (mode == DImode);
6863 /* Start by writing the branch condition. */
6868 strcpy (string, "brnz");
6872 strcpy (string, "brz");
6876 strcpy (string, "brgez");
6880 strcpy (string, "brlz");
6884 strcpy (string, "brlez");
6888 strcpy (string, "brgz");
6895 p = strchr (string, '\0');
6897 /* Now add the annulling, reg, label, and nop. */
6904 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6907 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6912 *p = p < string + 8 ? '\t' : ' ';
6920 int veryfar = 1, delta;
6922 if (INSN_ADDRESSES_SET_P ())
6924 delta = (INSN_ADDRESSES (INSN_UID (dest))
6925 - INSN_ADDRESSES (INSN_UID (insn)));
6926 /* Leave some instructions for "slop". */
6927 if (delta >= -260000 && delta < 260000)
6931 strcpy (p, ".+12\n\t nop\n\t");
6932 /* Skip the next insn if requested or
6933 if we know that it will be a nop. */
6934 if (annul || ! final_sequence)
6944 strcpy (p, "ba,pt\t%%xcc, ");
6958 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
6959 Such instructions cannot be used in the delay slot of return insn on v9.
6960 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
6964 epilogue_renumber (register rtx *where, int test)
6966 register const char *fmt;
6968 register enum rtx_code code;
6973 code = GET_CODE (*where);
6978 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6980 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6981 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
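	  /* E.g. (reg:SI 24 %i0) is rewritten to (reg:SI 8 %o0), so an
	     insn placed in the return delay slot still names the right
	     physical register once the register window has shifted.  */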
6989 /* Do not replace the frame pointer with the stack pointer because
6990 it can cause the delayed instruction to load below the stack.
6991 This occurs when instructions like:
6993 (set (reg/i:SI 24 %i0)
6994 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6995 (const_int -20 [0xffffffec])) 0))
6997 are in the return delayed slot. */
6999 if (GET_CODE (XEXP (*where, 0)) == REG
7000 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
7001 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
7002 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
7007 if (SPARC_STACK_BIAS
7008 && GET_CODE (XEXP (*where, 0)) == REG
7009 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
7017 fmt = GET_RTX_FORMAT (code);
7019 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7024 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
7025 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
7028 else if (fmt[i] == 'e'
7029 && epilogue_renumber (&(XEXP (*where, i)), test))
7035 /* Leaf functions and non-leaf functions have different needs. */
7038 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
7041 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
7043 static const int *const reg_alloc_orders[] = {
7044 reg_leaf_alloc_order,
7045 reg_nonleaf_alloc_order};
7048 order_regs_for_local_alloc (void)
7050 static int last_order_nonleaf = 1;
7052 if (df_regs_ever_live_p (15) != last_order_nonleaf)
7054 last_order_nonleaf = !last_order_nonleaf;
7055 memcpy ((char *) reg_alloc_order,
7056 (const char *) reg_alloc_orders[last_order_nonleaf],
7057 FIRST_PSEUDO_REGISTER * sizeof (int));
7061 /* Return 1 if REG and MEM are legitimate enough to allow the various
7062 mem<-->reg splits to be run. */
7065 sparc_splitdi_legitimate (rtx reg, rtx mem)
7067 /* Punt if we are here by mistake. */
7068 gcc_assert (reload_completed);
7070 /* We must have an offsettable memory reference. */
7071 if (! offsettable_memref_p (mem))
7074 /* If we have legitimate args for ldd/std, we do not want
7075 the split to happen. */
7076 if ((REGNO (reg) % 2) == 0
7077 && mem_min_alignment (mem, 8))
7084 /* Return 1 if x and y are some kind of REG and they refer to
7085 different hard registers. This test is guaranteed to be
7086 run after reload. */
7089 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
7091 if (GET_CODE (x) != REG)
7093 if (GET_CODE (y) != REG)
7095 if (REGNO (x) == REGNO (y))
7100 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
7101 This makes them candidates for using ldd and std insns.
7103 Note reg1 and reg2 *must* be hard registers. */
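/* For example, %f0/%f1 form a valid pair, as do %o0/%o1 on pre-V9
   targets (integer ldd being deprecated on V9); %o1/%o2 never qualify
   because the first register number is odd.  */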
7106 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
7108 /* We might have been passed a SUBREG. */
7109 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
7112 if (REGNO (reg1) % 2 != 0)
7115 /* Integer ldd is deprecated in SPARC V9 */
7116 if (TARGET_V9 && REGNO (reg1) < 32)
7119 return (REGNO (reg1) == REGNO (reg2) - 1);
7122 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
7125 This can only happen when addr1 and addr2, the addresses in mem1
7126 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
7127 addr1 must also be aligned on a 64-bit boundary.
7129 Also iff dependent_reg_rtx is not null it should not be used to
7130 compute the address for mem1, i.e. we cannot optimize a sequence
7142 But, note that the transformation from:
7147 is perfectly fine. Thus, the peephole2 patterns always pass us
7148 the destination register of the first load, never the second one.
7150 For stores we don't have a similar problem, so dependent_reg_rtx is NULL_RTX. */
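/* As a worked example: [%o2 + 8] and [%o2 + 12] with MEM_ALIGN of at
   least 64 bits are accepted, whereas [%o2 + 4] and [%o2 + 8] are
   rejected because the first offset is not a multiple of 8.  */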
7154 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
7158 HOST_WIDE_INT offset1;
7160 /* The mems cannot be volatile. */
7161 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
7164 /* MEM1 should be aligned on a 64-bit boundary. */
7165 if (MEM_ALIGN (mem1) < 64)
7168 addr1 = XEXP (mem1, 0);
7169 addr2 = XEXP (mem2, 0);
7171 /* Extract a register number and offset (if used) from the first addr. */
7172 if (GET_CODE (addr1) == PLUS)
7174 /* If not a REG, return zero. */
7175 if (GET_CODE (XEXP (addr1, 0)) != REG)
7179 reg1 = REGNO (XEXP (addr1, 0));
7180 /* The offset must be constant! */
7181 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
7183 offset1 = INTVAL (XEXP (addr1, 1));
7186 else if (GET_CODE (addr1) != REG)
7190 reg1 = REGNO (addr1);
7191 /* This was a simple (mem (reg)) expression. Offset is 0. */
7195 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
7196 if (GET_CODE (addr2) != PLUS)
7199 if (GET_CODE (XEXP (addr2, 0)) != REG
7200 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
7203 if (reg1 != REGNO (XEXP (addr2, 0)))
7206 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
7209 /* The first offset must be evenly divisible by 8 to ensure the
7210 address is 64 bit aligned. */
7211 if (offset1 % 8 != 0)
7214 /* The offset for the second addr must be 4 more than the first addr. */
7215 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
7218 /* All the tests passed. addr1 and addr2 are valid for ldd and std
7223 /* Return 1 if reg is a pseudo, or is the first register in
7224 a hard register pair. This makes it suitable for use in
7225 ldd and std insns. */
7228 register_ok_for_ldd (rtx reg)
7230 /* We might have been passed a SUBREG. */
7234 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
7235 return (REGNO (reg) % 2 == 0);
7240 /* Return 1 if OP is a memory whose address is known to be
7241 aligned to 8-byte boundary, or a pseudo during reload.
7242 This makes it suitable for use in ldd and std insns. */
7245 memory_ok_for_ldd (rtx op)
7249 /* In 64-bit mode, we assume that the address is word-aligned. */
7250 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
7253 if ((reload_in_progress || reload_completed)
7254 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
7257 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
7259 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
7268 /* Print operand X (an rtx) in assembler syntax to file FILE.
7269 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
7270 For `%' followed by punctuation, CODE is the punctuation and X is null. */
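/* For instance, with operand 1 a DImode value in the register pair
   %o0/%o1, "%H1" prints %o0 and "%L1" prints %o1 on this big-endian
   target, and "%R1" prints the second register of the pair, %o1 (see
   the 'H', 'L' and 'R' cases below).  */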
7273 print_operand (FILE *file, rtx x, int code)
7278 /* Output an insn in a delay slot. */
7280 sparc_indent_opcode = 1;
7282 fputs ("\n\t nop", file);
7285 /* Output an annul flag if there's nothing for the delay slot and we
7286 are optimizing. This is always used with '(' below.
7287 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
7288 this is a dbx bug. So, we only do this when optimizing.
7289 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
7290 Always emit a nop in case the next instruction is a branch. */
7291 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
7295 /* Output a 'nop' if there's nothing for the delay slot and we are
7296 not optimizing. This is always used with '*' above. */
7297 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
7298 fputs ("\n\t nop", file);
7299 else if (final_sequence)
7300 sparc_indent_opcode = 1;
7303 /* Output the right displacement from the saved PC on function return.
7304 The caller may have placed an "unimp" insn immediately after the call
7305 so we have to account for it. This insn is used in the 32-bit ABI
7306 when calling a function that returns a non zero-sized structure. The
7307 64-bit ABI doesn't have it. Be careful to have this test be the same
7308 as that for the call. The exception is when sparc_std_struct_return
7309 is enabled, the psABI is followed exactly and the adjustment is made
7310 by the code in sparc_struct_value_rtx. The call emitted is the same
7311 when sparc_std_struct_return is enabled. */
7313 && cfun->returns_struct
7314 && !sparc_std_struct_return
7315 && DECL_SIZE (DECL_RESULT (current_function_decl))
7316 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
7318 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
7324 /* Output the Embedded Medium/Anywhere code model base register. */
7325 fputs (EMBMEDANY_BASE_REG, file);
7328 /* Print some local dynamic TLS name. */
7329 assemble_name (file, get_some_local_dynamic_name ());
7333 /* Adjust the operand to take into account a RESTORE operation. */
7334 if (GET_CODE (x) == CONST_INT)
7336 else if (GET_CODE (x) != REG)
7337 output_operand_lossage ("invalid %%Y operand");
7338 else if (REGNO (x) < 8)
7339 fputs (reg_names[REGNO (x)], file);
7340 else if (REGNO (x) >= 24 && REGNO (x) < 32)
7341 fputs (reg_names[REGNO (x)-16], file);
7343 output_operand_lossage ("invalid %%Y operand");
7346 /* Print out the low order register name of a register pair. */
7347 if (WORDS_BIG_ENDIAN)
7348 fputs (reg_names[REGNO (x)+1], file);
7350 fputs (reg_names[REGNO (x)], file);
7353 /* Print out the high order register name of a register pair. */
7354 if (WORDS_BIG_ENDIAN)
7355 fputs (reg_names[REGNO (x)], file);
7357 fputs (reg_names[REGNO (x)+1], file);
7360 /* Print out the second register name of a register pair or quad.
7361 I.e., R (%o0) => %o1. */
7362 fputs (reg_names[REGNO (x)+1], file);
7365 /* Print out the third register name of a register quad.
7366 I.e., S (%o0) => %o2. */
7367 fputs (reg_names[REGNO (x)+2], file);
7370 /* Print out the fourth register name of a register quad.
7371 I.e., T (%o0) => %o3. */
7372 fputs (reg_names[REGNO (x)+3], file);
7375 /* Print a condition code register. */
7376 if (REGNO (x) == SPARC_ICC_REG)
7378 /* We don't handle CC[X]_NOOVmode because they're not supposed to occur here. */
7380 if (GET_MODE (x) == CCmode)
7381 fputs ("%icc", file);
7382 else if (GET_MODE (x) == CCXmode)
7383 fputs ("%xcc", file);
7388 /* %fccN register */
7389 fputs (reg_names[REGNO (x)], file);
7392 /* Print the operand's address only. */
7393 output_address (XEXP (x, 0));
7396 /* In this case we need a register. Use %g0 if the
7397 operand is const0_rtx. */
7399 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7401 fputs ("%g0", file);
7408 switch (GET_CODE (x))
7410 case IOR: fputs ("or", file); break;
7411 case AND: fputs ("and", file); break;
7412 case XOR: fputs ("xor", file); break;
7413 default: output_operand_lossage ("invalid %%A operand");
7418 switch (GET_CODE (x))
7420 case IOR: fputs ("orn", file); break;
7421 case AND: fputs ("andn", file); break;
7422 case XOR: fputs ("xnor", file); break;
7423 default: output_operand_lossage ("invalid %%B operand");
7427 /* These are used by the conditional move instructions. */
7431 enum rtx_code rc = GET_CODE (x);
7435 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7436 if (mode == CCFPmode || mode == CCFPEmode)
7437 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7439 rc = reverse_condition (GET_CODE (x));
7443 case NE: fputs ("ne", file); break;
7444 case EQ: fputs ("e", file); break;
7445 case GE: fputs ("ge", file); break;
7446 case GT: fputs ("g", file); break;
7447 case LE: fputs ("le", file); break;
7448 case LT: fputs ("l", file); break;
7449 case GEU: fputs ("geu", file); break;
7450 case GTU: fputs ("gu", file); break;
7451 case LEU: fputs ("leu", file); break;
7452 case LTU: fputs ("lu", file); break;
7453 case LTGT: fputs ("lg", file); break;
7454 case UNORDERED: fputs ("u", file); break;
7455 case ORDERED: fputs ("o", file); break;
7456 case UNLT: fputs ("ul", file); break;
7457 case UNLE: fputs ("ule", file); break;
7458 case UNGT: fputs ("ug", file); break;
7459 case UNGE: fputs ("uge", file); break;
7460 case UNEQ: fputs ("ue", file); break;
7461 default: output_operand_lossage (code == 'c'
7462 ? "invalid %%c operand"
7463 : "invalid %%C operand");
7468 /* These are used by the movr instruction pattern. */
7472 enum rtx_code rc = (code == 'd'
7473 ? reverse_condition (GET_CODE (x))
7477 case NE: fputs ("ne", file); break;
7478 case EQ: fputs ("e", file); break;
7479 case GE: fputs ("gez", file); break;
7480 case LT: fputs ("lz", file); break;
7481 case LE: fputs ("lez", file); break;
7482 case GT: fputs ("gz", file); break;
7483 default: output_operand_lossage (code == 'd'
7484 ? "invalid %%d operand"
7485 : "invalid %%D operand");
7492 /* Print a sign-extended character. */
7493 int i = trunc_int_for_mode (INTVAL (x), QImode);
7494 fprintf (file, "%d", i);
7499 /* Operand must be a MEM; write its address. */
7500 if (GET_CODE (x) != MEM)
7501 output_operand_lossage ("invalid %%f operand");
7502 output_address (XEXP (x, 0));
7507 /* Print a sign-extended 32-bit value. */
7509 if (GET_CODE(x) == CONST_INT)
7511 else if (GET_CODE(x) == CONST_DOUBLE)
7512 i = CONST_DOUBLE_LOW (x);
7515 output_operand_lossage ("invalid %%s operand");
7518 i = trunc_int_for_mode (i, SImode);
7519 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7524 /* Do nothing special. */
7528 /* Undocumented flag. */
7529 output_operand_lossage ("invalid operand output code");
7532 if (GET_CODE (x) == REG)
7533 fputs (reg_names[REGNO (x)], file);
7534 else if (GET_CODE (x) == MEM)
7537 /* Poor Sun assembler doesn't understand absolute addressing. */
7538 if (CONSTANT_P (XEXP (x, 0)))
7539 fputs ("%g0+", file);
7540 output_address (XEXP (x, 0));
7543 else if (GET_CODE (x) == HIGH)
7545 fputs ("%hi(", file);
7546 output_addr_const (file, XEXP (x, 0));
7549 else if (GET_CODE (x) == LO_SUM)
7551 print_operand (file, XEXP (x, 0), 0);
7552 if (TARGET_CM_MEDMID)
7553 fputs ("+%l44(", file);
7555 fputs ("+%lo(", file);
7556 output_addr_const (file, XEXP (x, 1));
7559 else if (GET_CODE (x) == CONST_DOUBLE
7560 && (GET_MODE (x) == VOIDmode
7561 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7563 if (CONST_DOUBLE_HIGH (x) == 0)
7564 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7565 else if (CONST_DOUBLE_HIGH (x) == -1
7566 && CONST_DOUBLE_LOW (x) < 0)
7567 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7569 output_operand_lossage ("long long constant not a valid immediate operand");
7571 else if (GET_CODE (x) == CONST_DOUBLE)
7572 output_operand_lossage ("floating point constant not a valid immediate operand");
7573 else { output_addr_const (file, x); }
7576 /* Target hook for assembling integer objects. The sparc version has
7577 special handling for aligned DI-mode objects. */
7580 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7582 /* ??? We only output .xword's for symbols and only then in environments
7583 where the assembler can handle them. */
7584 if (aligned_p && size == 8
7585 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7589 assemble_integer_with_op ("\t.xword\t", x);
7594 assemble_aligned_integer (4, const0_rtx);
7595 assemble_aligned_integer (4, x);
7599 return default_assemble_integer (x, size, aligned_p);
7602 /* Return the value of a code used in the .proc pseudo-op that says
7603 what kind of result this function returns. For non-C types, we pick
7604 the closest C type. */
7606 #ifndef SHORT_TYPE_SIZE
7607 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7610 #ifndef INT_TYPE_SIZE
7611 #define INT_TYPE_SIZE BITS_PER_WORD
7614 #ifndef LONG_TYPE_SIZE
7615 #define LONG_TYPE_SIZE BITS_PER_WORD
7618 #ifndef LONG_LONG_TYPE_SIZE
7619 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7622 #ifndef FLOAT_TYPE_SIZE
7623 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7626 #ifndef DOUBLE_TYPE_SIZE
7627 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7630 #ifndef LONG_DOUBLE_TYPE_SIZE
7631 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7635 sparc_type_code (register tree type)
7637 register unsigned long qualifiers = 0;
7638 register unsigned shift;
7640 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7641 setting more, since some assemblers will give an error for this. Also,
7642 we must be careful to avoid shifts of 32 bits or more to avoid getting
7643 unpredictable results. */
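  /* As an example of the encoding, "unsigned short *" comes out as
     (1 << 6) | 13: the pointer qualifier occupies the first two-bit
     slot (shift 6) and 13 is the base code chosen below for an
     unsigned type no wider than short.  */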
7645 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7647 switch (TREE_CODE (type))
7653 qualifiers |= (3 << shift);
7658 qualifiers |= (2 << shift);
7662 case REFERENCE_TYPE:
7664 qualifiers |= (1 << shift);
7668 return (qualifiers | 8);
7671 case QUAL_UNION_TYPE:
7672 return (qualifiers | 9);
7675 return (qualifiers | 10);
7678 return (qualifiers | 16);
7681 /* If this is a range type, consider it to be the underlying type. */
7683 if (TREE_TYPE (type) != 0)
7686 /* Carefully distinguish all the standard types of C,
7687 without messing up if the language is not C. We do this by
7688 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7689 look at both the names and the above fields, but that's redundant.
7690 Any type whose size is between two C types will be considered
7691 to be the wider of the two types. Also, we do not have a
7692 special code to use for "long long", so anything wider than
7693 long is treated the same. Note that we can't distinguish
7694 between "int" and "long" in this code if they are the same
7695 size, but that's fine, since neither can the assembler. */
7697 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7698 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7700 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7701 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7703 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7704 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7707 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7710 /* If this is a range type, consider it to be the underlying type. */
7712 if (TREE_TYPE (type) != 0)
7715 /* Carefully distinguish all the standard types of C,
7716 without messing up if the language is not C. */
7718 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7719 return (qualifiers | 6);
7722 return (qualifiers | 7);
7724 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7725 /* ??? We need to distinguish between double and float complex types,
7726 but I don't know how yet because I can't reach this code from
7727 existing front-ends. */
7728 return (qualifiers | 7); /* Who knows? */
7731 case BOOLEAN_TYPE: /* Boolean truth value type. */
7732 case LANG_TYPE: /* ? */
7736 gcc_unreachable (); /* Not a type! */
7743 /* Nested function support. */
7745 /* Emit RTL insns to initialize the variable parts of a trampoline.
7746 FNADDR is an RTX for the address of the function's pure code.
7747 CXT is an RTX for the static chain value for the function.
7749 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7750 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7751 (to store insns). This is a bit excessive. Perhaps a different
7752 mechanism would be better here.
7754 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7757 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
7759 /* SPARC 32-bit trampoline:
7762 sethi %hi(static), %g2
7764 or %g2, %lo(static), %g2
7766 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7767 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7771 (adjust_address (m_tramp, SImode, 0),
7772 expand_binop (SImode, ior_optab,
7773 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7774 size_int (10), 0, 1),
7775 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7776 NULL_RTX, 1, OPTAB_DIRECT));
7779 (adjust_address (m_tramp, SImode, 4),
7780 expand_binop (SImode, ior_optab,
7781 expand_shift (RSHIFT_EXPR, SImode, cxt,
7782 size_int (10), 0, 1),
7783 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7784 NULL_RTX, 1, OPTAB_DIRECT));
7787 (adjust_address (m_tramp, SImode, 8),
7788 expand_binop (SImode, ior_optab,
7789 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7790 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7791 NULL_RTX, 1, OPTAB_DIRECT));
7794 (adjust_address (m_tramp, SImode, 12),
7795 expand_binop (SImode, ior_optab,
7796 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7797 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7798 NULL_RTX, 1, OPTAB_DIRECT));
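  /* Decoded, the four words stored above are, schematically:

	sethi	%hi(fnaddr), %g1
	sethi	%hi(cxt), %g2
	jmp	%g1 + %lo(fnaddr)
	 or	%g2, %lo(cxt), %g2	! executed in the delay slot

     where %hi is the address shifted right by 10 bits and %lo its low
     10 bits, exactly as split out by the expressions above.  */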
7800 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7801 aligned on a 16 byte boundary so one flush clears it all. */
7802 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
7803 if (sparc_cpu != PROCESSOR_ULTRASPARC
7804 && sparc_cpu != PROCESSOR_ULTRASPARC3
7805 && sparc_cpu != PROCESSOR_NIAGARA
7806 && sparc_cpu != PROCESSOR_NIAGARA2)
7807 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
7809 /* Call __enable_execute_stack after writing onto the stack to make sure
7810 the stack address is accessible. */
7811 #ifdef ENABLE_EXECUTE_STACK
7812 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7813 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
7818 /* The 64-bit version is simpler because it makes more sense to load the
7819 values as "immediate" data out of the trampoline. It's also easier since
7820 we can read the PC without clobbering a register. */
7823 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
7825 /* SPARC 64-bit trampoline:
7834 emit_move_insn (adjust_address (m_tramp, SImode, 0),
7835 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7836 emit_move_insn (adjust_address (m_tramp, SImode, 4),
7837 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7838 emit_move_insn (adjust_address (m_tramp, SImode, 8),
7839 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7840 emit_move_insn (adjust_address (m_tramp, SImode, 12),
7841 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7842 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
7843 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
7844 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
7846 if (sparc_cpu != PROCESSOR_ULTRASPARC
7847 && sparc_cpu != PROCESSOR_ULTRASPARC3
7848 && sparc_cpu != PROCESSOR_NIAGARA
7849 && sparc_cpu != PROCESSOR_NIAGARA2)
7850 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
7852 /* Call __enable_execute_stack after writing onto the stack to make sure
7853 the stack address is accessible. */
7854 #ifdef ENABLE_EXECUTE_STACK
7855 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7856 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
7860 /* Worker for TARGET_TRAMPOLINE_INIT. */
7863 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
7865 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
7866 cxt = force_reg (Pmode, cxt);
7868 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
7870 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
7873 /* Adjust the cost of a scheduling dependency. Return the new cost of
7874 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7877 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7879 enum attr_type insn_type;
7881 if (! recog_memoized (insn))
7884 insn_type = get_attr_type (insn);
7886 if (REG_NOTE_KIND (link) == 0)
7888 /* Data dependency; DEP_INSN writes a register that INSN reads some
7891 /* if a load, then the dependence must be on the memory address;
7892 add an extra "cycle". Note that the cost could be two cycles
7893 if the reg was written late in an instruction group; we cannot tell here. */
7895 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7898 /* Get the delay only if the address of the store is the dependence. */
7899 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7901 rtx pat = PATTERN(insn);
7902 rtx dep_pat = PATTERN (dep_insn);
7904 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7905 return cost; /* This should not happen! */
7907 /* The dependency between the two instructions was on the data that
7908 is being stored. Assume that this implies that the address of the
7909 store is not dependent. */
7910 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7913 return cost + 3; /* An approximation. */
7916 /* A shift instruction cannot receive its data from an instruction
7917 in the same cycle; add a one cycle penalty. */
7918 if (insn_type == TYPE_SHIFT)
7919 return cost + 3; /* Split before cascade into shift. */
7923 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7924 INSN writes some cycles later. */
7926 /* These are only significant for the fpu unit; writing a fp reg before
7927 the fpu has finished with it stalls the processor. */
7929 /* Reusing an integer register causes no problems. */
7930 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7938 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7940 enum attr_type insn_type, dep_type;
7941 rtx pat = PATTERN(insn);
7942 rtx dep_pat = PATTERN (dep_insn);
7944 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7947 insn_type = get_attr_type (insn);
7948 dep_type = get_attr_type (dep_insn);
7950 switch (REG_NOTE_KIND (link))
7953 /* Data dependency; DEP_INSN writes a register that INSN reads some
7960 /* Get the delay iff the address of the store is the dependence. */
7961 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7964 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7971 /* If a load, then the dependence must be on the memory address. If
7972 the addresses aren't equal, then it might be a false dependency */
7973 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7975 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7976 || GET_CODE (SET_DEST (dep_pat)) != MEM
7977 || GET_CODE (SET_SRC (pat)) != MEM
7978 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7979 XEXP (SET_SRC (pat), 0)))
7987 /* Compare to branch latency is 0. There is no benefit from
7988 separating compare and branch. */
7989 if (dep_type == TYPE_COMPARE)
7991 /* Floating point compare to branch latency is less than
7992 compare to conditional move. */
7993 if (dep_type == TYPE_FPCMP)
8002 /* Anti-dependencies only penalize the fpu unit. */
8003 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8015 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
8019 case PROCESSOR_SUPERSPARC:
8020 cost = supersparc_adjust_cost (insn, link, dep, cost);
8022 case PROCESSOR_HYPERSPARC:
8023 case PROCESSOR_SPARCLITE86X:
8024 cost = hypersparc_adjust_cost (insn, link, dep, cost);
8033 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
8034 int sched_verbose ATTRIBUTE_UNUSED,
8035 int max_ready ATTRIBUTE_UNUSED)
8039 sparc_use_sched_lookahead (void)
8041 if (sparc_cpu == PROCESSOR_NIAGARA
8042 || sparc_cpu == PROCESSOR_NIAGARA2)
8044 if (sparc_cpu == PROCESSOR_ULTRASPARC
8045 || sparc_cpu == PROCESSOR_ULTRASPARC3)
8047 if ((1 << sparc_cpu) &
8048 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
8049 (1 << PROCESSOR_SPARCLITE86X)))
8055 sparc_issue_rate (void)
8059 case PROCESSOR_NIAGARA:
8060 case PROCESSOR_NIAGARA2:
8064 /* Assume V9 processors are capable of at least dual-issue. */
8066 case PROCESSOR_SUPERSPARC:
8068 case PROCESSOR_HYPERSPARC:
8069 case PROCESSOR_SPARCLITE86X:
8071 case PROCESSOR_ULTRASPARC:
8072 case PROCESSOR_ULTRASPARC3:
8078 set_extends (rtx insn)
8080 register rtx pat = PATTERN (insn);
8082 switch (GET_CODE (SET_SRC (pat)))
8084 /* Load and some shift instructions zero extend. */
8087 /* sethi clears the high bits */
8089 /* LO_SUM is used with sethi. sethi cleared the high
8090 bits and the values used with lo_sum are positive */
8092 /* Store flag stores 0 or 1 */
8102 rtx op0 = XEXP (SET_SRC (pat), 0);
8103 rtx op1 = XEXP (SET_SRC (pat), 1);
8104 if (GET_CODE (op1) == CONST_INT)
8105 return INTVAL (op1) >= 0;
8106 if (GET_CODE (op0) != REG)
8108 if (sparc_check_64 (op0, insn) == 1)
8110 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8115 rtx op0 = XEXP (SET_SRC (pat), 0);
8116 rtx op1 = XEXP (SET_SRC (pat), 1);
8117 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
8119 if (GET_CODE (op1) == CONST_INT)
8120 return INTVAL (op1) >= 0;
8121 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8124 return GET_MODE (SET_SRC (pat)) == SImode;
8125 /* Positive integers leave the high bits zero. */
8127 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
8129 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
8132 return - (GET_MODE (SET_SRC (pat)) == SImode);
8134 return sparc_check_64 (SET_SRC (pat), insn);
8140 /* We _ought_ to have only one kind per function, but... */
8141 static GTY(()) rtx sparc_addr_diff_list;
8142 static GTY(()) rtx sparc_addr_list;
8145 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
8147 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
8149 sparc_addr_diff_list
8150 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
8152 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
8156 sparc_output_addr_vec (rtx vec)
8158 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8159 int idx, vlen = XVECLEN (body, 0);
8161 #ifdef ASM_OUTPUT_ADDR_VEC_START
8162 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8165 #ifdef ASM_OUTPUT_CASE_LABEL
8166 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8169 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8172 for (idx = 0; idx < vlen; idx++)
8174 ASM_OUTPUT_ADDR_VEC_ELT
8175 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
8178 #ifdef ASM_OUTPUT_ADDR_VEC_END
8179 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8184 sparc_output_addr_diff_vec (rtx vec)
8186 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8187 rtx base = XEXP (XEXP (body, 0), 0);
8188 int idx, vlen = XVECLEN (body, 1);
8190 #ifdef ASM_OUTPUT_ADDR_VEC_START
8191 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8194 #ifdef ASM_OUTPUT_CASE_LABEL
8195 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8198 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8201 for (idx = 0; idx < vlen; idx++)
8203 ASM_OUTPUT_ADDR_DIFF_ELT
8206 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
8207 CODE_LABEL_NUMBER (base));
8210 #ifdef ASM_OUTPUT_ADDR_VEC_END
8211 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8216 sparc_output_deferred_case_vectors (void)
8221 if (sparc_addr_list == NULL_RTX
8222 && sparc_addr_diff_list == NULL_RTX)
8225 /* Align to cache line in the function's code section. */
8226 switch_to_section (current_function_section ());
8228 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
8230 ASM_OUTPUT_ALIGN (asm_out_file, align);
8232 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
8233 sparc_output_addr_vec (XEXP (t, 0));
8234 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
8235 sparc_output_addr_diff_vec (XEXP (t, 0));
8237 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
8240 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
8241 unknown. Return 1 if the high bits are zero, -1 if the register is sign extended. */
8244 sparc_check_64 (rtx x, rtx insn)
8246 /* If a register is set only once it is safe to ignore insns this
8247 code does not know how to handle. The loop will either recognize
8248 the single set and return the correct value or fail to recognize
8253 gcc_assert (GET_CODE (x) == REG);
8255 if (GET_MODE (x) == DImode)
8256 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
8258 if (flag_expensive_optimizations
8259 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
8265 insn = get_last_insn_anywhere ();
8270 while ((insn = PREV_INSN (insn)))
8272 switch (GET_CODE (insn))
8285 rtx pat = PATTERN (insn);
8286 if (GET_CODE (pat) != SET)
8288 if (rtx_equal_p (x, SET_DEST (pat)))
8289 return set_extends (insn);
8290 if (y && rtx_equal_p (y, SET_DEST (pat)))
8291 return set_extends (insn);
8292 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
8300 /* Returns assembly code to perform a DImode shift using
8301 a 64-bit global or out register on SPARC-V8+. */
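/* For the scratch-register alternative the expansion looks roughly like
   this (shown for opcode "sllx", with %g1 standing in for the scratch;
   a sketch, the exact registers depend on the alternative chosen):

	sllx	%H1, 32, %g1	! build the 64-bit source in the scratch
	srl	%L1, 0, %L1	! zero-extend the low word if needed
	or	%L1, %g1, %g1
	sllx	%g1, %2, %g1	! perform the DImode shift
	srlx	%g1, 32, %H0	! split the result back into a pair
	mov	%g1, %L0
 */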
8303 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
8305 static char asm_code[60];
8307 /* The scratch register is only required when the destination
8308 register is not a 64-bit global or out register. */
8309 if (which_alternative != 2)
8310 operands[3] = operands[0];
8312 /* We can only shift by constants <= 63. */
8313 if (GET_CODE (operands[2]) == CONST_INT)
8314 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
8316 if (GET_CODE (operands[1]) == CONST_INT)
8318 output_asm_insn ("mov\t%1, %3", operands);
8322 output_asm_insn ("sllx\t%H1, 32, %3", operands);
8323 if (sparc_check_64 (operands[1], insn) <= 0)
8324 output_asm_insn ("srl\t%L1, 0, %L1", operands);
8325 output_asm_insn ("or\t%L1, %3, %3", operands);
8328 strcpy(asm_code, opcode);
8330 if (which_alternative != 2)
8331 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
8333 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
8336 /* Output rtl to increment the profiler label LABELNO
8337 for profiling a function entry. */
8340 sparc_profile_hook (int labelno)
8345 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
8346 if (NO_PROFILE_COUNTERS)
8348 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
8352 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8353 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
8354 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
8358 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
8361 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
8362 tree decl ATTRIBUTE_UNUSED)
8364 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
8366 if (!(flags & SECTION_DEBUG))
8367 fputs (",#alloc", asm_out_file);
8368 if (flags & SECTION_WRITE)
8369 fputs (",#write", asm_out_file);
8370 if (flags & SECTION_TLS)
8371 fputs (",#tls", asm_out_file);
8372 if (flags & SECTION_CODE)
8373 fputs (",#execinstr", asm_out_file);
8375 /* ??? Handle SECTION_BSS. */
8377 fputc ('\n', asm_out_file);
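/* For example, a writable TLS section named ".tdata" (and not marked
   SECTION_DEBUG) is emitted as

	.section	".tdata",#alloc,#write,#tls

   while a code section gets ",#execinstr" appended instead.  */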
8380 /* We do not allow indirect calls to be optimized into sibling calls.
8382 We cannot use sibling calls when delayed branches are disabled
8383 because they will likely require the call delay slot to be filled.
8385 Also, on SPARC 32-bit we cannot emit a sibling call when the
8386 current function returns a structure. This is because the "unimp
8387 after call" convention would cause the callee to return to the
8388 wrong place. The generic code already disallows cases where the
8389 function being called returns a structure.
8391 It may seem strange how this last case could occur. Usually there
8392 is code after the call which jumps to epilogue code which dumps the
8393 return value into the struct return area. That ought to invalidate
8394 the sibling call right? Well, in the C++ case we can end up passing
8395 the pointer to the struct return area to a constructor (which returns
8396 void) and then nothing else happens. Such a sibling call would look
8397 valid without the added check here.
8399 VxWorks PIC PLT entries require the global pointer to be initialized
8400 on entry. We therefore can't emit sibling calls to them. */
8402 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8405 && flag_delayed_branch
8406 && (TARGET_ARCH64 || ! cfun->returns_struct)
8407 && !(TARGET_VXWORKS_RTP
8409 && !targetm.binds_local_p (decl)));
8412 /* libfunc renaming. */
8413 #include "config/gofast.h"
8416 sparc_init_libfuncs (void)
8420 /* Use the subroutines that Sun's library provides for integer
8421 multiply and divide. The `*' prevents an underscore from
8422 being prepended by the compiler. .umul is a little faster than .mul. */
8424 set_optab_libfunc (smul_optab, SImode, "*.umul");
8425 set_optab_libfunc (sdiv_optab, SImode, "*.div");
8426 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
8427 set_optab_libfunc (smod_optab, SImode, "*.rem");
8428 set_optab_libfunc (umod_optab, SImode, "*.urem");
8430 /* TFmode arithmetic. These names are part of the SPARC 32bit ABI. */
8431 set_optab_libfunc (add_optab, TFmode, "_Q_add");
8432 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
8433 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
8434 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
8435 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
8437 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
8438 is because with soft-float, the SFmode and DFmode sqrt
8439 instructions will be absent, and the compiler will notice and
8440 try to use the TFmode sqrt instruction for calls to the
8441 builtin function sqrt, but this fails. */
8443 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
8445 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
8446 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
8447 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
8448 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
8449 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
8450 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
8452 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
8453 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
8454 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
8455 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
8457 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
8458 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
8459 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
8460 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
8462 if (DITF_CONVERSION_LIBFUNCS)
8464 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
8465 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
8466 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
8467 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
8470 if (SUN_CONVERSION_LIBFUNCS)
8472 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
8473 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
8474 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
8475 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
8480 /* In the SPARC 64bit ABI, SImode multiply and divide functions
8481 do not exist in the library. Make sure the compiler does not
8482 emit calls to them by accident. (It should always use the
8483 hardware instructions.) */
8484 set_optab_libfunc (smul_optab, SImode, 0);
8485 set_optab_libfunc (sdiv_optab, SImode, 0);
8486 set_optab_libfunc (udiv_optab, SImode, 0);
8487 set_optab_libfunc (smod_optab, SImode, 0);
8488 set_optab_libfunc (umod_optab, SImode, 0);
8490 if (SUN_INTEGER_MULTIPLY_64)
8492 set_optab_libfunc (smul_optab, DImode, "__mul64");
8493 set_optab_libfunc (sdiv_optab, DImode, "__div64");
8494 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
8495 set_optab_libfunc (smod_optab, DImode, "__rem64");
8496 set_optab_libfunc (umod_optab, DImode, "__urem64");
8499 if (SUN_CONVERSION_LIBFUNCS)
8501 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
8502 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
8503 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
8504 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
8508 gofast_maybe_init_libfuncs ();
8511 #define def_builtin(NAME, CODE, TYPE) \
8512 add_builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
8515 /* Implement the TARGET_INIT_BUILTINS target hook.
8516 Create builtin functions for special SPARC instructions. */
8519 sparc_init_builtins (void)
8522 sparc_vis_init_builtins ();
8525 /* Create builtin functions for VIS 1.0 instructions. */
8528 sparc_vis_init_builtins (void)
8530 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
8531 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
8532 tree v4hi = build_vector_type (intHI_type_node, 4);
8533 tree v2hi = build_vector_type (intHI_type_node, 2);
8534 tree v2si = build_vector_type (intSI_type_node, 2);
8536 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
8537 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
8538 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
8539 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
8540 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
8541 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
8542 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
8543 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
8544 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
8545 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
8546 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
8547 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
8548 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
8550 intDI_type_node, 0);
8551 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
8553 intDI_type_node, 0);
8554 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
8556 intSI_type_node, 0);
8557 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
8559 intDI_type_node, 0);
8561 /* Packing and expanding vectors. */
8562 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
8563 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
8564 v8qi_ftype_v2si_v8qi);
8565 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
8567 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
8568 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
8569 v8qi_ftype_v4qi_v4qi);
8571 /* Multiplications. */
8572 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
8573 v4hi_ftype_v4qi_v4hi);
8574 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
8575 v4hi_ftype_v4qi_v2hi);
8576 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
8577 v4hi_ftype_v4qi_v2hi);
8578 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
8579 v4hi_ftype_v8qi_v4hi);
8580 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8581 v4hi_ftype_v8qi_v4hi);
8582 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8583 v2si_ftype_v4qi_v2hi);
8584 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8585 v2si_ftype_v4qi_v2hi);
8587 /* Data aligning. */
8588 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8589 v4hi_ftype_v4hi_v4hi);
8590 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8591 v8qi_ftype_v8qi_v8qi);
8592 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8593 v2si_ftype_v2si_v2si);
8594 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8597 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8600 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8603 /* Pixel distance. */
8604 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8605 di_ftype_v8qi_v8qi_di);
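/* A sketch of how these builtins are reached from user code through
   GCC's generic vector types (the typedef names below are arbitrary,
   only the element types and sizes matter):

     typedef short         v4hi __attribute__ ((vector_size (8)));
     typedef unsigned char v4qi __attribute__ ((vector_size (4)));

     v4qi pack16 (v4hi x) { return __builtin_vis_fpack16 (x); }
 */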
8608 /* Handle TARGET_EXPAND_BUILTIN target hook.
8609 Expand builtin functions for sparc intrinsics. */
8612 sparc_expand_builtin (tree exp, rtx target,
8613 rtx subtarget ATTRIBUTE_UNUSED,
8614 enum machine_mode tmode ATTRIBUTE_UNUSED,
8615 int ignore ATTRIBUTE_UNUSED)
8618 call_expr_arg_iterator iter;
8619 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8620 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8622 enum machine_mode mode[4];
8625 mode[0] = insn_data[icode].operand[0].mode;
8627 || GET_MODE (target) != mode[0]
8628 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
8629 op[0] = gen_reg_rtx (mode[0]);
8633 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8636 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8637 op[arg_count] = expand_normal (arg);
8639 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8641 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8647 pat = GEN_FCN (icode) (op[0], op[1]);
8650 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8653 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8668 sparc_vis_mul8x16 (int e8, int e16)
8670 return (e8 * e16 + 128) / 256;
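/* For example, e8 = 3 and e16 = 300 give (3 * 300 + 128) / 256 = 4,
   i.e. 3 * 300/256 = 3.52 rounded to the nearest integer, which is the
   rounding the "+ 128" term provides.  */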
8673 /* Multiply the vector elements in ELTS0 to the elements in ELTS1 as specified
8674 by FNCODE. All of the elements in ELTS0 and ELTS1 lists must be integer
8675 constants. A tree list with the results of the multiplications is returned,
8676 and each element in the list is of INNER_TYPE. */
8679 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8681 tree n_elts = NULL_TREE;
8686 case CODE_FOR_fmul8x16_vis:
8687 for (; elts0 && elts1;
8688 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8691 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8692 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8693 n_elts = tree_cons (NULL_TREE,
8694 build_int_cst (inner_type, val),
8699 case CODE_FOR_fmul8x16au_vis:
8700 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8702 for (; elts0; elts0 = TREE_CHAIN (elts0))
8705 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8707 n_elts = tree_cons (NULL_TREE,
8708 build_int_cst (inner_type, val),
8713 case CODE_FOR_fmul8x16al_vis:
8714 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8716 for (; elts0; elts0 = TREE_CHAIN (elts0))
8719 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8721 n_elts = tree_cons (NULL_TREE,
8722 build_int_cst (inner_type, val),
8731 return nreverse (n_elts);
8734 /* Handle TARGET_FOLD_BUILTIN target hook.
8735 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8736 result of the function call is ignored. NULL_TREE is returned if the
8737 function could not be folded. */
8740 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
8741 tree *args, bool ignore)
8743 tree arg0, arg1, arg2;
8744 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8745 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
8748 && icode != CODE_FOR_alignaddrsi_vis
8749 && icode != CODE_FOR_alignaddrdi_vis)
8750 return fold_convert (rtype, integer_zero_node);
8754 case CODE_FOR_fexpand_vis:
8758 if (TREE_CODE (arg0) == VECTOR_CST)
8760 tree inner_type = TREE_TYPE (rtype);
8761 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8762 tree n_elts = NULL_TREE;
8764 for (; elts; elts = TREE_CHAIN (elts))
8766 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8767 n_elts = tree_cons (NULL_TREE,
8768 build_int_cst (inner_type, val),
8771 return build_vector (rtype, nreverse (n_elts));
8775 case CODE_FOR_fmul8x16_vis:
8776 case CODE_FOR_fmul8x16au_vis:
8777 case CODE_FOR_fmul8x16al_vis:
8783 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8785 tree inner_type = TREE_TYPE (rtype);
8786 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8787 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8788 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
8791 return build_vector (rtype, n_elts);
8795 case CODE_FOR_fpmerge_vis:
8801 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8803 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8804 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8805 tree n_elts = NULL_TREE;
8807 for (; elts0 && elts1;
8808 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8810 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8811 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8814 return build_vector (rtype, nreverse (n_elts));
8818 case CODE_FOR_pdist_vis:
8826 if (TREE_CODE (arg0) == VECTOR_CST
8827 && TREE_CODE (arg1) == VECTOR_CST
8828 && TREE_CODE (arg2) == INTEGER_CST)
8831 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8832 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8833 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8834 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8836 for (; elts0 && elts1;
8837 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8839 unsigned HOST_WIDE_INT
8840 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8841 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8842 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8843 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8845 unsigned HOST_WIDE_INT l;
8848 overflow |= neg_double (low1, high1, &l, &h);
8849 overflow |= add_double (low0, high0, l, h, &l, &h);
8851 overflow |= neg_double (l, h, &l, &h);
8853 overflow |= add_double (low, high, l, h, &low, &high);
8856 gcc_assert (overflow == 0);
8858 return build_int_cst_wide (rtype, low, high);
8868 /* ??? This duplicates information provided to the compiler by the
8869 ??? scheduler description. Some day, teach genautomata to output
8870 ??? the latencies and then CSE will just use that. */
8873 sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
8874 bool speed ATTRIBUTE_UNUSED)
8876 enum machine_mode mode = GET_MODE (x);
8877 bool float_mode_p = FLOAT_MODE_P (mode);
8882 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8900 if (GET_MODE (x) == VOIDmode
8901 && ((CONST_DOUBLE_HIGH (x) == 0
8902 && CONST_DOUBLE_LOW (x) < 0x1000)
8903 || (CONST_DOUBLE_HIGH (x) == -1
8904 && CONST_DOUBLE_LOW (x) < 0
8905 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8912 /* If outer-code was a sign or zero extension, a cost
8913 of COSTS_N_INSNS (1) was already added in. This is
8914 why we are subtracting it back out. */
8915 if (outer_code == ZERO_EXTEND)
8917 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8919 else if (outer_code == SIGN_EXTEND)
8921 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8923 else if (float_mode_p)
8925 *total = sparc_costs->float_load;
8929 *total = sparc_costs->int_load;
8937 *total = sparc_costs->float_plusminus;
8939 *total = COSTS_N_INSNS (1);
8944 *total = sparc_costs->float_mul;
8945 else if (! TARGET_HARD_MUL)
8946 *total = COSTS_N_INSNS (25);
8952 if (sparc_costs->int_mul_bit_factor)
8956 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8958 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
8959 for (nbits = 0; value != 0; value &= value - 1)
8962 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8963 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8965 rtx x1 = XEXP (x, 1);
8966 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8967 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8969 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8971 for (; value2 != 0; value2 &= value2 - 1)
8979 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8980 bit_cost = COSTS_N_INSNS (bit_cost);
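/* Illustrative example (hypothetical numbers): with an
   int_mul_bit_factor of 2 and a constant multiplier that has 11 set
   bits, the extra cost is COSTS_N_INSNS ((11 - 3) / 2)
   = COSTS_N_INSNS (4) on top of the base multiply cost chosen below.  */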
8984 *total = sparc_costs->int_mulX + bit_cost;
8986 *total = sparc_costs->int_mul + bit_cost;
8993 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
9003 *total = sparc_costs->float_div_df;
9005 *total = sparc_costs->float_div_sf;
9010 *total = sparc_costs->int_divX;
9012 *total = sparc_costs->int_div;
9019 *total = COSTS_N_INSNS (1);
9026 case UNSIGNED_FLOAT:
9030 case FLOAT_TRUNCATE:
9031 *total = sparc_costs->float_move;
9036 *total = sparc_costs->float_sqrt_df;
9038 *total = sparc_costs->float_sqrt_sf;
9043 *total = sparc_costs->float_cmp;
9045 *total = COSTS_N_INSNS (1);
9050 *total = sparc_costs->float_cmove;
9052 *total = sparc_costs->int_cmove;
9056 /* Handle the NAND vector patterns. */
9057 if (sparc_vector_mode_supported_p (GET_MODE (x))
9058 && GET_CODE (XEXP (x, 0)) == NOT
9059 && GET_CODE (XEXP (x, 1)) == NOT)
9061 *total = COSTS_N_INSNS (1);
9072 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
9073 This is achieved by means of a manual dynamic stack space allocation in
9074 the current frame. We make the assumption that SEQ doesn't contain any
9075 function calls, with the possible exception of calls to the PIC helper. */
9078 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
9080 /* We must preserve the lowest 16 words for the register save area. */
9081 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
9082 /* We really need only 2 words of fresh stack space. */
9083 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
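/* Descriptive note (added): the slot is carved out just above the
   16-word register save area that the SPARC ABI reserves at the top of
   every frame; OFFSET skips those 16 words and SIZE rounds the 2 extra
   words up to the stack alignment.  REG is stored in the first word of
   the slot and REG2, when given, in the second.  */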
9086 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
9087 SPARC_STACK_BIAS + offset));
9089 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
9090 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
9092 emit_insn (gen_rtx_SET (VOIDmode,
9093 adjust_address (slot, word_mode, UNITS_PER_WORD),
9097 emit_insn (gen_rtx_SET (VOIDmode,
9099 adjust_address (slot, word_mode, UNITS_PER_WORD)));
9100 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
9101 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
9104 /* Output the assembler code for a thunk function. THUNK_DECL is the
9105 declaration for the thunk function itself, FUNCTION is the decl for
9106 the target function. DELTA is an immediate constant offset to be
9107 added to THIS. If VCALL_OFFSET is nonzero, the word at address
9108 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
9111 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
9112 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9115 rtx this_rtx, insn, funexp;
9116 unsigned int int_arg_first;
9118 reload_completed = 1;
9119 epilogue_completed = 1;
9121 emit_note (NOTE_INSN_PROLOGUE_END);
9123 if (flag_delayed_branch)
9125 /* We will emit a regular sibcall below, so we need to instruct
9126 output_sibcall that we are in a leaf function. */
9127 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
9129 /* This will cause final.c to invoke leaf_renumber_regs so we
9130 must behave as if we were in a not-yet-leafified function. */
9131 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
9135 /* We will emit the sibcall manually below, so we will need to
9136 manually spill non-leaf registers. */
9137 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
9139 /* We really are in a leaf function. */
9140 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
9143 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
9144 returns a structure, the structure return pointer is there instead. */
9146 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9147 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
9149 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
9151 /* Add DELTA. When possible use a plain add, otherwise load it into
9152 a register first. */
9155 rtx delta_rtx = GEN_INT (delta);
9157 if (! SPARC_SIMM13_P (delta))
9159 rtx scratch = gen_rtx_REG (Pmode, 1);
9160 emit_move_insn (scratch, delta_rtx);
9161 delta_rtx = scratch;
9164 /* THIS_RTX += DELTA. */
9165 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
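/* Illustrative example (added): a DELTA of 8 fits the 13-bit signed
   immediate and is added directly, whereas a DELTA of 0x2000 fails
   SPARC_SIMM13_P and is first loaded into the scratch register %g1
   above.  */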
9168 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
9171 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
9172 rtx scratch = gen_rtx_REG (Pmode, 1);
9174 gcc_assert (vcall_offset < 0);
9176 /* SCRATCH = *THIS_RTX. */
9177 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
9179 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
9180 may not have any available scratch register at this point. */
9181 if (SPARC_SIMM13_P (vcall_offset))
9183 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
9184 else if (! fixed_regs[5]
9185 /* The below sequence is made up of at least 2 insns,
9186 while the default method may need only one. */
9187 && vcall_offset < -8192)
9189 rtx scratch2 = gen_rtx_REG (Pmode, 5);
9190 emit_move_insn (scratch2, vcall_offset_rtx);
9191 vcall_offset_rtx = scratch2;
9195 rtx increment = GEN_INT (-4096);
9197 /* VCALL_OFFSET is a negative number whose typical range can be
9198 estimated as -32768..0 in 32-bit mode. In almost all cases
9199 it is therefore cheaper to emit multiple add insns than
9200 spilling and loading the constant into a register (at least below 8 insns).  */
9202 while (! SPARC_SIMM13_P (vcall_offset))
9204 emit_insn (gen_add2_insn (scratch, increment));
9205 vcall_offset += 4096;
9207 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
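/* Worked example (added): for a VCALL_OFFSET of -20000 the loop above
   emits four add-by--4096 insns on SCRATCH (a total adjustment of
   -16384) and leaves -3616, which fits simm13 and is folded into the
   address of the memory load below.  */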
9210 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
9211 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
9212 gen_rtx_PLUS (Pmode,
9214 vcall_offset_rtx)));
9216 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
9217 emit_insn (gen_add2_insn (this_rtx, scratch));
9220 /* Generate a tail call to the target function. */
9221 if (! TREE_USED (function))
9223 assemble_external (function);
9224 TREE_USED (function) = 1;
9226 funexp = XEXP (DECL_RTL (function), 0);
9228 if (flag_delayed_branch)
9230 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
9231 insn = emit_call_insn (gen_sibcall (funexp));
9232 SIBLING_CALL_P (insn) = 1;
9236 /* The hoops we have to jump through in order to generate a sibcall
9237 without using delay slots... */
9238 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
9242 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
9243 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
9245 /* Delay emitting the PIC helper function because it needs to
9246 change the section and we are emitting assembly code. */
9247 load_pic_register (); /* clobbers %o7 */
9248 scratch = sparc_legitimize_pic_address (funexp, scratch);
9251 emit_and_preserve (seq, spill_reg, spill_reg2);
9253 else if (TARGET_ARCH32)
9255 emit_insn (gen_rtx_SET (VOIDmode,
9257 gen_rtx_HIGH (SImode, funexp)));
9258 emit_insn (gen_rtx_SET (VOIDmode,
9260 gen_rtx_LO_SUM (SImode, scratch, funexp)));
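/* Illustrative note (added, assembly is approximate): on 32-bit targets
   this materializes the callee address with the usual high/low pair,
   roughly
       sethi  %hi(function), %g1
       or     %g1, %lo(function), %g1
   and the indirect jump through the scratch register is emitted
   further below.  */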
9262 else /* TARGET_ARCH64 */
9264 switch (sparc_cmodel)
9268 /* The destination can serve as a temporary. */
9269 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
9274 /* The destination cannot serve as a temporary. */
9275 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
9277 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
9280 emit_and_preserve (seq, spill_reg, 0);
9288 emit_jump_insn (gen_indirect_jump (scratch));
9293 /* Run just enough of rest_of_compilation to get the insns emitted.
9294 There's not really enough bulk here to make other passes such as
9295 instruction scheduling worthwhile.  Note that use_thunk calls
9296 assemble_start_function and assemble_end_function. */
9297 insn = get_insns ();
9298 insn_locators_alloc ();
9299 shorten_branches (insn);
9300 final_start_function (insn, file, 1);
9301 final (insn, file, 1);
9302 final_end_function ();
9304 reload_completed = 0;
9305 epilogue_completed = 0;
9308 /* Return true if sparc_output_mi_thunk would be able to output the
9309 assembler code for the thunk function specified by the arguments
9310 it is passed, and false otherwise. */
9312 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
9313 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
9314 HOST_WIDE_INT vcall_offset,
9315 const_tree function ATTRIBUTE_UNUSED)
9317 /* Bound the loop used in the default method above. */
9318 return (vcall_offset >= -32768 || ! fixed_regs[5]);
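/* Descriptive note (added): when %g5 is a fixed register, only the
   add-in-4096-byte-steps fallback in sparc_output_mi_thunk is
   available, so refusing offsets below -32768 keeps that loop to a
   handful of add insns.  */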
9321 /* How to allocate a 'struct machine_function'. */
9323 static struct machine_function *
9324 sparc_init_machine_status (void)
9326 return ggc_alloc_cleared_machine_function ();
9329 /* Locate some local-dynamic symbol still in use by this function
9330 so that we can print its name in local-dynamic base patterns. */
9333 get_some_local_dynamic_name (void)
9337 if (cfun->machine->some_ld_name)
9338 return cfun->machine->some_ld_name;
9340 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
9342 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
9343 return cfun->machine->some_ld_name;
9349 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
9354 && GET_CODE (x) == SYMBOL_REF
9355 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
9357 cfun->machine->some_ld_name = XSTR (x, 0);
9364 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
9365 This is called from dwarf2out.c to emit call frame instructions
9366 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
9368 sparc_dwarf_handle_frame_unspec (const char *label,
9369 rtx pattern ATTRIBUTE_UNUSED,
9370 int index ATTRIBUTE_UNUSED)
9372 gcc_assert (index == UNSPECV_SAVEW);
9373 dwarf2out_window_save (label);
9376 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
9377 We need to emit DTP-relative relocations. */
9380 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
9385 fputs ("\t.word\t%r_tls_dtpoff32(", file);
9388 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
9393 output_addr_const (file, x);
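/* Illustrative note (added): for a 4-byte size and a symbol "x" this
   emits something like
       .word   %r_tls_dtpoff32(x)
   (the closing parenthesis follows), directing the assembler to produce
   a DTP-relative relocation for the DWARF debug info.  */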
9397 /* Do whatever processing is required at the end of a file. */
9400 sparc_file_end (void)
9402 /* If we need to emit the special PIC helper function, do so now. */
9403 if (pic_helper_needed)
9405 unsigned int regno = REGNO (pic_offset_table_rtx);
9406 const char *pic_name = reg_names[regno];
9408 #ifdef DWARF2_UNWIND_INFO
9412 get_pc_thunk_name (name, regno);
9413 if (USE_HIDDEN_LINKONCE)
9415 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
9416 get_identifier (name),
9417 build_function_type (void_type_node,
9419 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
9420 NULL_TREE, void_type_node);
9421 TREE_STATIC (decl) = 1;
9422 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
9423 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
9424 DECL_VISIBILITY_SPECIFIED (decl) = 1;
9425 allocate_struct_function (decl, true);
9426 current_function_decl = decl;
9427 init_varasm_status ();
9428 assemble_start_function (decl, name);
9432 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9433 switch_to_section (text_section);
9435 ASM_OUTPUT_ALIGN (asm_out_file, align);
9436 ASM_OUTPUT_LABEL (asm_out_file, name);
9439 #ifdef DWARF2_UNWIND_INFO
9440 do_cfi = dwarf2out_do_cfi_asm ();
9442 fprintf (asm_out_file, "\t.cfi_startproc\n");
9444 if (flag_delayed_branch)
9445 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
9446 pic_name, pic_name);
9448 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
9449 pic_name, pic_name);
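/* Illustrative note (added, assembly is approximate): with a PIC
   register of %l7 and delayed branches enabled, the helper looks like
       jmp   %o7+8
        add  %o7, %l7, %l7
   i.e. it returns to the caller while adding the call site's PC (left
   in %o7 by the call) into the PIC register.  */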
9450 #ifdef DWARF2_UNWIND_INFO
9452 fprintf (asm_out_file, "\t.cfi_endproc\n");
9456 if (NEED_INDICATE_EXEC_STACK)
9457 file_end_indicate_exec_stack ();
9460 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
9461 /* Implement TARGET_MANGLE_TYPE. */
9464 sparc_mangle_type (const_tree type)
9467 && TYPE_MAIN_VARIANT (type) == long_double_type_node
9468 && TARGET_LONG_DOUBLE_128)
9471 /* For all other types, use normal C++ mangling. */
9476 /* Expand code to perform an 8- or 16-bit compare and swap by doing a 32-bit
9477 compare and swap on the word containing the byte or half-word. */
9480 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
9482 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
9483 rtx addr = gen_reg_rtx (Pmode);
9484 rtx off = gen_reg_rtx (SImode);
9485 rtx oldv = gen_reg_rtx (SImode);
9486 rtx newv = gen_reg_rtx (SImode);
9487 rtx oldvalue = gen_reg_rtx (SImode);
9488 rtx newvalue = gen_reg_rtx (SImode);
9489 rtx res = gen_reg_rtx (SImode);
9490 rtx resv = gen_reg_rtx (SImode);
9491 rtx memsi, val, mask, end_label, loop_label, cc;
9493 emit_insn (gen_rtx_SET (VOIDmode, addr,
9494 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
9496 if (Pmode != SImode)
9497 addr1 = gen_lowpart (SImode, addr1);
9498 emit_insn (gen_rtx_SET (VOIDmode, off,
9499 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
9501 memsi = gen_rtx_MEM (SImode, addr);
9502 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
9503 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
9505 val = force_reg (SImode, memsi);
9507 emit_insn (gen_rtx_SET (VOIDmode, off,
9508 gen_rtx_XOR (SImode, off,
9509 GEN_INT (GET_MODE (mem) == QImode
9512 emit_insn (gen_rtx_SET (VOIDmode, off,
9513 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
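/* Worked example (added, big-endian SPARC): for a QImode access whose
   address is 1 modulo 4, the byte occupies bits 23..16 of the
   containing word, so OFF becomes ((1 ^ 3) * 8) = 16 and the 0xff mask
   below is shifted into exactly that field.  */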
9515 if (GET_MODE (mem) == QImode)
9516 mask = force_reg (SImode, GEN_INT (0xff));
9518 mask = force_reg (SImode, GEN_INT (0xffff));
9520 emit_insn (gen_rtx_SET (VOIDmode, mask,
9521 gen_rtx_ASHIFT (SImode, mask, off)));
9523 emit_insn (gen_rtx_SET (VOIDmode, val,
9524 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
9527 oldval = gen_lowpart (SImode, oldval);
9528 emit_insn (gen_rtx_SET (VOIDmode, oldv,
9529 gen_rtx_ASHIFT (SImode, oldval, off)));
9531 newval = gen_lowpart_common (SImode, newval);
9532 emit_insn (gen_rtx_SET (VOIDmode, newv,
9533 gen_rtx_ASHIFT (SImode, newval, off)));
9535 emit_insn (gen_rtx_SET (VOIDmode, oldv,
9536 gen_rtx_AND (SImode, oldv, mask)));
9538 emit_insn (gen_rtx_SET (VOIDmode, newv,
9539 gen_rtx_AND (SImode, newv, mask)));
9541 end_label = gen_label_rtx ();
9542 loop_label = gen_label_rtx ();
9543 emit_label (loop_label);
9545 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
9546 gen_rtx_IOR (SImode, oldv, val)));
9548 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
9549 gen_rtx_IOR (SImode, newv, val)));
9551 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
9553 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
9555 emit_insn (gen_rtx_SET (VOIDmode, resv,
9556 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
9559 cc = gen_compare_reg_1 (NE, resv, val);
9560 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
9562 /* Use cbranchcc4 to separate the compare and branch! */
9563 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
9564 cc, const0_rtx, loop_label));
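/* Descriptive note (added): at run time this branch is reached only
   when the 32-bit CAS did not find OLDVALUE.  If bytes outside our
   field changed, RESV differs from the VAL we used, so VAL has been
   refreshed above and we loop to retry; otherwise the field itself
   mismatched, the operation legitimately fails, and we fall through to
   extract the observed old field value from RES below.  */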
9566 emit_label (end_label);
9568 emit_insn (gen_rtx_SET (VOIDmode, res,
9569 gen_rtx_AND (SImode, res, mask)));
9571 emit_insn (gen_rtx_SET (VOIDmode, res,
9572 gen_rtx_LSHIFTRT (SImode, res, off)));
9574 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
9577 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
9580 sparc_frame_pointer_required (void)
9582 return !(leaf_function_p () && only_leaf_regs_used ());
9585 /* The way this is structured, we can't eliminate SFP in favor of SP
9586 if the frame pointer is required: we want to use the SFP->HFP elimination
9587 in that case.  But the test in update_eliminables doesn't know about the
9588 assumption made below that only the former elimination is performed.  */
9591 sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
9593 return (to == HARD_FRAME_POINTER_REGNUM
9594 || !targetm.frame_pointer_required ());
9597 #include "gt-sparc.h"