/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "gimple.h"
#include "langhooks.h"
#include "params.h"
#include "df.h"
#include "dwarf2out.h"
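/* Cost tables, one per -mtune'd processor.  Each entry is expressed in
   COSTS_N_INSNS units, i.e. multiples of the cost of one simple
   instruction; sparc_rtx_costs consults whichever table
   sparc_override_options aims the sparc_costs pointer (below) at.  */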
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

const struct processor_costs *sparc_costs = &cypress_costs;
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way how to find out if
   somebody does not branch between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) alias_set_type sparc_sr_alias_set;

/* The alias set for the structure return value.  */
static GTY(()) alias_set_type struct_value_alias_set;
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};
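/* For example, in a leaf function the incoming registers %i0-%i5
   (hard regs 24-29) are rewritten as %o0-%o5 (hard regs 8-13), since
   no new register window was allocated; %fp (30) stays unmapped.  */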
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};
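/* Per the table: a leaf function must avoid %o0-%o5 and %o7 (8-13, 15),
   the locals %l0-%l7 (16-23) and %fp (30), since without a SAVE those
   either belong to the caller's window or would require one; %sp (14),
   the globals and the float registers remain usable.  */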
struct GTY(()) machine_function
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static bool sparc_handle_option (size_t, const char *, int);
static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void load_pic_register (void);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
						 tree) ATTRIBUTE_UNUSED;
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *, bool);
static bool sparc_promote_prototypes (const_tree);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (enum machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static rtx sparc_struct_value_rtx (tree, int);
static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
						      int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx legitimize_tls_address (rtx);
static rtx legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static bool sparc_mode_dependent_address_p (const_rtx);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
				     enum machine_mode, const_tree, bool);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
				    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
/* Option handling.  */

/* Code model option as passed by user.  */
const char *sparc_cmodel_string;
/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch	name,		tune	arch */
  { (char *)0,	"default",	1,	1 },
  { (char *)0,	"-mcpu=",	1,	1 },
  { (char *)0,	"-mtune=",	1,	0 },
  { 0, 0, 0, 0 }
};

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Whether an FPU option was specified.  */
static bool fpu_option_set = false;
/* Initialize the GCC target structure.  */

/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin
#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sparc_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sparc_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

struct gcc_target targetm = TARGET_INITIALIZER;
/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mfpu:
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}
/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { TARGET_CPU_niagara, "niagara" },
    { TARGET_CPU_niagara2, "niagara2" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
       The Fujitsu MB86934 is the recent sparclite chip, with an fpu.  */
    { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
    /* Although insns using %y are deprecated, it is a clear win on current
       ultrasparcs.  */
						    |MASK_DEPRECATED_V8_INSNS},
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    { "niagara2", PROCESSOR_NIAGARA2, MASK_ISA, MASK_V9},
    { 0, (enum processor_type) 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;
#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64bit archs to use 128 bit long double */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }
  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  gcc_assert (def->name);
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string != NULL)
	{
	  for (cpu = &cpu_table[0]; cpu->name; ++cpu)
	    if (! strcmp (sel->string, cpu->name))
	      {
		if (sel->set_tune_p)
		  sparc_cpu = cpu->processor;

		if (sel->set_arch_p)
		  {
		    target_flags &= ~cpu->disable;
		    target_flags |= cpu->enable;
		  }
		break;
	      }

	  if (! cpu->name)
	    error ("bad value (%s) for %s switch", sel->string, sel->name);
	}
    }
  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA
	  || sparc_cpu == PROCESSOR_NIAGARA2))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;
  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire unique alias sets for our private stuff.  */
  sparc_sr_alias_set = new_alias_set ();
  struct_value_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;
  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    };

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
  if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
    set_param_value ("simultaneous-prefetches",
		     ((sparc_cpu == PROCESSOR_ULTRASPARC
		       || sparc_cpu == PROCESSOR_NIAGARA
		       || sparc_cpu == PROCESSOR_NIAGARA2)
		      ? 2
		      : (sparc_cpu == PROCESSOR_ULTRASPARC3
			 ? 8 : 3)));
  if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
    set_param_value ("l1-cache-line-size",
		     ((sparc_cpu == PROCESSOR_ULTRASPARC
		       || sparc_cpu == PROCESSOR_ULTRASPARC3
		       || sparc_cpu == PROCESSOR_NIAGARA
		       || sparc_cpu == PROCESSOR_NIAGARA2)
		      ? 64 : 32));
}
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}
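/* Only the six signed comparisons qualify: the V9 branch-on-register
   (BPr) and move-on-register (MOVr/FMOVr) instructions test a register
   against zero and exist only in eq/ne/ge/lt/le/gt encodings; there
   are no unsigned variants.  */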
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
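/* Illustration of the three predicates above: 1.0f has the single
   precision image 0x3f800000, whose low 10 bits are zero, so it
   satisfies fp_sethi_p; an image such as 0x00000123 fits in a simm13
   and satisfies fp_mov_p; 0x3f800001 needs sethi+or and satisfies
   fp_high_losum_p.  */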
/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && sparc_tls_referenced_p (operands [1]))
    {
      operands[1] = legitimize_tls_address (operands[1]);
      return false;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = legitimize_pic_address (operands[1], NULL_RTX);

      /* VxWorks does not impose a fixed gap between segments; the run-time
	 gap can be different from the object-file gap.  We therefore can't
	 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
	 are absolutely sure that X is in the same segment as the GOT.
	 Unfortunately, the flexibility of linker scripts means that we
	 can't be sure of that in general, so assume that _G_O_T_-relative
	 accesses are never valid on VxWorks.  */
      if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
	{
	  if (mode == SImode)
	    {
	      emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }

	  if (mode == DImode)
	    {
	      gcc_assert (TARGET_ARCH64);
	      emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1] = legitimize_pic_address (operands[1],
						reload_in_progress
						? operands[0] : NULL_RTX);
	  return false;
	}
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
	 not storing directly into memory.  So fix this up to avoid
	 crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && const_zero_operand (operands[1], mode))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && (reload_completed || reload_in_progress))))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
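/* sparc_expand_move is invoked from the mov expanders in sparc.md; when
   it returns false the expander falls through and emits the move as a
   plain SET of the (possibly rewritten) operands.  */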
/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

static void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
		  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      GEN_INT (INTVAL (op1)
				       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
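/* For example, op1 == 0x12345678 becomes
     temp = 0x12345400;	(the bits sethi %hi(0x12345678) would set)
     op0  = temp | 0x278;	(or %temp, %lo(0x12345678), %op0)  */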
/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp1
	 or	%temp1, %lo(symbol), %reg  */
      if (temp)
	temp1 = temp;  /* op0 is allowed.  */
      else
	temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      if (temp)
	{
	  temp1 = op0;
	  temp2 = op0;
	  temp3 = temp;  /* op0 is allowed.  */
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
			      gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 sllx	%temp3, 32, %temp4
	 or	%temp4, %temp2, %temp5
	 or	%temp5, %lo(symbol), %reg  */
      if (temp)
	{
	  /* It is possible that one of the registers we got for operands[2]
	     might coincide with that of operands[0] (which is why we made
	     it TImode).  Pick the other one to use as our scratch.  */
	  if (rtx_equal_p (temp, op0))
	    {
	      gcc_assert (ti_temp);
	      temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
	    }
	  temp1 = op0;
	  temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	  temp3 = op0;
	  temp4 = op0;
	  temp5 = op0;
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	  temp4 = gen_reg_rtx (DImode);
	  temp5 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
			      gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			add	%temp1, EMBMEDANY_BASE_REG, %temp2
			or	%temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  if (temp)
	    {
	      temp1 = temp;  /* op0 is allowed.  */
	      temp2 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (temp2, temp1));
	  emit_insn (gen_embmedany_losum (op0, temp2, op1));
	}
      else
	{
	  /* Text segment:	sethi	%uhi(symbol), %temp1
				sethi	%hi(symbol), %temp2
				or	%temp1, %ulo(symbol), %temp3
				sllx	%temp3, 32, %temp4
				or	%temp4, %temp2, %temp5
				or	%temp5, %lo(symbol), %reg  */
	  if (temp)
	    {
	      /* It is possible that one of the registers we got for operands[2]
		 might coincide with that of operands[0] (which is why we made
		 it TImode).  Pick the other one to use as our scratch.  */
	      if (rtx_equal_p (temp, op0))
		{
		  gcc_assert (ti_temp);
		  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
		}
	      temp1 = op0;
	      temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	      temp3 = op0;
	      temp4 = op0;
	      temp5 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	      temp3 = gen_reg_rtx (DImode);
	      temp4 = gen_reg_rtx (DImode);
	      temp5 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_textuhi (temp1, op1));
	  emit_insn (gen_embmedany_texthi  (temp2, op1));
	  emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, temp4,
				  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, temp5,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	  emit_insn (gen_embmedany_textlo  (op0, temp5, op1));
	}
      break;

    default:
      gcc_unreachable ();
    }
}
#if HOST_BITS_PER_WIDE_INT == 32
static void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH, they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits, and match
   a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}
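/* e.g. gen_safe_HIGH64 (temp, 0x12345678) yields
   (set temp (const_int 0x12345400)): the same bits sethi %hi() would
   produce, but written as a plain constant the optimizer can reason
   about, instead of an opaque HIGH rtx.  */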
/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-(HOST_WIDE_INT)0x400
						   | (low_bits & 0x3ff)))));
	}
    }
}
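/* Example: loading 0x12345678 emits sethi of 0x12345400 then an or
   with 0x278; for a sign-extended negative like 0xffffffff87654321 the
   sethi of the complement is followed by an xor with a negative simm13,
   which also flips the upper 32 bits to all-ones.  */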
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
					   unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT high_bits,
			       unsigned HOST_WIDE_INT low_immediate,
			       int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
			  gen_rtx_ASHIFT (DImode, temp2,
					  GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
			    gen_safe_OR64 (op0, low_immediate)));
}
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
				unsigned HOST_WIDE_INT high_bits,
				unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				sub_temp,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp3,
				  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp3)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	}
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
	 painful.  However we do still make an attempt to
	 avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low1)));
	  sub_temp = op0;
	  to_shift = 12;
	}
      else
	{
	  to_shift += 12;
	}
      if (low2 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low2)));
	  sub_temp = op0;
	  to_shift = 8;
	}
      else
	{
	  to_shift += 8;
	}
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (to_shift))));
      if (low3 != const0_rtx)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_IOR (DImode, op0, low3)));
    }
}
/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
			unsigned HOST_WIDE_INT low_bits,
			int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
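/* For instance, high_bits == 0 and low_bits == 0xffff0000 yields
   lowest_bit_set == 16, highest_bit_set == 31 and
   all_bits_between_are_set == 1: a candidate for a short sequence.  */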
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
		   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
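/* The three 2-insn classes recognized above: a high word of 0 or
   all-ones (sethi+or, or sethi of the complement plus xor); a run of
   ones touching bit 0 or bit 63 (mov -1 then sllx/srlx); and any field
   narrower than 21 bits (sethi then a shift into place).  */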
static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
							unsigned HOST_WIDE_INT,
							int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
			  unsigned HOST_WIDE_INT low_bits,
			  int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
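/* Example: low_bits == 0xffff0000, lowest_bit_set == 16, shift == 10
   returns 0x03fffc00: the 16-bit run re-based at bit 10, which a sethi
   can materialize before it is shifted back into position.  */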
/* Here we are sure to be arch64 and this is an integer constant
   being loaded into a register.  Emit the most efficient
   insn sequence possible.  Detection of all the 1-insn cases
   has been done already.  */
static void
sparc_emit_set_const64 (rtx op0, rtx op1)
{
  unsigned HOST_WIDE_INT high_bits, low_bits;
  int lowest_bit_set, highest_bit_set;
  int all_bits_between_are_set;
  rtx temp = 0;

  /* Sanity check that we know what we are working with.  */
  gcc_assert (TARGET_ARCH64
	      && (GET_CODE (op0) == SUBREG
		  || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));

  if (reload_in_progress || reload_completed)
    temp = op0;

  if (GET_CODE (op1) != CONST_INT)
    {
      sparc_emit_set_symbolic_const64 (op0, op1, temp);
      return;
    }

  if (! temp)
    temp = gen_reg_rtx (DImode);

  high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
  low_bits = (INTVAL (op1) & 0xffffffff);

  /* low_bits	bits 0  --> 31
     high_bits	bits 32 --> 63  */

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  /* First try for a 2-insn sequence.  */

  /* These situations are preferred because the optimizer can
   * do more things with them:
   * 1) mov	-1, %reg
   *    sllx	%reg, shift, %reg
   * 2) mov	-1, %reg
   *    srlx	%reg, shift, %reg
   * 3) mov	some_small_const, %reg
   *    sllx	%reg, shift, %reg
   */
  if (((highest_bit_set == 63
	|| lowest_bit_set == 0)
       && all_bits_between_are_set != 0)
      || ((highest_bit_set - lowest_bit_set) < 12))
    {
      HOST_WIDE_INT the_const = -1;
      int shift = lowest_bit_set;

      if ((highest_bit_set != 63
	   && lowest_bit_set != 0)
	  || all_bits_between_are_set == 0)
	{
	  the_const =
	    create_simple_focus_bits (high_bits, low_bits,
				      lowest_bit_set, 0);
	}
      else if (lowest_bit_set == 0)
	shift = -(63 - highest_bit_set);

      gcc_assert (SPARC_SIMM13_P (the_const));
      gcc_assert (shift != 0);

      emit_insn (gen_safe_SET64 (temp, the_const));
      if (shift > 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_ASHIFT (DImode,
						temp,
						GEN_INT (shift))));
      else if (shift < 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_LSHIFTRT (DImode,
						  temp,
						  GEN_INT (-shift))));
      return;
    }

  /* Now a range of 22 or less bits set somewhere.
   * 1) sethi	%hi(focus_bits), %reg
   *    sllx	%reg, shift, %reg
   * 2) sethi	%hi(focus_bits), %reg
   *    srlx	%reg, shift, %reg
   */
  if ((highest_bit_set - lowest_bit_set) < 21)
    {
      unsigned HOST_WIDE_INT focus_bits =
	create_simple_focus_bits (high_bits, low_bits,
				  lowest_bit_set, 10);

      gcc_assert (SPARC_SETHI_P (focus_bits));
      gcc_assert (lowest_bit_set != 10);

      emit_insn (gen_safe_HIGH64 (temp, focus_bits));

      /* If lowest_bit_set == 10 then a sethi alone could have done it.  */
      if (lowest_bit_set < 10)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_LSHIFTRT (DImode, temp,
						  GEN_INT (10 - lowest_bit_set))));
      else if (lowest_bit_set > 10)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_ASHIFT (DImode, temp,
						GEN_INT (lowest_bit_set - 10))));
      return;
    }

  /* 1) sethi	%hi(low_bits), %reg
   *    or	%reg, %lo(low_bits), %reg
   * 2) sethi	%hi(~low_bits), %reg
   *	xor	%reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
   */
  if (high_bits == 0
      || high_bits == 0xffffffff)
    {
      sparc_emit_set_const64_quick1 (op0, temp, low_bits,
				     (high_bits == 0xffffffff));
      return;
    }

  /* Now, try 3-insn sequences.  */

  /* 1) sethi	%hi(high_bits), %reg
   *    or	%reg, %lo(high_bits), %reg
   *    sllx	%reg, 32, %reg
   */
  if (low_bits == 0)
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
      return;
    }

  /* We may be able to do something quick
     when the constant is negated, so try that.  */
  if (const64_is_2insns ((~high_bits) & 0xffffffff,
			 (~low_bits) & 0xfffffc00))
    {
      /* NOTE: The trailing bits get XOR'd so we need the
	 non-negated bits, not the negated ones.  */
      unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;

      if ((((~high_bits) & 0xffffffff) == 0
	   && ((~low_bits) & 0x80000000) == 0)
	  || (((~high_bits) & 0xffffffff) == 0xffffffff
	      && ((~low_bits) & 0x80000000) != 0))
	{
	  unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);

	  if ((SPARC_SETHI_P (fast_int)
	       && (~high_bits & 0xffffffff) == 0)
	      || SPARC_SIMM13_P (fast_int))
	    emit_insn (gen_safe_SET64 (temp, fast_int));
	  else
	    sparc_emit_set_const64 (temp, GEN_INT (fast_int));
	}
      else
	{
	  rtx negated_const;
	  negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
				   (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
	  sparc_emit_set_const64 (temp, negated_const);
	}

      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if (trailing_bits == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode,
				  op0,
				  gen_safe_XOR64 (temp,
						  (-0x400 | trailing_bits))));
	}
      return;
    }

  /* 1) sethi	%hi(xxx), %reg
   *    or	%reg, %lo(xxx), %reg
   *	sllx	%reg, yyy, %reg
   *
   * ??? This is just a generalized version of the low_bits==0
   * thing above, FIXME...
   */
  if ((highest_bit_set - lowest_bit_set) < 32)
    {
      unsigned HOST_WIDE_INT focus_bits =
	create_simple_focus_bits (high_bits, low_bits,
				  lowest_bit_set, 0);

      /* We can't get here in this state.  */
      gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);

      /* So what we know is that the set bits straddle the
	 middle of the 64-bit word.  */
      sparc_emit_set_const64_quick2 (op0, temp,
				     focus_bits, 0,
				     lowest_bit_set);
      return;
    }

  /* 1) sethi	%hi(high_bits), %reg
   *    or	%reg, %lo(high_bits), %reg
   *    sllx	%reg, 32, %reg
   *	or	%reg, low_bits, %reg
   */
  if (SPARC_SIMM13_P(low_bits)
      && ((int)low_bits > 0))
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
      return;
    }

  /* The easiest way when all else fails, is full decomposition.  */
  sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
}
#endif /* HOST_BITS_PER_WIDE_INT == 32 */
/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
   return the mode to be used for the comparison.  For floating-point,
   CCFP[E]mode is used.  CC_NOOVmode should be used when the first operand
   is a PLUS, MINUS, NEG, or ASHIFT.  CCmode should be used when no special
   processing is needed.  */

enum machine_mode
select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      switch (op)
	{
	case EQ:
	case NE:
	case UNORDERED:
	case ORDERED:
	case UNLT:
	case UNLE:
	case UNGT:
	case UNGE:
	case UNEQ:
	case LTGT:
	  return CCFPmode;

	case LT:
	case LE:
	case GT:
	case GE:
	  return CCFPEmode;

	default:
	  gcc_unreachable ();
	}
    }
  else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
	   || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
    {
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
	return CCX_NOOVmode;
      else
	return CC_NOOVmode;
    }
  else
    {
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
	return CCXmode;
      else
	return CCmode;
    }
}
1999 /* Emit the compare insn and return the CC reg for a CODE comparison
2000 with operands X and Y. */
2003 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2005 enum machine_mode mode;
2008 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2011 mode = SELECT_CC_MODE (code, x, y);
2013 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2014 fcc regs (cse can't tell they're really call clobbered regs and will
2015 remove a duplicate comparison even if there is an intervening function
2016 call - it will then try to reload the cc reg via an int reg which is why
2017 we need the movcc patterns). It is possible to provide the movcc
2018 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2019 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2020 to tell cse that CCFPE mode registers (even pseudos) are call
2023 /* ??? This is an experiment. Rather than making changes to cse which may
2024 or may not be easy/clean, we do our own cse. This is possible because
2025 we will generate hard registers. Cse knows they're call clobbered (it
2026 doesn't know the same thing about pseudos). If we guess wrong, no big
2027 deal, but if we win, great! */
2029 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2030 #if 1 /* experiment */
2033 /* We cycle through the registers to ensure they're all exercised. */
2034 static int next_fcc_reg = 0;
2035 /* Previous x,y for each fcc reg. */
2036 static rtx prev_args[4][2];
2038 /* Scan prev_args for x,y. */
2039 for (reg = 0; reg < 4; reg++)
2040 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2041 break;
2042 if (reg == 4)
2044 reg = next_fcc_reg;
2045 prev_args[reg][0] = x;
2046 prev_args[reg][1] = y;
2047 next_fcc_reg = (next_fcc_reg + 1) & 3;
2049 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2051 #else
2052 cc_reg = gen_reg_rtx (mode);
2053 #endif /* ! experiment */
2054 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2055 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2056 else
2057 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2059 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD.  If we do, this
2060 will only result in an unrecognizable insn so no point in asserting.  */
2061 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2063 return cc_reg;
2067 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2070 gen_compare_reg (rtx cmp)
2072 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2075 /* This function is used for v9 only.
2076 DEST is the target of the Scc insn.
2077 CODE is the code for an Scc's comparison.
2078 X and Y are the values we compare.
2080 This function is needed to turn
2082 (set (reg:SI 110)
2083 (gt (reg:CCX 100 %icc)
2084 (const_int 0)))
2085 into
2086 (set (reg:SI 110)
2087 (gt:DI (reg:CCX 100 %icc)
2088 (const_int 0)))
2090 IE: The instruction recognizer needs to see the mode of the comparison to
2091 find the right instruction. We could use "gt:DI" right in the
2092 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2095 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2097 if (!TARGET_ARCH64
2098 && (GET_MODE (x) == DImode
2099 || GET_MODE (dest) == DImode))
2100 return 0;
2102 /* Try to use the movrCC insns.  */
2103 if (TARGET_ARCH64
2104 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2105 && y == const0_rtx
2106 && v9_regcmp_p (compare_code))
2108 rtx op0 = x;
2111 /* Special case for op0 != 0.  This can be done with one instruction if
2112 dest == op0.  */
2114 if (compare_code == NE
2115 && GET_MODE (dest) == DImode
2116 && rtx_equal_p (op0, dest))
2118 emit_insn (gen_rtx_SET (VOIDmode, dest,
2119 gen_rtx_IF_THEN_ELSE (DImode,
2120 gen_rtx_fmt_ee (compare_code, DImode,
2121 op0, const0_rtx),
2122 const1_rtx,
2123 dest)));
2124 return 1;
2127 if (reg_overlap_mentioned_p (dest, op0))
2129 /* Handle the case where dest == x.
2130 We "early clobber" the result. */
2131 op0 = gen_reg_rtx (GET_MODE (x));
2132 emit_move_insn (op0, x);
2135 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2136 if (GET_MODE (op0) != DImode)
2138 temp = gen_reg_rtx (DImode);
2139 convert_move (temp, op0, 0);
2141 else
2142 temp = op0;
2143 emit_insn (gen_rtx_SET (VOIDmode, dest,
2144 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2145 gen_rtx_fmt_ee (compare_code, DImode,
2146 temp, const0_rtx),
2147 const1_rtx,
2148 dest)));
2149 return 1;
2153 x = gen_compare_reg_1 (compare_code, x, y);
2154 y = const0_rtx;
2156 gcc_assert (GET_MODE (x) != CC_NOOVmode
2157 && GET_MODE (x) != CCX_NOOVmode);
2159 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2160 emit_insn (gen_rtx_SET (VOIDmode, dest,
2161 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2162 gen_rtx_fmt_ee (compare_code,
2163 GET_MODE (x), x, y),
2164 const1_rtx, dest)));
2166 return 1;
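/* Editor's illustration of what the two SETs above become, e.g. for
   "dest = (x > y)" (a sketch; exact register choices vary):

	cmp	%o0, %o1		! set %icc
	mov	0, %o2			! dest = 0
	movg	%icc, 1, %o2		! conditional move: dest = 1 if >

   and the one-instruction special case for "dest = (dest != 0)" in DImode:

	movrne	%o0, 1, %o0		! if (%o0 != 0) %o0 = 1  */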
2170 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2171 without jumps using the addx/subx instructions. */
2174 emit_scc_insn (rtx operands[])
2181 /* The quad-word fp compare library routines all return nonzero to indicate
2182 true, which is different from the equivalent libgcc routines, so we must
2183 handle them specially here. */
2184 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2186 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2187 GET_CODE (operands[1]));
2188 operands[2] = XEXP (operands[1], 0);
2189 operands[3] = XEXP (operands[1], 1);
2192 code = GET_CODE (operands[1]);
2193 x = operands[2];
2194 y = operands[3];
2196 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2197 more applications). The exception to this is "reg != 0" which can
2198 be done in one instruction on v9 (so we do it). */
2200 if (code == EQ)
2201 if (GET_MODE (x) == SImode)
2203 rtx pat = gen_seqsi_special (operands[0], x, y);
2204 emit_insn (pat);
2205 return true;
2207 else if (GET_MODE (x) == DImode)
2209 rtx pat = gen_seqdi_special (operands[0], x, y);
2210 emit_insn (pat);
2211 return true;
2216 if (code == NE)
2217 if (GET_MODE (x) == SImode)
2219 rtx pat = gen_snesi_special (operands[0], x, y);
2220 emit_insn (pat);
2221 return true;
2223 else if (GET_MODE (x) == DImode)
2225 rtx pat = gen_snedi_special (operands[0], x, y);
2226 emit_insn (pat);
2227 return true;
2231 /* For the rest, on v9 we can use conditional moves.  */
2233 if (TARGET_V9)
2235 if (gen_v9_scc (operands[0], code, x, y))
2236 return true;
2239 /* We can do LTU and GEU using the addx/subx instructions too. And
2240 for GTU/LEU, if both operands are registers swap them and fall
2241 back to the easy case. */
2242 if (code == GTU || code == LEU)
2244 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2245 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2247 tem = x;
2248 x = y;
2249 y = tem;
2250 code = swap_condition (code);
2254 if (code == LTU || code == GEU)
2256 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2257 gen_rtx_fmt_ee (code, SImode,
2258 gen_compare_reg_1 (code, x, y),
2259 const0_rtx)));
2260 return true;
2263 /* Nope, do branches.  */
2264 return false;
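/* Editor's sketch of the branchless forms relied on above: subcc sets the
   carry flag to the unsigned borrow, which addx/subx then fold back in:

	cmp	%o0, %o1		! carry = (%o0 <u %o1)
	addx	%g0, 0, %o2		! sltu:  %o2 = carry

	cmp	%o0, %o1
	subx	%g0, -1, %o2		! sgeu:  %o2 = 0 - (-1) - carry

   seq/sne first XOR the operands and then test the result against zero
   the same way.  */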
2267 /* Emit a conditional jump insn for the v9 architecture using comparison code
2268 CODE and jump target LABEL.
2269 This function exists to take advantage of the v9 brxx insns. */
2272 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2274 emit_jump_insn (gen_rtx_SET (VOIDmode,
2275 pc_rtx,
2276 gen_rtx_IF_THEN_ELSE (VOIDmode,
2277 gen_rtx_fmt_ee (code, GET_MODE (op0),
2278 op0, const0_rtx),
2279 gen_rtx_LABEL_REF (VOIDmode, label),
2280 pc_rtx)));
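/* Editor's illustration: for DImode "x != 0" this yields a v9
   compare-and-branch on the register itself, no condition codes needed:

	brnz,pt	%o0, .Ltarget
	 nop  */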
2284 emit_conditional_branch_insn (rtx operands[])
2286 /* The quad-word fp compare library routines all return nonzero to indicate
2287 true, which is different from the equivalent libgcc routines, so we must
2288 handle them specially here. */
2289 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2291 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2292 GET_CODE (operands[0]));
2293 operands[1] = XEXP (operands[0], 0);
2294 operands[2] = XEXP (operands[0], 1);
2297 if (TARGET_ARCH64 && operands[2] == const0_rtx
2298 && GET_CODE (operands[1]) == REG
2299 && GET_MODE (operands[1]) == DImode)
2301 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2302 return;
2305 operands[1] = gen_compare_reg (operands[0]);
2306 operands[2] = const0_rtx;
2307 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2308 operands[1], operands[2]);
2309 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2310 operands[3]));
2314 /* Generate a DFmode part of a hard TFmode register.
2315 REG is the TFmode hard register, LOW is 1 for the
2316 low 64 bits of the register and 0 otherwise.  */
2319 gen_df_reg (rtx reg, int low)
2321 int regno = REGNO (reg);
2323 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2324 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2325 return gen_rtx_REG (DFmode, regno);
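/* Editor's usage note: for a TFmode value in %f4 (regnos 36-39), the two
   DFmode halves live in %f4 (regno 36) and %f6 (regno 38); SPARC is
   big-endian, so gen_df_reg (reg, 1) returns the higher-numbered pair,
   which holds the low 64 bits.  For 64-bit integer registers (ARCH64,
   regno < 32) the step is 1 instead of 2.  */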
2328 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2329 Unlike normal calls, TFmode operands are passed by reference. It is
2330 assumed that no more than 3 operands are required. */
2333 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2335 rtx ret_slot = NULL, arg[3], func_sym;
2338 /* We only expect to be called for conversions, unary, and binary ops. */
2339 gcc_assert (nargs == 2 || nargs == 3);
2341 for (i = 0; i < nargs; ++i)
2343 rtx this_arg = operands[i];
2346 /* TFmode arguments and return values are passed by reference. */
2347 if (GET_MODE (this_arg) == TFmode)
2349 int force_stack_temp;
2351 force_stack_temp = 0;
2352 if (TARGET_BUGGY_QP_LIB && i == 0)
2353 force_stack_temp = 1;
2355 if (GET_CODE (this_arg) == MEM
2356 && ! force_stack_temp)
2357 this_arg = XEXP (this_arg, 0);
2358 else if (CONSTANT_P (this_arg)
2359 && ! force_stack_temp)
2361 this_slot = force_const_mem (TFmode, this_arg);
2362 this_arg = XEXP (this_slot, 0);
2366 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2368 /* Operand 0 is the return value.  We'll copy it out later.  */
2369 if (i > 0)
2370 emit_move_insn (this_slot, this_arg);
2371 else
2372 ret_slot = this_slot;
2374 this_arg = XEXP (this_slot, 0);
2377 arg[i] = this_arg;
2381 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2383 if (GET_MODE (operands[0]) == TFmode)
2385 if (nargs == 2)
2386 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2387 arg[0], GET_MODE (arg[0]),
2388 arg[1], GET_MODE (arg[1]));
2389 else
2390 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2391 arg[0], GET_MODE (arg[0]),
2392 arg[1], GET_MODE (arg[1]),
2393 arg[2], GET_MODE (arg[2]));
2395 if (ret_slot)
2396 emit_move_insn (operands[0], ret_slot);
2402 gcc_assert (nargs == 2);
2404 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2405 GET_MODE (operands[0]), 1,
2406 arg[1], GET_MODE (arg[1]));
2408 if (ret != operands[0])
2409 emit_move_insn (operands[0], ret);
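/* Editor's aside: under the 64-bit software-quad ABI a TFmode operation
   like "c = a + b" therefore turns into a by-reference call, roughly

	extern void _Qp_add (long double *, const long double *,
			     const long double *);
	_Qp_add (&c, &a, &b);

   (the 32-bit ABI uses the _Q_* entry points instead).  The
   TARGET_BUGGY_QP_LIB path above additionally forces the result slot
   into a fresh stack temporary to cope with broken vendor libraries.  */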
2413 /* Expand soft-float TFmode calls to sparc abi routines. */
2416 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2418 const char *func;
2420 switch (code)
2422 case PLUS:
2423 func = TARGET_ARCH64 ? "_Qp_add" : "_Q_add";
2424 break;
2425 case MINUS:
2426 func = TARGET_ARCH64 ? "_Qp_sub" : "_Q_sub";
2427 break;
2428 case MULT:
2429 func = TARGET_ARCH64 ? "_Qp_mul" : "_Q_mul";
2430 break;
2431 case DIV:
2432 func = TARGET_ARCH64 ? "_Qp_div" : "_Q_div";
2433 break;
2434 default:
2435 gcc_unreachable ();
2438 emit_soft_tfmode_libcall (func, 3, operands);
2442 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2446 gcc_assert (code == SQRT);
2447 func = TARGET_ARCH64 ? "_Qp_sqrt" : "_Q_sqrt";
2449 emit_soft_tfmode_libcall (func, 2, operands);
2453 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2460 switch (GET_MODE (operands[1]))
2473 case FLOAT_TRUNCATE:
2474 switch (GET_MODE (operands[0]))
2488 switch (GET_MODE (operands[1]))
2493 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2503 case UNSIGNED_FLOAT:
2504 switch (GET_MODE (operands[1]))
2509 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2520 switch (GET_MODE (operands[0]))
2534 switch (GET_MODE (operands[0]))
2551 emit_soft_tfmode_libcall (func, 2, operands);
2554 /* Expand a hard-float tfmode operation.  All arguments must be in
2555 registers.  */
2558 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2562 if (GET_RTX_CLASS (code) == RTX_UNARY)
2564 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2565 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2569 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2570 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2571 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2572 operands[1], operands[2]);
2575 if (register_operand (operands[0], VOIDmode))
2576 dest = operands[0];
2577 else
2578 dest = gen_reg_rtx (GET_MODE (operands[0]));
2580 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2582 if (dest != operands[0])
2583 emit_move_insn (operands[0], dest);
2587 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2589 if (TARGET_HARD_QUAD)
2590 emit_hard_tfmode_operation (code, operands);
2591 else
2592 emit_soft_tfmode_binop (code, operands);
2596 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2598 if (TARGET_HARD_QUAD)
2599 emit_hard_tfmode_operation (code, operands);
2600 else
2601 emit_soft_tfmode_unop (code, operands);
2605 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2607 if (TARGET_HARD_QUAD)
2608 emit_hard_tfmode_operation (code, operands);
2609 else
2610 emit_soft_tfmode_cvt (code, operands);
2613 /* Return nonzero if a branch/jump/call instruction will emit a nop
2614 into its delay slot.  */
2617 empty_delay_slot (rtx insn)
2619 rtx seq;
2621 /* If no previous instruction (should not happen), return true.  */
2622 if (PREV_INSN (insn) == NULL)
2623 return 1;
2625 seq = NEXT_INSN (PREV_INSN (insn));
2626 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2627 return 0;
2629 return 1;
2632 /* Return nonzero if TRIAL can go into the call delay slot. */
2635 tls_call_delay (rtx trial)
2637 rtx pat;
2639 /* Binutils allows
2640 call __tls_get_addr, %tgd_call (foo)
2641 add %l7, %o0, %o0, %tgd_add (foo)
2642 while Sun as/ld does not.  */
2643 if (TARGET_GNU_TLS || !TARGET_TLS)
2644 return 1;
2646 pat = PATTERN (trial);
2648 /* We must reject tgd_add{32|64}, i.e.
2649 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2650 and tldm_add{32|64}, i.e.
2651 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2653 if (GET_CODE (pat) == SET
2654 && GET_CODE (SET_SRC (pat)) == PLUS)
2656 rtx unspec = XEXP (SET_SRC (pat), 1);
2658 if (GET_CODE (unspec) == UNSPEC
2659 && (XINT (unspec, 1) == UNSPEC_TLSGD
2660 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2661 return 0;
2664 return 1;
2667 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2668 instruction. RETURN_P is true if the v9 variant 'return' is to be
2669 considered in the test too.
2671 TRIAL must be a SET whose destination is a REG appropriate for the
2672 'restore' instruction or, if RETURN_P is true, for the 'return'
2673 instruction.  */
2676 eligible_for_restore_insn (rtx trial, bool return_p)
2678 rtx pat = PATTERN (trial);
2679 rtx src = SET_SRC (pat);
2681 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2682 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2683 && arith_operand (src, GET_MODE (src)))
2685 if (TARGET_ARCH64)
2686 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2687 else
2688 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2691 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2692 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2693 && arith_double_operand (src, GET_MODE (src)))
2694 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2696 /* The 'restore src,%g0,dest' pattern for float if no FPU.  */
2697 else if (! TARGET_FPU && register_operand (src, SFmode))
2698 return 1;
2700 /* The 'restore src,%g0,dest' pattern for double if no FPU.  */
2701 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2702 return 1;
2704 /* If we have the 'return' instruction, anything that does not use
2705 local or output registers and can go into a delay slot wins. */
2706 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2707 && (get_attr_in_uncond_branch_delay (trial)
2708 == IN_UNCOND_BRANCH_DELAY_TRUE))
2709 return 1;
2711 /* The 'restore src1,src2,dest' pattern for SImode. */
2712 else if (GET_CODE (src) == PLUS
2713 && register_operand (XEXP (src, 0), SImode)
2714 && arith_operand (XEXP (src, 1), SImode))
2715 return 1;
2717 /* The 'restore src1,src2,dest' pattern for DImode. */
2718 else if (GET_CODE (src) == PLUS
2719 && register_operand (XEXP (src, 0), DImode)
2720 && arith_double_operand (XEXP (src, 1), DImode))
2721 return 1;
2723 /* The 'restore src1,%lo(src2),dest' pattern. */
2724 else if (GET_CODE (src) == LO_SUM
2725 && ! TARGET_CM_MEDMID
2726 && ((register_operand (XEXP (src, 0), SImode)
2727 && immediate_operand (XEXP (src, 1), SImode))
2728 || (TARGET_ARCH64
2729 && register_operand (XEXP (src, 0), DImode)
2730 && immediate_operand (XEXP (src, 1), DImode))))
2731 return 1;
2733 /* The 'restore src,src,dest' pattern. */
2734 else if (GET_CODE (src) == ASHIFT
2735 && (register_operand (XEXP (src, 0), SImode)
2736 || register_operand (XEXP (src, 0), DImode))
2737 && XEXP (src, 1) == const1_rtx)
2738 return 1;
2740 return 0;
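/* Editor's illustration: the payoff of the patterns above is that the
   epilogue can fold one last arithmetic operation into the 'restore'
   itself, e.g. the tail of a function returning x + y can be

	ret
	 restore %i0, %i1, %o0	! caller's %o0 = callee's %i0 + %i1

   the LO_SUM form becomes "restore %i0, %lo(sym), %o0", and the ASHIFT
   form doubles a value via "restore %i0, %i0, %o0".  */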
2743 /* Return nonzero if TRIAL can go into the function return's
2744 delay slot.  */
2747 eligible_for_return_delay (rtx trial)
2749 rtx pat;
2751 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2752 return 0;
2754 if (get_attr_length (trial) != 1)
2755 return 0;
2757 /* If there are any call-saved registers, we should scan TRIAL to verify
2758 that it does not reference them.  For now just make it easy.  */
2759 if (num_gfregs)
2760 return 0;
2762 /* If the function uses __builtin_eh_return, the eh_return machinery
2763 occupies the delay slot. */
2764 if (crtl->calls_eh_return)
2767 /* In the case of a true leaf function, anything can go into the slot. */
2768 if (sparc_leaf_function_p)
2769 return get_attr_in_uncond_branch_delay (trial)
2770 == IN_UNCOND_BRANCH_DELAY_TRUE;
2772 pat = PATTERN (trial);
2774 /* Otherwise, only operations which can be done in tandem with
2775 a `restore' or `return' insn can go into the delay slot. */
2776 if (GET_CODE (SET_DEST (pat)) != REG
2777 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2778 return 0;
2780 /* If this instruction sets up a floating point register and we have a
2781 return instruction, it can probably go in.  But restore will not work
2782 with FP_REGS.  */
2783 if (REGNO (SET_DEST (pat)) >= 32)
2784 return (TARGET_V9
2785 && ! epilogue_renumber (&pat, 1)
2786 && (get_attr_in_uncond_branch_delay (trial)
2787 == IN_UNCOND_BRANCH_DELAY_TRUE));
2789 return eligible_for_restore_insn (trial, true);
2792 /* Return nonzero if TRIAL can go into the sibling call's
2793 delay slot.  */
2796 eligible_for_sibcall_delay (rtx trial)
2798 rtx pat;
2800 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2801 return 0;
2803 if (get_attr_length (trial) != 1)
2804 return 0;
2806 pat = PATTERN (trial);
2808 if (sparc_leaf_function_p)
2810 /* If the tail call is done using the call instruction,
2811 we have to restore %o7 in the delay slot. */
2812 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2813 return 0;
2815 /* %g1 is used to build the function address */
2816 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2817 return 0;
2819 return 1;
2822 /* Otherwise, only operations which can be done in tandem with
2823 a `restore' insn can go into the delay slot. */
2824 if (GET_CODE (SET_DEST (pat)) != REG
2825 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2826 || REGNO (SET_DEST (pat)) >= 32)
2829 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2830 before it is needed.  */
2831 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2832 return 0;
2834 return eligible_for_restore_insn (trial, false);
2838 short_branch (int uid1, int uid2)
2840 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2842 /* Leave a few words of "slop".  */
2843 if (delta >= -1023 && delta <= 1022)
2844 return 1;
2846 return 0;
2849 /* Return nonzero if REG is not used after INSN.
2850 We assume REG is a reload reg, and therefore does
2851 not live past labels or calls or jumps. */
2853 reg_unused_after (rtx reg, rtx insn)
2855 enum rtx_code code, prev_code = UNKNOWN;
2857 while ((insn = NEXT_INSN (insn)))
2859 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2860 return 1;
2862 code = GET_CODE (insn);
2863 if (GET_CODE (insn) == CODE_LABEL)
2864 return 1;
2868 rtx set = single_set (insn);
2869 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2872 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2873 return !in_src;
2874 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2875 return 0;
2877 prev_code = code;
2880 return 1;
2882 /* Determine if it's legal to put X into the constant pool. This
2883 is not possible if X contains the address of a symbol that is
2884 not constant (TLS) or not known at final link time (PIC). */
2887 sparc_cannot_force_const_mem (rtx x)
2889 switch (GET_CODE (x))
2891 case CONST_INT:
2892 case CONST_DOUBLE:
2893 case CONST_VECTOR:
2894 /* Accept all non-symbolic constants.  */
2895 return false;
2897 case LABEL_REF:
2898 /* Labels are OK iff we are non-PIC.  */
2899 return flag_pic != 0;
2901 case SYMBOL_REF:
2902 /* 'Naked' TLS symbol references are never OK,
2903 non-TLS symbols are OK iff we are non-PIC.  */
2904 if (SYMBOL_REF_TLS_MODEL (x))
2905 return true;
2906 else
2907 return flag_pic != 0;
2909 case CONST:
2910 return sparc_cannot_force_const_mem (XEXP (x, 0));
2911 case PLUS:
2912 case MINUS:
2913 return sparc_cannot_force_const_mem (XEXP (x, 0))
2914 || sparc_cannot_force_const_mem (XEXP (x, 1));
2915 case UNSPEC:
2916 return true;
2917 default:
2918 gcc_unreachable ();
2923 static GTY(()) bool pic_helper_needed = false;
2924 static GTY(()) rtx pic_helper_symbol;
2925 static GTY(()) rtx global_offset_table;
2927 /* Ensure that we are not using patterns that are not OK with PIC. */
2935 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2936 && (GET_CODE (recog_data.operand[i]) != CONST
2937 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2938 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2939 == global_offset_table)
2940 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2948 /* Return true if X is an address which needs a temporary register when
2949 reloaded while generating PIC code. */
2952 pic_address_needs_scratch (rtx x)
2954 /* An address which is a symbol plus a non-SMALL_INT offset needs a temp reg.  */
2955 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2956 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2957 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2958 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2959 return 1;
2961 return 0;
2964 /* Determine if a given RTX is a valid constant. We already know this
2965 satisfies CONSTANT_P. */
2968 legitimate_constant_p (rtx x)
2970 switch (GET_CODE (x))
2972 case CONST:
2973 case SYMBOL_REF:
2974 if (sparc_tls_referenced_p (x))
2975 return false;
2976 break;
2978 case CONST_DOUBLE:
2979 if (GET_MODE (x) == VOIDmode)
2980 return true;
2982 /* Floating point constants are generally not ok.
2983 The only exception is 0.0 in VIS.  */
2984 if (TARGET_VIS
2985 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
2986 && const_zero_operand (x, GET_MODE (x)))
2987 return true;
2989 return false;
2991 case CONST_VECTOR:
2992 /* Vector constants are generally not ok.
2993 The only exception is 0 in VIS.  */
2994 if (TARGET_VIS
2995 && const_zero_operand (x, GET_MODE (x)))
2996 return true;
2998 return false;
3000 default:
3001 break;
3003 return true;
3007 /* Determine if a given RTX is a valid constant address. */
3010 constant_address_p (rtx x)
3012 switch (GET_CODE (x))
3020 if (flag_pic && pic_address_needs_scratch (x))
3021 return false;
3022 return legitimate_constant_p (x);
3025 return !flag_pic && legitimate_constant_p (x);
3032 /* Nonzero if the constant value X is a legitimate general operand
3033 when generating PIC code. It is given that flag_pic is on and
3034 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3037 legitimate_pic_operand_p (rtx x)
3039 if (pic_address_needs_scratch (x))
3040 return false;
3041 if (sparc_tls_referenced_p (x))
3042 return false;
3044 return true;
3046 /* Return nonzero if ADDR is a valid memory address.
3047 STRICT specifies whether strict register checking applies. */
3050 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3052 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3054 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3055 rs1 = addr;
3056 else if (GET_CODE (addr) == PLUS)
3058 rs1 = XEXP (addr, 0);
3059 rs2 = XEXP (addr, 1);
3061 /* Canonicalize.  REG comes first, if there are no regs,
3062 LO_SUM comes first.  */
3063 if (!REG_P (rs1)
3064 && GET_CODE (rs1) != SUBREG
3065 && (REG_P (rs2)
3066 || GET_CODE (rs2) == SUBREG
3067 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3069 rs1 = XEXP (addr, 1);
3070 rs2 = XEXP (addr, 0);
3073 if ((flag_pic == 1
3074 && rs1 == pic_offset_table_rtx
3075 && !REG_P (rs2)
3076 && GET_CODE (rs2) != SUBREG
3077 && GET_CODE (rs2) != LO_SUM
3078 && GET_CODE (rs2) != MEM
3079 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
3080 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3081 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3082 || ((REG_P (rs1)
3083 || GET_CODE (rs1) == SUBREG)
3084 && RTX_OK_FOR_OFFSET_P (rs2)))
3086 imm1 = rs2;
3087 rs2 = NULL;
3089 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3090 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3092 /* We prohibit REG + REG for TFmode when there are no quad move insns
3093 and we consequently need to split. We do this because REG+REG
3094 is not an offsettable address. If we get the situation in reload
3095 where source and destination of a movtf pattern are both MEMs with
3096 REG+REG address, then only one of them gets converted to an
3097 offsettable address. */
3098 if (mode == TFmode
3099 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3100 return 0;
3102 /* We prohibit REG + REG on ARCH32 if not optimizing for
3103 DFmode/DImode because then mem_min_alignment is likely to be zero
3104 after reload and the forced split would lack a matching splitter
3105 pattern.  */
3106 if (TARGET_ARCH32 && !optimize
3107 && (mode == DFmode || mode == DImode))
3108 return 0;
3110 else if (USE_AS_OFFSETABLE_LO10
3111 && GET_CODE (rs1) == LO_SUM
3112 && TARGET_ARCH64
3113 && ! TARGET_CM_MEDMID
3114 && RTX_OK_FOR_OLO10_P (rs2))
3116 rs2 = NULL;
3117 imm1 = XEXP (rs1, 1);
3118 rs1 = XEXP (rs1, 0);
3119 if (!CONSTANT_P (imm1)
3120 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3121 return 0;
3124 else if (GET_CODE (addr) == LO_SUM)
3126 rs1 = XEXP (addr, 0);
3127 imm1 = XEXP (addr, 1);
3129 if (!CONSTANT_P (imm1)
3130 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3131 return 0;
3133 /* We can't allow TFmode in 32-bit mode, because an offset greater
3134 than the alignment (8) may cause the LO_SUM to overflow.  */
3135 if (mode == TFmode && TARGET_ARCH32)
3136 return 0;
3138 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3139 return 1;
3140 else
3141 return 0;
3143 if (GET_CODE (rs1) == SUBREG)
3144 rs1 = SUBREG_REG (rs1);
3145 if (!REG_P (rs1))
3146 return 0;
3148 if (rs2)
3150 if (GET_CODE (rs2) == SUBREG)
3151 rs2 = SUBREG_REG (rs2);
3152 if (!REG_P (rs2))
3153 return 0;
3156 if (strict)
3158 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3159 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3160 return 0;
3162 else
3164 if ((REGNO (rs1) >= 32
3165 && REGNO (rs1) != FRAME_POINTER_REGNUM
3166 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3167 || (rs2
3168 && (REGNO (rs2) >= 32
3169 && REGNO (rs2) != FRAME_POINTER_REGNUM
3170 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3171 return 0;
3173 return 1;
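/* Editor's summary of the address shapes accepted above, in assembly
   terms (SMALL_INT is the signed 13-bit range -4096..4095):

	ld	[%l1], %o0		! REG
	ld	[%l1+%l2], %o0		! REG+REG (restricted for TFmode/DFmode)
	ld	[%l1+123], %o0		! REG+SMALL_INT
	ld	[%l1+%lo(sym)], %o0	! LO_SUM, paired with sethi %hi(sym)  */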
3176 /* Construct the SYMBOL_REF for the __tls_get_addr function.  */
3178 static GTY(()) rtx sparc_tls_symbol;
3181 sparc_tls_get_addr (void)
3183 if (!sparc_tls_symbol)
3184 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3186 return sparc_tls_symbol;
3190 sparc_tls_got (void)
3192 rtx temp;
3193 if (flag_pic)
3195 crtl->uses_pic_offset_table = 1;
3196 return pic_offset_table_rtx;
3199 if (!global_offset_table)
3200 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3201 temp = gen_reg_rtx (Pmode);
3202 emit_move_insn (temp, global_offset_table);
3203 return temp;
3206 /* Return true if X contains a thread-local symbol. */
3209 sparc_tls_referenced_p (rtx x)
3211 if (!TARGET_HAVE_TLS)
3212 return false;
3214 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3215 x = XEXP (XEXP (x, 0), 0);
3217 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3218 return true;
3220 /* That's all we handle in legitimize_tls_address for now.  */
3221 return false;
3224 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3225 this (thread-local) address. */
3228 legitimize_tls_address (rtx addr)
3230 rtx temp1, temp2, temp3, ret, o0, got, insn;
3232 gcc_assert (can_create_pseudo_p ());
3234 if (GET_CODE (addr) == SYMBOL_REF)
3235 switch (SYMBOL_REF_TLS_MODEL (addr))
3237 case TLS_MODEL_GLOBAL_DYNAMIC:
3239 temp1 = gen_reg_rtx (SImode);
3240 temp2 = gen_reg_rtx (SImode);
3241 ret = gen_reg_rtx (Pmode);
3242 o0 = gen_rtx_REG (Pmode, 8);
3243 got = sparc_tls_got ();
3244 emit_insn (gen_tgd_hi22 (temp1, addr));
3245 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3246 if (TARGET_ARCH32)
3248 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3249 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3250 addr, const1_rtx));
3252 else
3254 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3255 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3256 addr, const1_rtx));
3258 CALL_INSN_FUNCTION_USAGE (insn)
3259 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3260 CALL_INSN_FUNCTION_USAGE (insn));
3261 insn = get_insns ();
3262 end_sequence ();
3263 emit_libcall_block (insn, ret, o0, addr);
3266 case TLS_MODEL_LOCAL_DYNAMIC:
3268 temp1 = gen_reg_rtx (SImode);
3269 temp2 = gen_reg_rtx (SImode);
3270 temp3 = gen_reg_rtx (Pmode);
3271 ret = gen_reg_rtx (Pmode);
3272 o0 = gen_rtx_REG (Pmode, 8);
3273 got = sparc_tls_got ();
3274 emit_insn (gen_tldm_hi22 (temp1));
3275 emit_insn (gen_tldm_lo10 (temp2, temp1));
3276 if (TARGET_ARCH32)
3278 emit_insn (gen_tldm_add32 (o0, got, temp2));
3279 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3280 const1_rtx));
3282 else
3284 emit_insn (gen_tldm_add64 (o0, got, temp2));
3285 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3286 const1_rtx));
3288 CALL_INSN_FUNCTION_USAGE (insn)
3289 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3290 CALL_INSN_FUNCTION_USAGE (insn));
3291 insn = get_insns ();
3292 end_sequence ();
3293 emit_libcall_block (insn, temp3, o0,
3294 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3295 UNSPEC_TLSLD_BASE));
3296 temp1 = gen_reg_rtx (SImode);
3297 temp2 = gen_reg_rtx (SImode);
3298 emit_insn (gen_tldo_hix22 (temp1, addr));
3299 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3300 if (TARGET_ARCH32)
3301 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3302 else
3303 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3304 break;
3306 case TLS_MODEL_INITIAL_EXEC:
3307 temp1 = gen_reg_rtx (SImode);
3308 temp2 = gen_reg_rtx (SImode);
3309 temp3 = gen_reg_rtx (Pmode);
3310 got = sparc_tls_got ();
3311 emit_insn (gen_tie_hi22 (temp1, addr));
3312 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3313 if (TARGET_ARCH32)
3314 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3315 else
3316 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3318 if (TARGET_SUN_TLS)
3319 ret = gen_reg_rtx (Pmode);
3320 if (TARGET_ARCH32)
3321 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3322 temp3, addr));
3323 else
3324 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3325 temp3, addr));
3327 else
3328 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3329 break;
3331 case TLS_MODEL_LOCAL_EXEC:
3332 temp1 = gen_reg_rtx (Pmode);
3333 temp2 = gen_reg_rtx (Pmode);
3334 if (TARGET_ARCH32)
3336 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3337 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3339 else
3341 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3342 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3344 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3345 break;
3347 default:
3348 gcc_unreachable ();
3351 else if (GET_CODE (addr) == CONST)
3355 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
3357 base = legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
3358 offset = XEXP (XEXP (addr, 0), 1);
3360 base = force_operand (base, NULL_RTX);
3361 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
3362 offset = force_reg (Pmode, offset);
3363 ret = gen_rtx_PLUS (Pmode, base, offset);
3367 gcc_unreachable ();  /* for now ... */
3369 return ret;
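/* Editor's sketch of two of the sequences generated above, using the GNU
   as spellings of the TLS relocation operators (32-bit shown):

   global-dynamic:
	sethi	%tgd_hi22(sym), %l1
	add	%l1, %tgd_lo10(sym), %l2
	add	%l7, %l2, %o0, %tgd_add(sym)
	call	__tls_get_addr, %tgd_call(sym)
	 nop				! address returned in %o0

   local-exec:
	sethi	%tle_hix22(sym), %l1
	xor	%l1, %tle_lox10(sym), %l1
	add	%g7, %l1, %o0		! %g7 is the thread pointer  */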
3372 /* Legitimize PIC addresses. If the address is already position-independent,
3373 we return ORIG. Newly generated position-independent addresses go into a
3374 reg.  This is REG if nonzero, otherwise we allocate register(s) as
3375 necessary.  */
3378 legitimize_pic_address (rtx orig, rtx reg)
3380 bool gotdata_op = false;
3382 if (GET_CODE (orig) == SYMBOL_REF
3383 /* See the comment in sparc_expand_move. */
3384 || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
3386 rtx pic_ref, address;
3391 gcc_assert (! reload_in_progress && ! reload_completed);
3392 reg = gen_reg_rtx (Pmode);
3397 /* If not during reload, allocate another temp reg here for loading
3398 in the address, so that these instructions can be optimized
3399 properly.  */
3400 rtx temp_reg = ((reload_in_progress || reload_completed)
3401 ? reg : gen_reg_rtx (Pmode));
3403 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3404 won't get confused into thinking that these two instructions
3405 are loading in the true address of the symbol. If in the
3406 future a PIC rtx exists, that should be used instead. */
3408 if (TARGET_ARCH64)
3409 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3410 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3412 else
3414 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3415 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3423 crtl->uses_pic_offset_table = 1;
3425 if (gotdata_op)
3426 if (TARGET_ARCH64)
3427 insn = emit_insn (gen_movdi_pic_gotdata_op (reg, pic_offset_table_rtx,
3428 address, orig));
3429 else
3430 insn = emit_insn (gen_movsi_pic_gotdata_op (reg, pic_offset_table_rtx,
3431 address, orig));
3433 else
3435 pic_ref = gen_const_mem (Pmode,
3436 gen_rtx_PLUS (Pmode,
3437 pic_offset_table_rtx, address));
3438 insn = emit_move_insn (reg, pic_ref);
3440 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3441 by loop.  */
3442 set_unique_reg_note (insn, REG_EQUAL, orig);
3444 return reg;
3445 else if (GET_CODE (orig) == CONST)
3447 rtx base, offset;
3449 if (GET_CODE (XEXP (orig, 0)) == PLUS
3450 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3451 return orig;
3455 gcc_assert (! reload_in_progress && ! reload_completed);
3456 reg = gen_reg_rtx (Pmode);
3459 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3460 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
3461 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
3462 base == reg ? NULL_RTX : reg);
3464 if (GET_CODE (offset) == CONST_INT)
3466 if (SMALL_INT (offset))
3467 return plus_constant (base, INTVAL (offset));
3468 else if (! reload_in_progress && ! reload_completed)
3469 offset = force_reg (Pmode, offset);
3470 else
3471 /* If we reach here, then something is seriously wrong.  */
3472 gcc_unreachable ();
3474 return gen_rtx_PLUS (Pmode, base, offset);
3476 else if (GET_CODE (orig) == LABEL_REF)
3477 /* ??? Why do we do this? */
3478 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3479 the register is live instead, in case it is eliminated. */
3480 crtl->uses_pic_offset_table = 1;
3482 return orig;
3485 /* Try machine-dependent ways of modifying an illegitimate address X
3486 to be legitimate. If we find one, return the new, valid address.
3488 OLDX is the address as it was before break_out_memory_refs was called.
3489 In some cases it is useful to look at this to decide what needs to be done.
3491 MODE is the mode of the operand pointed to by X.
3493 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
3496 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3497 enum machine_mode mode)
3499 rtx orig_x = x;
3501 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3502 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3503 force_operand (XEXP (x, 0), NULL_RTX));
3504 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3505 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3506 force_operand (XEXP (x, 1), NULL_RTX));
3507 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3508 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3509 XEXP (x, 1));
3510 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3511 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3512 force_operand (XEXP (x, 1), NULL_RTX));
3514 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3515 return x;
3517 if (sparc_tls_referenced_p (x))
3518 x = legitimize_tls_address (x);
3520 x = legitimize_pic_address (x, NULL_RTX);
3521 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3522 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3523 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3524 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3525 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3526 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3527 else if (GET_CODE (x) == SYMBOL_REF
3528 || GET_CODE (x) == CONST
3529 || GET_CODE (x) == LABEL_REF)
3530 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3532 return x;
3535 /* Return true if ADDR (a legitimate address expression)
3536 has an effect that depends on the machine mode it is used for.
3538 In PIC mode,
3540 (mem:HI [%l7+a])
3542 is not equivalent to
3544 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
3546 because [%l7+a+1] is interpreted as the address of (a+1).  */
3550 sparc_mode_dependent_address_p (const_rtx addr)
3552 if (flag_pic && GET_CODE (addr) == PLUS)
3554 rtx op0 = XEXP (addr, 0);
3555 rtx op1 = XEXP (addr, 1);
3556 if (op0 == pic_offset_table_rtx
3557 && SYMBOLIC_CONST (op1))
3558 return true;
3561 return false;
3564 #ifdef HAVE_GAS_HIDDEN
3565 # define USE_HIDDEN_LINKONCE 1
3566 #else
3567 # define USE_HIDDEN_LINKONCE 0
3568 #endif
3571 get_pc_thunk_name (char name[32], unsigned int regno)
3573 const char *pic_name = reg_names[regno];
3575 /* Skip the leading '%' as that cannot be used in a
3576 symbol name.  */
3577 pic_name += 1;
3579 if (USE_HIDDEN_LINKONCE)
3580 sprintf (name, "__sparc_get_pc_thunk.%s", pic_name);
3582 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
3585 /* Emit code to load the PIC register. */
3588 load_pic_register (void)
3590 int orig_flag_pic = flag_pic;
3592 if (TARGET_VXWORKS_RTP)
3594 emit_insn (gen_vxworks_load_got ());
3595 emit_use (pic_offset_table_rtx);
3596 return;
3599 /* If we haven't initialized the special PIC symbols, do so now. */
3600 if (!pic_helper_needed)
3602 char name[32];
3604 pic_helper_needed = true;
3606 get_pc_thunk_name (name, REGNO (pic_offset_table_rtx));
3607 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
3609 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3611 flag_pic = 0;
3613 if (TARGET_ARCH64)
3614 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3615 pic_helper_symbol));
3616 else
3617 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3618 pic_helper_symbol));
3619 flag_pic = orig_flag_pic;
3621 /* Need to emit this whether or not we obey regdecls,
3622 since setjmp/longjmp can cause life info to screw up.
3623 ??? In the case where we don't obey regdecls, this is not sufficient
3624 since we may not fall out the bottom. */
3625 emit_use (pic_offset_table_rtx);
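/* Editor's sketch: on ELF targets the load_pcrel_sym pattern emitted above
   expands to roughly

	sethi	%hi(_GLOBAL_OFFSET_TABLE_-4), %l7
	call	__sparc_get_pc_thunk.l7
	 add	%l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   where the thunk body is just "jmp %o7+8; add %o7, %l7, %l7", i.e. it
   adds the call's own PC so that %l7 ends up pointing at the GOT.  */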
3628 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3629 address of the call target. */
3632 sparc_emit_call_insn (rtx pat, rtx addr)
3634 rtx insn;
3636 insn = emit_call_insn (pat);
3638 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3639 if (TARGET_VXWORKS_RTP
3640 && flag_pic
3641 && GET_CODE (addr) == SYMBOL_REF
3642 && (SYMBOL_REF_DECL (addr)
3643 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3644 : !SYMBOL_REF_LOCAL_P (addr)))
3646 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3647 crtl->uses_pic_offset_table = 1;
3651 /* Return 1 if RTX is a MEM which is known to be aligned to at
3652 least a DESIRED byte boundary. */
3655 mem_min_alignment (rtx mem, int desired)
3657 rtx addr, base, offset;
3659 /* If it's not a MEM we can't accept it. */
3660 if (GET_CODE (mem) != MEM)
3661 return 0;
3664 if (!TARGET_UNALIGNED_DOUBLES
3665 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3666 return 1;
3668 /* ??? The rest of the function predates MEM_ALIGN so
3669 there is probably a bit of redundancy. */
3670 addr = XEXP (mem, 0);
3671 base = offset = NULL_RTX;
3672 if (GET_CODE (addr) == PLUS)
3674 if (GET_CODE (XEXP (addr, 0)) == REG)
3676 base = XEXP (addr, 0);
3678 /* What we are saying here is that if the base
3679 REG is aligned properly, the compiler will make
3680 sure any REG based index upon it will be so
3681 as well.  */
3682 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3683 offset = XEXP (addr, 1);
3685 offset = const0_rtx;
3688 else if (GET_CODE (addr) == REG)
3690 base = addr;
3691 offset = const0_rtx;
3694 if (base != NULL_RTX)
3696 int regno = REGNO (base);
3698 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3700 /* Check if the compiler has recorded some information
3701 about the alignment of the base REG. If reload has
3702 completed, we already matched with proper alignments.
3703 If not running global_alloc, reload might give us
3704 unaligned pointer to local stack though. */
3706 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3707 || (optimize && reload_completed))
3708 && (INTVAL (offset) & (desired - 1)) == 0)
3709 return 1;
3713 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3714 return 1;
3717 else if (! TARGET_UNALIGNED_DOUBLES
3718 || CONSTANT_P (addr)
3719 || GET_CODE (addr) == LO_SUM)
3721 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3722 is true, in which case we can only assume that an access is aligned if
3723 it is to a constant address, or the address involves a LO_SUM.  */
3724 return 1;
3727 /* An obviously unaligned address.  */
3728 return 0;
3732 /* Vectors to keep interesting information about registers where it can easily
3733 be got. We used to use the actual mode value as the bit number, but there
3734 are more than 32 modes now. Instead we use two tables: one indexed by
3735 hard register number, and one indexed by mode. */
3737 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3738 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3739 mapped into one sparc_mode_class mode. */
3741 enum sparc_mode_class {
3742 S_MODE, D_MODE, T_MODE, O_MODE,
3743 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3747 /* Modes for single-word and smaller quantities. */
3748 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3750 /* Modes for double-word and smaller quantities. */
3751 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3753 /* Modes for quad-word and smaller quantities. */
3754 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3756 /* Modes for 8-word and smaller quantities. */
3757 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3759 /* Modes for single-float quantities. We must allow any single word or
3760 smaller quantity. This is because the fix/float conversion instructions
3761 take integer inputs/outputs from the float registers. */
3762 #define SF_MODES (S_MODES)
3764 /* Modes for double-float and smaller quantities. */
3765 #define DF_MODES (D_MODES)
3767 /* Modes for quad-float and smaller quantities. */
3768 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
3770 /* Modes for quad-float pairs and smaller quantities. */
3771 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
3773 /* Modes for double-float only quantities. */
3774 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3776 /* Modes for quad-float and double-float only quantities. */
3777 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
3779 /* Modes for quad-float pairs and double-float only quantities. */
3780 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
3782 /* Modes for condition codes. */
3783 #define CC_MODES (1 << (int) CC_MODE)
3784 #define CCFP_MODES (1 << (int) CCFP_MODE)
3786 /* Value is 1 if register/mode pair is acceptable on sparc.
3787 The funny mixture of D and T modes is because integer operations
3788 do not specially operate on tetra quantities, so non-quad-aligned
3789 registers can hold quadword quantities (except %o4 and %i4 because
3790 they cross fixed registers). */
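/* Editor's note: the two tables are always consumed together; the
   HARD_REGNO_MODE_OK test in sparc.h boils down to a single AND:

	(hard_regno_mode_classes[REGNO] & sparc_mode_class[MODE]) != 0  */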
3792 /* This points to either the 32 bit or the 64 bit version. */
3793 const int *hard_regno_mode_classes;
3795 static const int hard_32bit_mode_classes[] = {
3796 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3797 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3798 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3799 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3801 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3802 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3803 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3804 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3806 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3807 and none can hold SFmode/SImode values. */
3808 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3809 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3810 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3811 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3814 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3820 static const int hard_64bit_mode_classes[] = {
3821 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3822 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3823 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3824 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3826 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3827 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3828 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3829 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3831 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3832 and none can hold SFmode/SImode values. */
3833 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3834 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3835 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3836 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3839 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3845 int sparc_mode_class [NUM_MACHINE_MODES];
3847 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3850 sparc_init_modes (void)
3854 for (i = 0; i < NUM_MACHINE_MODES; i++)
3856 switch (GET_MODE_CLASS (i))
3858 case MODE_INT:
3859 case MODE_PARTIAL_INT:
3860 case MODE_COMPLEX_INT:
3861 if (GET_MODE_SIZE (i) <= 4)
3862 sparc_mode_class[i] = 1 << (int) S_MODE;
3863 else if (GET_MODE_SIZE (i) == 8)
3864 sparc_mode_class[i] = 1 << (int) D_MODE;
3865 else if (GET_MODE_SIZE (i) == 16)
3866 sparc_mode_class[i] = 1 << (int) T_MODE;
3867 else if (GET_MODE_SIZE (i) == 32)
3868 sparc_mode_class[i] = 1 << (int) O_MODE;
3869 else
3870 sparc_mode_class[i] = 0;
3871 break;
3872 case MODE_VECTOR_INT:
3873 if (GET_MODE_SIZE (i) <= 4)
3874 sparc_mode_class[i] = 1 << (int)SF_MODE;
3875 else if (GET_MODE_SIZE (i) == 8)
3876 sparc_mode_class[i] = 1 << (int)DF_MODE;
3877 break;
3878 case MODE_FLOAT:
3879 case MODE_COMPLEX_FLOAT:
3880 if (GET_MODE_SIZE (i) <= 4)
3881 sparc_mode_class[i] = 1 << (int) SF_MODE;
3882 else if (GET_MODE_SIZE (i) == 8)
3883 sparc_mode_class[i] = 1 << (int) DF_MODE;
3884 else if (GET_MODE_SIZE (i) == 16)
3885 sparc_mode_class[i] = 1 << (int) TF_MODE;
3886 else if (GET_MODE_SIZE (i) == 32)
3887 sparc_mode_class[i] = 1 << (int) OF_MODE;
3888 else
3889 sparc_mode_class[i] = 0;
3890 break;
3891 case MODE_CC:
3892 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3893 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3894 else
3895 sparc_mode_class[i] = 1 << (int) CC_MODE;
3896 break;
3897 default:
3898 sparc_mode_class[i] = 0;
3899 break;
3903 if (TARGET_ARCH64)
3904 hard_regno_mode_classes = hard_64bit_mode_classes;
3905 else
3906 hard_regno_mode_classes = hard_32bit_mode_classes;
3908 /* Initialize the array used by REGNO_REG_CLASS. */
3909 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3911 if (i < 16 && TARGET_V8PLUS)
3912 sparc_regno_reg_class[i] = I64_REGS;
3913 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3914 sparc_regno_reg_class[i] = GENERAL_REGS;
3915 else if (i < 64)
3916 sparc_regno_reg_class[i] = FP_REGS;
3917 else if (i < 96)
3918 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3919 else if (i < 100)
3920 sparc_regno_reg_class[i] = FPCC_REGS;
3921 else
3922 sparc_regno_reg_class[i] = NO_REGS;
3926 /* Compute the frame size required by the function. This function is called
3927 during the reload pass and also by sparc_expand_prologue. */
3930 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3932 int outgoing_args_size = (crtl->outgoing_args_size
3933 + REG_PARM_STACK_SPACE (current_function_decl));
3934 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3938 if (TARGET_ARCH64)
3939 for (i = 0; i < 8; i++)
3940 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3941 n_regs += 2;
3943 else
3945 for (i = 0; i < 8; i += 2)
3946 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3947 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3948 n_regs += 2;
3951 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3952 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3953 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3954 n_regs += 2;
3956 /* Set up values for use in prologue and epilogue. */
3957 num_gfregs = n_regs;
3961 if (leaf_function_p && n_regs == 0 && size == 0
3962 && crtl->outgoing_args_size == 0)
3963 actual_fsize = apparent_fsize = 0;
3966 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
3967 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3968 apparent_fsize += n_regs * 4;
3969 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3972 /* Make sure nothing can clobber our register windows.
3973 If a SAVE must be done, or there is a stack-local variable,
3974 the register window area must be allocated. */
3975 if (! leaf_function_p || size > 0)
3976 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3978 return SPARC_STACK_ALIGN (actual_fsize);
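/* Editor's worked example (ignoring STARTING_FRAME_OFFSET): for size == 20
   and n_regs == 4, apparent_fsize = ((20 + 7) & -8) + 4*4 = 24 + 16 = 40;
   actual_fsize then adds the outgoing args area rounded up to a multiple
   of 8 and, for non-leaf functions, the register window save area, before
   SPARC_STACK_ALIGN applies the final ABI alignment.  */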
3981 /* Output any necessary .register pseudo-ops. */
3984 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3986 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3987 int i;
3989 if (TARGET_ARCH64)
3992 /* Check if %g[2367] were used without
3993 .register being printed for them already. */
3994 for (i = 2; i < 8; i++)
3996 if (df_regs_ever_live_p (i)
3997 && ! sparc_hard_reg_printed [i])
3999 sparc_hard_reg_printed [i] = 1;
4000 /* %g7 is used as TLS base register, use #ignore
4001 for it instead of #scratch. */
4002 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
4003 i == 7 ? "ignore" : "scratch");
4010 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
4012 #if PROBE_INTERVAL > 4096
4013 #error Cannot use indexed addressing mode for stack probing
4016 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
4017 inclusive. These are offsets from the current stack pointer.
4019 Note that we don't use the REG+REG addressing mode for the probes because
4020 of the stack bias in 64-bit mode. And it doesn't really buy us anything
4021 so the advantages of having a single code path win here.  */
4024 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
4026 rtx g1 = gen_rtx_REG (Pmode, 1);
4028 /* See if we have a constant small number of probes to generate. If so,
4029 that's the easy case. */
4030 if (size <= PROBE_INTERVAL)
4032 emit_move_insn (g1, GEN_INT (first));
4033 emit_insn (gen_rtx_SET (VOIDmode, g1,
4034 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4035 emit_stack_probe (plus_constant (g1, -size));
4038 /* The run-time loop is made up of 10 insns in the generic case while the
4039 compile-time loop is made up of 4+2*(n-2) insns for n # of intervals. */
4040 else if (size <= 5 * PROBE_INTERVAL)
4044 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
4045 emit_insn (gen_rtx_SET (VOIDmode, g1,
4046 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4047 emit_stack_probe (g1);
4049 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
4050 it exceeds SIZE. If only two probes are needed, this will not
4051 generate any code. Then probe at FIRST + SIZE. */
4052 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
4054 emit_insn (gen_rtx_SET (VOIDmode, g1,
4055 plus_constant (g1, -PROBE_INTERVAL)));
4056 emit_stack_probe (g1);
4059 emit_stack_probe (plus_constant (g1, (i - PROBE_INTERVAL) - size));
4062 /* Otherwise, do the same as above, but in a loop. Note that we must be
4063 extra careful with variables wrapping around because we might be at
4064 the very top (or the very bottom) of the address space and we have
4065 to be able to handle this case properly; in particular, we use an
4066 equality test for the loop condition. */
4069 HOST_WIDE_INT rounded_size;
4070 rtx g4 = gen_rtx_REG (Pmode, 4);
4072 emit_move_insn (g1, GEN_INT (first));
4075 /* Step 1: round SIZE to the previous multiple of the interval. */
4077 rounded_size = size & -PROBE_INTERVAL;
4078 emit_move_insn (g4, GEN_INT (rounded_size));
4081 /* Step 2: compute initial and final value of the loop counter. */
4083 /* TEST_ADDR = SP + FIRST. */
4084 emit_insn (gen_rtx_SET (VOIDmode, g1,
4085 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4087 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
4088 emit_insn (gen_rtx_SET (VOIDmode, g4, gen_rtx_MINUS (Pmode, g1, g4)));
4091 /* Step 3: the loop
4093 while (TEST_ADDR != LAST_ADDR)
4095 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
4096 probe at TEST_ADDR
4099 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
4100 until it is equal to ROUNDED_SIZE.  */
4102 if (TARGET_ARCH64)
4103 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
4104 else
4105 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
4108 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
4109 that SIZE is equal to ROUNDED_SIZE. */
4111 if (size != rounded_size)
4112 emit_stack_probe (plus_constant (g4, rounded_size - size));
4115 /* Make sure nothing is scheduled before we are done. */
4116 emit_insn (gen_blockage ());
4119 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
4120 absolute addresses. */
4123 output_probe_stack_range (rtx reg1, rtx reg2)
4125 static int labelno = 0;
4126 char loop_lab[32], end_lab[32];
4127 rtx xops[2];
4129 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
4130 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
4132 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
4134 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR.  */
4135 xops[0] = reg1;
4136 xops[1] = reg2;
4137 output_asm_insn ("cmp\t%0, %1", xops);
4138 if (TARGET_ARCH64)
4139 fputs ("\tbe,pn\t%xcc,", asm_out_file);
4140 else
4141 fputs ("\tbe\t", asm_out_file);
4142 assemble_name_raw (asm_out_file, end_lab);
4143 fputc ('\n', asm_out_file);
4145 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
4146 xops[1] = GEN_INT (-PROBE_INTERVAL);
4147 output_asm_insn (" add\t%0, %1, %0", xops);
4149 /* Probe at TEST_ADDR and branch. */
4150 if (TARGET_ARCH64)
4151 fputs ("\tba,pt\t%xcc,", asm_out_file);
4152 else
4153 fputs ("\tba\t", asm_out_file);
4154 assemble_name_raw (asm_out_file, loop_lab);
4155 fputc ('\n', asm_out_file);
4156 xops[1] = GEN_INT (SPARC_STACK_BIAS);
4157 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
4159 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
4161 return "";
4164 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
4165 as needed. LOW should be double-word aligned for 32-bit registers.
4166 Return the new OFFSET. */
4168 #define SORR_SAVE    0
4169 #define SORR_RESTORE 1
4172 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
4174 rtx mem, insn;
4175 int i;
4177 if (TARGET_ARCH64 && high <= 32)
4179 for (i = low; i < high; i++)
4181 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4183 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
4184 set_mem_alias_set (mem, sparc_sr_alias_set);
4185 if (action == SORR_SAVE)
4187 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4188 RTX_FRAME_RELATED_P (insn) = 1;
4190 else /* action == SORR_RESTORE */
4191 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4193 offset += 8;
4198 for (i = low; i < high; i += 2)
4200 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
4201 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
4202 enum machine_mode mode;
4203 int regno;
4205 if (reg0 && reg1)
4207 mode = i < 32 ? DImode : DFmode;
4208 regno = i;
4210 else if (reg0)
4212 mode = i < 32 ? SImode : SFmode;
4213 regno = i;
4215 else if (reg1)
4217 mode = i < 32 ? SImode : SFmode;
4218 regno = i + 1;
4219 offset += 4;
4221 else
4222 continue;
4224 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
4225 set_mem_alias_set (mem, sparc_sr_alias_set);
4226 if (action == SORR_SAVE)
4228 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4229 RTX_FRAME_RELATED_P (insn) = 1;
4231 else /* action == SORR_RESTORE */
4232 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4234 /* Always preserve double-word alignment. */
4235 offset = (offset + 7) & -8;
4238 return offset;
4242 /* Emit code to save call-saved registers. */
4245 emit_save_or_restore_regs (int action)
4247 HOST_WIDE_INT offset;
4248 rtx base;
4250 offset = frame_base_offset - apparent_fsize;
4252 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
4254 /* ??? This might be optimized a little as %g1 might already have a
4255 value close enough that a single add insn will do. */
4256 /* ??? Although, all of this is probably only a temporary fix
4257 because if %g1 can hold a function result, then
4258 sparc_expand_epilogue will lose (the result will be
4259 clobbered).  */
4260 base = gen_rtx_REG (Pmode, 1);
4261 emit_move_insn (base, GEN_INT (offset));
4262 emit_insn (gen_rtx_SET (VOIDmode,
4263 base,
4264 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4265 offset = 0;
4268 base = frame_base_reg;
4270 offset = save_or_restore_regs (0, 8, base, offset, action);
4271 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
4274 /* Generate a save_register_window insn. */
4277 gen_save_register_window (rtx increment)
4279 if (TARGET_ARCH64)
4280 return gen_save_register_windowdi (increment);
4281 else
4282 return gen_save_register_windowsi (increment);
4285 /* Generate an increment for the stack pointer. */
4288 gen_stack_pointer_inc (rtx increment)
4290 return gen_rtx_SET (VOIDmode,
4291 stack_pointer_rtx,
4292 gen_rtx_PLUS (Pmode,
4293 stack_pointer_rtx,
4294 increment));
4297 /* Generate a decrement for the stack pointer. */
4300 gen_stack_pointer_dec (rtx decrement)
4302 return gen_rtx_SET (VOIDmode,
4303 stack_pointer_rtx,
4304 gen_rtx_MINUS (Pmode,
4305 stack_pointer_rtx,
4306 decrement));
4309 /* Expand the function prologue. The prologue is responsible for reserving
4310 storage for the frame, saving the call-saved registers and loading the
4311 PIC register if needed. */
4314 sparc_expand_prologue (void)
4319 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4320 on the final value of the flag means deferring the prologue/epilogue
4321 expansion until just before the second scheduling pass, which is too
4322 late to emit multiple epilogues or return insns.
4324 Of course we are making the assumption that the value of the flag
4325 will not change between now and its final value. Of the three parts
4326 of the formula, only the last one can reasonably vary. Let's take a
4327 closer look, after assuming that the first two are set to true
4328 (otherwise the last value is effectively silenced).
4330 If only_leaf_regs_used returns false, the global predicate will also
4331 be false so the actual frame size calculated below will be positive.
4332 As a consequence, the save_register_window insn will be emitted in
4333 the instruction stream; now this insn explicitly references %fp
4334 which is not a leaf register so only_leaf_regs_used will always
4335 return false subsequently.
4337 If only_leaf_regs_used returns true, we hope that the subsequent
4338 optimization passes won't cause non-leaf registers to pop up. For
4339 example, the regrename pass has special provisions to not rename to
4340 non-leaf registers in a leaf function. */
4341 sparc_leaf_function_p
4342 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4344 /* Need to use actual_fsize, since we are also allocating
4345 space for our callee (and our own register save area). */
4346 actual_fsize
4347 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4349 /* Advertise that the data calculated just above are now valid. */
4350 sparc_prologue_data_valid_p = true;
4352 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && actual_fsize)
4353 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, actual_fsize);
4355 if (sparc_leaf_function_p)
4357 frame_base_reg = stack_pointer_rtx;
4358 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4362 frame_base_reg = hard_frame_pointer_rtx;
4363 frame_base_offset = SPARC_STACK_BIAS;
4366 if (actual_fsize == 0)
4367 /* do nothing.  */ ;
4368 else if (sparc_leaf_function_p)
4370 if (actual_fsize <= 4096)
4371 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4372 else if (actual_fsize <= 8192)
4374 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4375 /* %sp is still the CFA register. */
4376 RTX_FRAME_RELATED_P (insn) = 1;
4377 insn
4378 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4382 rtx reg = gen_rtx_REG (Pmode, 1);
4383 emit_move_insn (reg, GEN_INT (-actual_fsize));
4384 insn = emit_insn (gen_stack_pointer_inc (reg));
4385 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4386 gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4389 RTX_FRAME_RELATED_P (insn) = 1;
4393 if (actual_fsize <= 4096)
4394 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4395 else if (actual_fsize <= 8192)
4397 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4398 /* %sp is not the CFA register anymore. */
4399 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4403 rtx reg = gen_rtx_REG (Pmode, 1);
4404 emit_move_insn (reg, GEN_INT (-actual_fsize));
4405 insn = emit_insn (gen_save_register_window (reg));
4408 RTX_FRAME_RELATED_P (insn) = 1;
4409 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
4410 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4414 emit_save_or_restore_regs (SORR_SAVE);
4416 /* Load the PIC register if needed. */
4417 if (flag_pic && crtl->uses_pic_offset_table)
4418 load_pic_register ();
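
/* A minimal illustrative sketch (not part of sparc.c proper): the three
   stack-allocation regimes used by sparc_expand_prologue above, restated
   as a standalone helper.  The 4096 cutoff reflects the 13-bit signed
   immediate field of SPARC arithmetic instructions (range -4096..4095);
   frame-related note bookkeeping is omitted for brevity.  */

static void
sparc_emit_frame_allocation_sketch (HOST_WIDE_INT size)
{
  if (size == 0)
    return;

  if (size <= 4096)
    /* Fits in a single 13-bit immediate.  */
    emit_insn (gen_stack_pointer_inc (GEN_INT (-size)));
  else if (size <= 8192)
    {
      /* Split into two immediates of at most 4096 each.  */
      emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
      emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
    }
  else
    {
      /* Too big for immediates: go through the scratch register %g1.  */
      rtx reg = gen_rtx_REG (Pmode, 1);
      emit_move_insn (reg, GEN_INT (-size));
      emit_insn (gen_stack_pointer_inc (reg));
    }
}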
4421 /* This function generates the assembly code for function entry, which boils
4422 down to emitting the necessary .register directives. */
4425 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4427 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4428 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4430 sparc_output_scratch_registers (file);
4433 /* Expand the function epilogue, either normal or part of a sibcall.
4434 We emit all the instructions except the return or the call. */
4437 sparc_expand_epilogue (void)
4440 emit_save_or_restore_regs (SORR_RESTORE);
4442 if (actual_fsize == 0)
4444 else if (sparc_leaf_function_p)
4446 if (actual_fsize <= 4096)
4447 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4448 else if (actual_fsize <= 8192)
4450 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4451 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4455 rtx reg = gen_rtx_REG (Pmode, 1);
4456 emit_move_insn (reg, GEN_INT (-actual_fsize));
4457 emit_insn (gen_stack_pointer_dec (reg));
4462 /* Return true if it is appropriate to emit `return' instructions in the
4463 body of a function. */
4466 sparc_can_use_return_insn_p (void)
4468 return sparc_prologue_data_valid_p
4469 && (actual_fsize == 0 || !sparc_leaf_function_p);
4472 /* This function generates the assembly code for function exit. */
4475 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4477 /* If code does not drop into the epilogue, we still have to output
4478 a dummy nop for the sake of sane backtraces. Otherwise, if the
4479 last two instructions of a function were "call foo; dslot;" this
4480 can make the return PC of foo (i.e. address of call instruction
4481 plus 8) point to the first instruction in the next function. */
4483 rtx insn, last_real_insn;
4485 insn = get_last_insn ();
4487 last_real_insn = prev_real_insn (insn);
4489 && GET_CODE (last_real_insn) == INSN
4490 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4491 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4493 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4494 fputs("\tnop\n", file);
4496 sparc_output_deferred_case_vectors ();
4499 /* Output a 'restore' instruction. */
4502 output_restore (rtx pat)
4508 fputs ("\t restore\n", asm_out_file);
4512 gcc_assert (GET_CODE (pat) == SET);
4514 operands[0] = SET_DEST (pat);
4515 pat = SET_SRC (pat);
4517 switch (GET_CODE (pat))
4520 operands[1] = XEXP (pat, 0);
4521 operands[2] = XEXP (pat, 1);
4522 output_asm_insn (" restore %r1, %2, %Y0", operands);
4525 operands[1] = XEXP (pat, 0);
4526 operands[2] = XEXP (pat, 1);
4527 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4530 operands[1] = XEXP (pat, 0);
4531 gcc_assert (XEXP (pat, 1) == const1_rtx);
4532 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4536 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4541 /* Output a return. */
4544 output_return (rtx insn)
4546 if (sparc_leaf_function_p)
4548 /* This is a leaf function so we don't have to bother restoring the
4549 register window, which frees us from dealing with the convoluted
4550 semantics of restore/return. We simply output the jump to the
4551 return address and the insn in the delay slot (if any). */
4553 gcc_assert (! crtl->calls_eh_return);
4555 return "jmp\t%%o7+%)%#";
4559 /* This is a regular function so we have to restore the register window.
4560 We may have a pending insn for the delay slot, which will be either
4561 combined with the 'restore' instruction or put in the delay slot of
4562 the 'return' instruction. */
4564 if (crtl->calls_eh_return)
4566 /* If the function uses __builtin_eh_return, the eh_return
4567 machinery occupies the delay slot. */
4568 gcc_assert (! final_sequence);
4570 if (! flag_delayed_branch)
4571 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4574 fputs ("\treturn\t%i7+8\n", asm_out_file);
4576 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4578 if (flag_delayed_branch)
4579 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4581 fputs ("\t nop\n", asm_out_file);
4583 else if (final_sequence)
4587 delay = NEXT_INSN (insn);
4590 pat = PATTERN (delay);
4592 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4594 epilogue_renumber (&pat, 0);
4595 return "return\t%%i7+%)%#";
4599 output_asm_insn ("jmp\t%%i7+%)", NULL);
4600 output_restore (pat);
4601 PATTERN (delay) = gen_blockage ();
4602 INSN_CODE (delay) = -1;
4607 /* The delay slot is empty. */
4609 return "return\t%%i7+%)\n\t nop";
4610 else if (flag_delayed_branch)
4611 return "jmp\t%%i7+%)\n\t restore";
4613 return "restore\n\tjmp\t%%o7+%)\n\t nop";
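
/* For illustration (a sketch, with %) expanded to its usual value of 8;
   it is 12 when a struct-return unimp must be skipped), the typical
   sequences produced by output_return are:

     leaf function:      jmp    %o7+8
                          nop             ! or the delay-slot insn

     non-leaf, V9:       return %i7+8
                          nop             ! or the renumbered delay insn

     non-leaf, pre-V9:   jmp    %i7+8
                          restore         ! delayed-branch case  */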
4620 /* Output a sibling call. */
4623 output_sibcall (rtx insn, rtx call_operand)
4627 gcc_assert (flag_delayed_branch);
4629 operands[0] = call_operand;
4631 if (sparc_leaf_function_p)
4633 /* This is a leaf function so we don't have to bother restoring the
4634 register window. We simply output the jump to the function and
4635 the insn in the delay slot (if any). */
4637 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4640 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4643 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4644 it into a branch if possible. */
4645 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4650 /* This is a regular function so we have to restore the register window.
4651 We may have a pending insn for the delay slot, which will be combined
4652 with the 'restore' instruction. */
4654 output_asm_insn ("call\t%a0, 0", operands);
4658 rtx delay = NEXT_INSN (insn);
4661 output_restore (PATTERN (delay));
4663 PATTERN (delay) = gen_blockage ();
4664 INSN_CODE (delay) = -1;
4667 output_restore (NULL_RTX);
4673 /* Functions for handling argument passing.
4675 For 32-bit, the first 6 args are normally in registers and the rest are
4676 pushed. Any arg that starts within the first 6 words is at least
4677 partially passed in a register unless its data type forbids it.
4679 For 64-bit, the argument registers are laid out as an array of 16 elements
4680 and arguments are added sequentially. The first 6 int args and up to the
4681 first 16 fp args (depending on size) are passed in regs.
4683 Slot Stack Integral Float Float in structure Double Long Double
4684 ---- ----- -------- ----- ------------------ ------ -----------
4685 15 [SP+248] %f31 %f30,%f31 %d30
4686 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4687 13 [SP+232] %f27 %f26,%f27 %d26
4688 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4689 11 [SP+216] %f23 %f22,%f23 %d22
4690 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4691 9 [SP+200] %f19 %f18,%f19 %d18
4692 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4693 7 [SP+184] %f15 %f14,%f15 %d14
4694 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4695 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4696 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4697 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4698 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4699 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4700 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4702 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4704 Integral arguments are always passed as 64-bit quantities appropriately extended.
4707 Passing of floating point values is handled as follows.
4708 If a prototype is in scope:
4709 If the value is in a named argument (i.e. not a stdarg function or a
4710 value not part of the `...') then the value is passed in the appropriate fp reg.
4712 If the value is part of the `...' and is passed in one of the first 6
4713 slots then the value is passed in the appropriate int reg.
4714 If the value is part of the `...' and is not passed in one of the first 6
4715 slots then the value is passed in memory.
4716 If a prototype is not in scope:
4717 If the value is one of the first 6 arguments the value is passed in the
4718 appropriate integer reg and the appropriate fp reg.
4719 If the value is not one of the first 6 arguments the value is passed in
4720 the appropriate fp reg and in memory.
4723 Summary of the calling conventions implemented by GCC on the SPARC:
4726 size argument return value
4728 small integer <4 int. reg. int. reg.
4729 word 4 int. reg. int. reg.
4730 double word 8 int. reg. int. reg.
4732 _Complex small integer <8 int. reg. int. reg.
4733 _Complex word 8 int. reg. int. reg.
4734 _Complex double word 16 memory int. reg.
4736 vector integer <=8 int. reg. FP reg.
4737 vector integer >8 memory memory
4739 float 4 int. reg. FP reg.
4740 double 8 int. reg. FP reg.
4741 long double 16 memory memory
4743 _Complex float 8 memory FP reg.
4744 _Complex double 16 memory FP reg.
4745 _Complex long double 32 memory FP reg.
4747 vector float any memory memory
4749 aggregate any memory memory
4754 size argument return value
4756 small integer <8 int. reg. int. reg.
4757 word 8 int. reg. int. reg.
4758 double word 16 int. reg. int. reg.
4760 _Complex small integer <16 int. reg. int. reg.
4761 _Complex word 16 int. reg. int. reg.
4762 _Complex double word 32 memory int. reg.
4764 vector integer <=16 FP reg. FP reg.
4765 vector integer 16<s<=32 memory FP reg.
4766 vector integer >32 memory memory
4768 float 4 FP reg. FP reg.
4769 double 8 FP reg. FP reg.
4770 long double 16 FP reg. FP reg.
4772 _Complex float 8 FP reg. FP reg.
4773 _Complex double 16 FP reg. FP reg.
4774 _Complex long double 32 memory FP reg.
4776 vector float <=16 FP reg. FP reg.
4777 vector float 16<s<=32 memory FP reg.
4778 vector float >32 memory memory
4780 aggregate <=16 reg. reg.
4781 aggregate 16<s<=32 memory reg.
4782 aggregate >32 memory memory
4786 Note #1: complex floating-point types follow the extended SPARC ABIs as
4787 implemented by the Sun compiler.
4789 Note #2: integral vector types follow the scalar floating-point types
4790 conventions to match what is implemented by the Sun VIS SDK.
4792 Note #3: floating-point vector types follow the aggregate types
4796 /* Maximum number of int regs for args. */
4797 #define SPARC_INT_ARG_MAX 6
4798 /* Maximum number of fp regs for args. */
4799 #define SPARC_FP_ARG_MAX 16
4801 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
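
/* Worked example: with UNITS_PER_WORD == 8 (64-bit), ROUND_ADVANCE (12)
   yields (12 + 7) / 8 == 2 slots, ROUND_ADVANCE (8) yields 1 slot and
   ROUND_ADVANCE (0) yields 0 slots.  */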
4803 /* Handle the INIT_CUMULATIVE_ARGS macro.
4804 Initialize a variable CUM of type CUMULATIVE_ARGS
4805 for a call to a function whose data type is FNTYPE.
4806 For a library call, FNTYPE is 0. */
4809 init_cumulative_args (struct sparc_args *cum, tree fntype,
4810 rtx libname ATTRIBUTE_UNUSED,
4811 tree fndecl ATTRIBUTE_UNUSED)
4814 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4815 cum->libcall_p = fntype == 0;
4818 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4819 When a prototype says `char' or `short', really pass an `int'. */
4822 sparc_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
4824 return TARGET_ARCH32 ? true : false;
4827 /* Handle promotion of pointer and integer arguments. */
4829 static enum machine_mode
4830 sparc_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
4831 enum machine_mode mode,
4832 int *punsignedp ATTRIBUTE_UNUSED,
4833 const_tree fntype ATTRIBUTE_UNUSED,
4834 int for_return ATTRIBUTE_UNUSED)
4836 if (POINTER_TYPE_P (type))
4838 *punsignedp = POINTERS_EXTEND_UNSIGNED;
4842 /* For TARGET_ARCH64 we need this, as we don't have instructions
4843 for arithmetic operations which do zero/sign extension at the same time,
4844 so without this we end up with a srl/sra after every assignment to a
4845 user variable, which means very bad code. */
4847 && GET_MODE_CLASS (mode) == MODE_INT
4848 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
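
/* For instance (an illustrative sketch), without this promotion a signed
   16-bit user variable on SPARC64 would have to be re-extended after each
   operation, roughly

       add  %o0, %o1, %o0
       sll  %o0, 48, %o0       ! re-extend the low 16 bits
       sra  %o0, 48, %o0

   whereas promoting to word_mode keeps values extended throughout.  */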
4854 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4857 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4859 return TARGET_ARCH64 ? true : false;
4862 /* Scan the record type TYPE and return the following predicates:
4863 - INTREGS_P: the record contains at least one field or sub-field
4864 that is eligible for promotion in integer registers.
4865 - FP_REGS_P: the record contains at least one field or sub-field
4866 that is eligible for promotion in floating-point registers.
4867 - PACKED_P: the record contains at least one field that is packed.
4869 Sub-fields are not taken into account for the PACKED_P predicate. */
4872 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4876 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4878 if (TREE_CODE (field) == FIELD_DECL)
4880 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4881 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4882 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4883 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4889 if (packed_p && DECL_PACKED (field))
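
/* Illustrative example: given

     struct s { float f; struct { int i; } inner; };

   scan_record_type sets *FPREGS_P because of the float field, sets
   *INTREGS_P because of the int sub-field, and sets *PACKED_P only if
   a top-level field of s itself is declared packed.  */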
4895 /* Compute the slot number to pass an argument in.
4896 Return the slot number or -1 if passing on the stack.
4898 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4899 the preceding args and about the function being called.
4900 MODE is the argument's machine mode.
4901 TYPE is the data type of the argument (as a tree).
4902 This is null for libcalls where that information may not be available.
4904 NAMED is nonzero if this argument is a named parameter
4905 (otherwise it is an extra parameter matching an ellipsis).
4906 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4907 *PREGNO records the register number to use if scalar type.
4908 *PPADDING records the amount of padding needed in words. */
4911 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4912 tree type, int named, int incoming_p,
4913 int *pregno, int *ppadding)
4915 int regbase = (incoming_p
4916 ? SPARC_INCOMING_INT_ARG_FIRST
4917 : SPARC_OUTGOING_INT_ARG_FIRST);
4918 int slotno = cum->words;
4919 enum mode_class mclass;
4924 if (type && TREE_ADDRESSABLE (type))
4930 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4933 /* For SPARC64, objects requiring 16-byte alignment get it. */
4935 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4936 && (slotno & 1) != 0)
4937 slotno++, *ppadding = 1;
4939 mclass = GET_MODE_CLASS (mode);
4940 if (type && TREE_CODE (type) == VECTOR_TYPE)
4942 /* Vector types deserve special treatment because they are
4943 polymorphic wrt their mode, depending upon whether VIS
4944 instructions are enabled. */
4945 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4947 /* The SPARC port defines no floating-point vector modes. */
4948 gcc_assert (mode == BLKmode);
4952 /* Integral vector types should either have a vector
4953 mode or an integral mode, because we are guaranteed
4954 by pass_by_reference that their size is not greater
4955 than 16 bytes and TImode is 16-byte wide. */
4956 gcc_assert (mode != BLKmode);
4958 /* Vector integers are handled like floats according to
4960 mclass = MODE_FLOAT;
4967 case MODE_COMPLEX_FLOAT:
4968 case MODE_VECTOR_INT:
4969 if (TARGET_ARCH64 && TARGET_FPU && named)
4971 if (slotno >= SPARC_FP_ARG_MAX)
4973 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4974 /* Arguments filling only one single FP register are
4975 right-justified in the outer double FP register. */
4976 if (GET_MODE_SIZE (mode) <= 4)
4983 case MODE_COMPLEX_INT:
4984 if (slotno >= SPARC_INT_ARG_MAX)
4986 regno = regbase + slotno;
4990 if (mode == VOIDmode)
4991 /* MODE is VOIDmode when generating the actual call. */
4994 gcc_assert (mode == BLKmode);
4998 || (TREE_CODE (type) != VECTOR_TYPE
4999 && TREE_CODE (type) != RECORD_TYPE))
5001 if (slotno >= SPARC_INT_ARG_MAX)
5003 regno = regbase + slotno;
5005 else /* TARGET_ARCH64 && type */
5007 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
5009 /* First see what kinds of registers we would need. */
5010 if (TREE_CODE (type) == VECTOR_TYPE)
5013 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
5015 /* The ABI obviously doesn't specify how packed structures
5016 are passed. These are defined to be passed in int regs
5017 if possible, otherwise memory. */
5018 if (packed_p || !named)
5019 fpregs_p = 0, intregs_p = 1;
5021 /* If all arg slots are filled, then must pass on stack. */
5022 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
5025 /* If there are only int args and all int arg slots are filled,
5026 then must pass on stack. */
5027 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
5030 /* Note that even if all int arg slots are filled, fp members may
5031 still be passed in regs if such regs are available.
5032 *PREGNO isn't set because there may be more than one, it's up
5033 to the caller to compute them. */
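
/* Worked example (64-bit, following the slot table above): for

     void f (int a, double b, int c);

   A is assigned slot 0 and lands in %o0, B is assigned slot 1 and lands
   in %d2, and C is assigned slot 2 and lands in %o2; the corresponding
   stack words at SP+128, SP+136 and SP+144 remain reserved.  */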
5046 /* Handle recursive register counting for structure field layout. */
5048 struct function_arg_record_value_parms
5050 rtx ret; /* return expression being built. */
5051 int slotno; /* slot number of the argument. */
5052 int named; /* whether the argument is named. */
5053 int regbase; /* regno of the base register. */
5054 int stack; /* 1 if part of the argument is on the stack. */
5055 int intoffset; /* offset of the first pending integer field. */
5056 unsigned int nregs; /* number of words passed in registers. */
5059 static void function_arg_record_value_3
5060 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
5061 static void function_arg_record_value_2
5062 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5063 static void function_arg_record_value_1
5064 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5065 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
5066 static rtx function_arg_union_value (int, enum machine_mode, int, int);
5068 /* A subroutine of function_arg_record_value. Traverse the structure
5069 recursively and determine how many registers will be required. */
5072 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
5073 struct function_arg_record_value_parms *parms,
5078 /* We need to compute how many registers are needed so we can
5079 allocate the PARALLEL but before we can do that we need to know
5080 whether there are any packed fields. The ABI obviously doesn't
5081 specify how structures are passed in this case, so they are
5082 defined to be passed in int regs if possible, otherwise memory,
5083 regardless of whether there are fp values present. */
5086 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5088 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5095 /* Compute how many registers we need. */
5096 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5098 if (TREE_CODE (field) == FIELD_DECL)
5100 HOST_WIDE_INT bitpos = startbitpos;
5102 if (DECL_SIZE (field) != 0)
5104 if (integer_zerop (DECL_SIZE (field)))
5107 if (host_integerp (bit_position (field), 1))
5108 bitpos += int_bit_position (field);
5111 /* ??? FIXME: else assume zero offset. */
5113 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5114 function_arg_record_value_1 (TREE_TYPE (field),
5118 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5119 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5124 if (parms->intoffset != -1)
5126 unsigned int startbit, endbit;
5127 int intslots, this_slotno;
5129 startbit = parms->intoffset & -BITS_PER_WORD;
5130 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5132 intslots = (endbit - startbit) / BITS_PER_WORD;
5133 this_slotno = parms->slotno + parms->intoffset
5136 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5138 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5139 /* We need to pass this field on the stack. */
5143 parms->nregs += intslots;
5144 parms->intoffset = -1;
5147 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
5148 If it wasn't true we wouldn't be here. */
5149 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5150 && DECL_MODE (field) == BLKmode)
5151 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5152 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5159 if (parms->intoffset == -1)
5160 parms->intoffset = bitpos;
5166 /* A subroutine of function_arg_record_value. Assign the bits of the
5167 structure between parms->intoffset and bitpos to integer registers. */
5170 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
5171 struct function_arg_record_value_parms *parms)
5173 enum machine_mode mode;
5175 unsigned int startbit, endbit;
5176 int this_slotno, intslots, intoffset;
5179 if (parms->intoffset == -1)
5182 intoffset = parms->intoffset;
5183 parms->intoffset = -1;
5185 startbit = intoffset & -BITS_PER_WORD;
5186 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5187 intslots = (endbit - startbit) / BITS_PER_WORD;
5188 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5190 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5194 /* If this is the trailing part of a word, only load that much into
5195 the register. Otherwise load the whole register. Note that in
5196 the latter case we may pick up unwanted bits. It's not a problem
5197 at the moment, but we may wish to revisit it. */
5199 if (intoffset % BITS_PER_WORD != 0)
5200 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5205 intoffset /= BITS_PER_UNIT;
5208 regno = parms->regbase + this_slotno;
5209 reg = gen_rtx_REG (mode, regno);
5210 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5211 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5214 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5219 while (intslots > 0);
5222 /* A subroutine of function_arg_record_value. Traverse the structure
5223 recursively and assign bits to floating point registers. Track which
5224 bits in between need integer registers; invoke function_arg_record_value_3
5225 to make that happen. */
5228 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5229 struct function_arg_record_value_parms *parms,
5235 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5237 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5244 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5246 if (TREE_CODE (field) == FIELD_DECL)
5248 HOST_WIDE_INT bitpos = startbitpos;
5250 if (DECL_SIZE (field) != 0)
5252 if (integer_zerop (DECL_SIZE (field)))
5255 if (host_integerp (bit_position (field), 1))
5256 bitpos += int_bit_position (field);
5259 /* ??? FIXME: else assume zero offset. */
5261 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5262 function_arg_record_value_2 (TREE_TYPE (field),
5266 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5267 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5272 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5273 int regno, nregs, pos;
5274 enum machine_mode mode = DECL_MODE (field);
5277 function_arg_record_value_3 (bitpos, parms);
5279 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5282 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5283 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5285 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5287 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5293 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5294 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5296 reg = gen_rtx_REG (mode, regno);
5297 pos = bitpos / BITS_PER_UNIT;
5298 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5299 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5303 regno += GET_MODE_SIZE (mode) / 4;
5304 reg = gen_rtx_REG (mode, regno);
5305 pos += GET_MODE_SIZE (mode);
5306 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5307 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5313 if (parms->intoffset == -1)
5314 parms->intoffset = bitpos;
5320 /* Used by function_arg and sparc_function_value_1 to implement the complex
5321 conventions of the 64-bit ABI for passing and returning structures.
5322 Return an expression valid as a return value for the FUNCTION_ARG
5323 and TARGET_FUNCTION_VALUE.
5325 TYPE is the data type of the argument (as a tree).
5326 This is null for libcalls where that information may not be available.
5328 MODE is the argument's machine mode.
5329 SLOTNO is the index number of the argument's slot in the parameter array.
5330 NAMED is nonzero if this argument is a named parameter
5331 (otherwise it is an extra parameter matching an ellipsis).
5332 REGBASE is the regno of the base register for the parameter array. */
5335 function_arg_record_value (const_tree type, enum machine_mode mode,
5336 int slotno, int named, int regbase)
5338 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5339 struct function_arg_record_value_parms parms;
5342 parms.ret = NULL_RTX;
5343 parms.slotno = slotno;
5344 parms.named = named;
5345 parms.regbase = regbase;
5348 /* Compute how many registers we need. */
5350 parms.intoffset = 0;
5351 function_arg_record_value_1 (type, 0, &parms, false);
5353 /* Take into account pending integer fields. */
5354 if (parms.intoffset != -1)
5356 unsigned int startbit, endbit;
5357 int intslots, this_slotno;
5359 startbit = parms.intoffset & -BITS_PER_WORD;
5360 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5361 intslots = (endbit - startbit) / BITS_PER_WORD;
5362 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5364 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5366 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5367 /* We need to pass this field on the stack. */
5371 parms.nregs += intslots;
5373 nregs = parms.nregs;
5375 /* Allocate the vector and handle some annoying special cases. */
5378 /* ??? Empty structure has no value? Duh? */
5381 /* Though there's nothing really to store, return a word register
5382 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5383 leads to breakage due to the fact that there are zero bytes to store. */
5385 return gen_rtx_REG (mode, regbase);
5389 /* ??? C++ has structures with no fields, and yet a size. Give up
5390 for now and pass everything back in integer registers. */
5391 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5393 if (nregs + slotno > SPARC_INT_ARG_MAX)
5394 nregs = SPARC_INT_ARG_MAX - slotno;
5396 gcc_assert (nregs != 0);
5398 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5400 /* If at least one field must be passed on the stack, generate
5401 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5402 also be passed on the stack. We can't do much better because the
5403 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5404 of structures for which the fields passed exclusively in registers
5405 are not at the beginning of the structure. */
5407 XVECEXP (parms.ret, 0, 0)
5408 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5410 /* Fill in the entries. */
5412 parms.intoffset = 0;
5413 function_arg_record_value_2 (type, 0, &parms, false);
5414 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5416 gcc_assert (parms.nregs == nregs);
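
/* Illustrative result: for a structure such as

     struct { double d; long l; }

   passed in slot 0, the code above builds roughly

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   i.e. the float field travels in an FP register, the integer field in
   the int register of its slot, each tagged with its byte offset.  */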
5421 /* Used by function_arg and sparc_function_value_1 to implement the conventions
5422 of the 64-bit ABI for passing and returning unions.
5423 Return an expression valid as a return value for the FUNCTION_ARG
5424 and TARGET_FUNCTION_VALUE.
5426 SIZE is the size in bytes of the union.
5427 MODE is the argument's machine mode.
5428 REGNO is the hard register the union will be passed in. */
5431 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5434 int nwords = ROUND_ADVANCE (size), i;
5437 /* See comment in previous function for empty structures. */
5439 return gen_rtx_REG (mode, regno);
5441 if (slotno == SPARC_INT_ARG_MAX - 1)
5444 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5446 for (i = 0; i < nwords; i++)
5448 /* Unions are passed left-justified. */
5449 XVECEXP (regs, 0, i)
5450 = gen_rtx_EXPR_LIST (VOIDmode,
5451 gen_rtx_REG (word_mode, regno),
5452 GEN_INT (UNITS_PER_WORD * i));
5459 /* Used by function_arg and sparc_function_value_1 to implement the conventions
5460 for passing and returning large (BLKmode) vectors.
5461 Return an expression valid as a return value for the FUNCTION_ARG
5462 and TARGET_FUNCTION_VALUE.
5464 SIZE is the size in bytes of the vector (at least 8 bytes).
5465 REGNO is the FP hard register the vector will be passed in. */
5468 function_arg_vector_value (int size, int regno)
5470 int i, nregs = size / 8;
5473 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5475 for (i = 0; i < nregs; i++)
5477 XVECEXP (regs, 0, i)
5478 = gen_rtx_EXPR_LIST (VOIDmode,
5479 gen_rtx_REG (DImode, regno + 2*i),
5486 /* Handle the FUNCTION_ARG macro.
5487 Determine where to put an argument to a function.
5488 Value is zero to push the argument on the stack,
5489 or a hard register in which to store the argument.
5491 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5492 the preceding args and about the function being called.
5493 MODE is the argument's machine mode.
5494 TYPE is the data type of the argument (as a tree).
5495 This is null for libcalls where that information may not be available.
5497 NAMED is nonzero if this argument is a named parameter
5498 (otherwise it is an extra parameter matching an ellipsis).
5499 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5502 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5503 tree type, int named, int incoming_p)
5505 int regbase = (incoming_p
5506 ? SPARC_INCOMING_INT_ARG_FIRST
5507 : SPARC_OUTGOING_INT_ARG_FIRST);
5508 int slotno, regno, padding;
5509 enum mode_class mclass = GET_MODE_CLASS (mode);
5511 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5516 /* Vector types deserve special treatment because they are polymorphic wrt
5517 their mode, depending upon whether VIS instructions are enabled. */
5518 if (type && TREE_CODE (type) == VECTOR_TYPE)
5520 HOST_WIDE_INT size = int_size_in_bytes (type);
5521 gcc_assert ((TARGET_ARCH32 && size <= 8)
5522 || (TARGET_ARCH64 && size <= 16));
5524 if (mode == BLKmode)
5525 return function_arg_vector_value (size,
5526 SPARC_FP_ARG_FIRST + 2*slotno);
5528 mclass = MODE_FLOAT;
5532 return gen_rtx_REG (mode, regno);
5534 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5535 and are promoted to registers if possible. */
5536 if (type && TREE_CODE (type) == RECORD_TYPE)
5538 HOST_WIDE_INT size = int_size_in_bytes (type);
5539 gcc_assert (size <= 16);
5541 return function_arg_record_value (type, mode, slotno, named, regbase);
5544 /* Unions up to 16 bytes in size are passed in integer registers. */
5545 else if (type && TREE_CODE (type) == UNION_TYPE)
5547 HOST_WIDE_INT size = int_size_in_bytes (type);
5548 gcc_assert (size <= 16);
5550 return function_arg_union_value (size, mode, slotno, regno);
5553 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5554 but also have the slot allocated for them.
5555 If no prototype is in scope fp values in register slots get passed
5556 in two places, either fp regs and int regs or fp regs and memory. */
5557 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5558 && SPARC_FP_REG_P (regno))
5560 rtx reg = gen_rtx_REG (mode, regno);
5561 if (cum->prototype_p || cum->libcall_p)
5563 /* "* 2" because fp reg numbers are recorded in 4 byte quantities. */
5566 /* ??? This will cause the value to be passed in the fp reg and
5567 in the stack. When a prototype exists we want to pass the
5568 value in the reg but reserve space on the stack. That's an
5569 optimization, and is deferred [for a bit]. */
5570 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5571 return gen_rtx_PARALLEL (mode,
5573 gen_rtx_EXPR_LIST (VOIDmode,
5574 NULL_RTX, const0_rtx),
5575 gen_rtx_EXPR_LIST (VOIDmode,
5579 /* ??? It seems that passing back a register even when past
5580 the area declared by REG_PARM_STACK_SPACE will allocate
5581 space appropriately, and will not copy the data onto the
5582 stack, exactly as we desire.
5584 This is due to locate_and_pad_parm being called in
5585 expand_call whenever reg_parm_stack_space > 0, which
5586 while beneficial to our example here, would seem to be
5587 in error from what had been intended. Ho hum... -- r~ */
5595 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5599 /* On incoming, we don't need to know that the value
5600 is passed in %f0 and %i0, and it confuses other parts
5601 causing needless spillage even in the simplest cases. */
5605 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5606 + (regno - SPARC_FP_ARG_FIRST) / 2);
5608 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5609 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5611 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5615 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5616 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5617 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5622 /* All other aggregate types are passed in an integer register in a mode
5623 corresponding to the size of the type. */
5624 else if (type && AGGREGATE_TYPE_P (type))
5626 HOST_WIDE_INT size = int_size_in_bytes (type);
5627 gcc_assert (size <= 16);
5629 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5632 return gen_rtx_REG (mode, regno);
5635 /* For an arg passed partly in registers and partly in memory,
5636 this is the number of bytes of registers used.
5637 For args passed entirely in registers or entirely in memory, zero.
5639 Any arg that starts in the first 6 regs but won't entirely fit in them
5640 needs partial registers on v8. On v9, structures with integer
5641 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5642 values that begin in the last fp reg [where "last fp reg" varies with the
5643 mode] will be split between that reg and memory. */
5646 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5647 tree type, bool named)
5649 int slotno, regno, padding;
5651 /* We pass 0 for incoming_p here, it doesn't matter. */
5652 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5659 if ((slotno + (mode == BLKmode
5660 ? ROUND_ADVANCE (int_size_in_bytes (type))
5661 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5662 > SPARC_INT_ARG_MAX)
5663 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5667 /* We are guaranteed by pass_by_reference that the size of the
5668 argument is not greater than 16 bytes, so we only need to return
5669 one word if the argument is partially passed in registers. */
5671 if (type && AGGREGATE_TYPE_P (type))
5673 int size = int_size_in_bytes (type);
5675 if (size > UNITS_PER_WORD
5676 && slotno == SPARC_INT_ARG_MAX - 1)
5677 return UNITS_PER_WORD;
5679 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5680 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5681 && ! (TARGET_FPU && named)))
5683 /* The complex types are passed as packed types. */
5684 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5685 && slotno == SPARC_INT_ARG_MAX - 1)
5686 return UNITS_PER_WORD;
5688 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5690 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5692 return UNITS_PER_WORD;
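
/* Worked example (32-bit, UNITS_PER_WORD == 4): a 'long long' argument
   starting in slot 5 needs slots 5 and 6, but only 6 slots exist, so
   sparc_arg_partial_bytes returns (6 - 5) * 4 == 4: four bytes travel
   in %o5 and the remaining four go on the stack.  */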
5699 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5700 Specify whether to pass the argument by reference. */
5703 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5704 enum machine_mode mode, const_tree type,
5705 bool named ATTRIBUTE_UNUSED)
5708 /* Original SPARC 32-bit ABI says that structures and unions,
5709 and quad-precision floats are passed by reference. For Pascal,
5710 also pass arrays by reference. All other base types are passed
5713 Extended ABI (as implemented by the Sun compiler) says that all
5714 complex floats are passed by reference. Pass complex integers
5715 in registers up to 8 bytes. More generally, enforce the 2-word
5716 cap for passing arguments in registers.
5718 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5719 integers are passed like floats of the same size, that is in
5720 registers up to 8 bytes. Pass all vector floats by reference
5721 like structure and unions. */
5722 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5724 /* Catch CDImode, TFmode, DCmode and TCmode. */
5725 || GET_MODE_SIZE (mode) > 8
5727 && TREE_CODE (type) == VECTOR_TYPE
5728 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5730 /* Original SPARC 64-bit ABI says that structures and unions
5731 smaller than 16 bytes are passed in registers, as well as
5732 all other base types.
5734 Extended ABI (as implemented by the Sun compiler) says that
5735 complex floats are passed in registers up to 16 bytes. Pass
5736 all complex integers in registers up to 16 bytes. More generally,
5737 enforce the 2-word cap for passing arguments in registers.
5739 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5740 integers are passed like floats of the same size, that is in
5741 registers (up to 16 bytes). Pass all vector floats like structure
5744 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5745 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5746 /* Catch CTImode and TCmode. */
5747 || GET_MODE_SIZE (mode) > 16);
5750 /* Handle the FUNCTION_ARG_ADVANCE macro.
5751 Update the data in CUM to advance over an argument
5752 of mode MODE and data type TYPE.
5753 TYPE is null for libcalls where that information may not be available. */
5756 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5757 tree type, int named)
5759 int slotno, regno, padding;
5761 /* We pass 0 for incoming_p here, it doesn't matter. */
5762 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5764 /* If register required leading padding, add it. */
5766 cum->words += padding;
5770 cum->words += (mode != BLKmode
5771 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5772 : ROUND_ADVANCE (int_size_in_bytes (type)));
5776 if (type && AGGREGATE_TYPE_P (type))
5778 int size = int_size_in_bytes (type);
5782 else if (size <= 16)
5784 else /* passed by reference */
5789 cum->words += (mode != BLKmode
5790 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5791 : ROUND_ADVANCE (int_size_in_bytes (type)));
5796 /* Handle the FUNCTION_ARG_PADDING macro.
5797 For the 64 bit ABI structs are always stored left shifted in their
5801 function_arg_padding (enum machine_mode mode, const_tree type)
5803 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5806 /* Fall back to the default. */
5807 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5810 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5811 Specify whether to return the return value in memory. */
5814 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5817 /* Original SPARC 32-bit ABI says that structures and unions,
5818 and quad-precision floats are returned in memory. All other
5819 base types are returned in registers.
5821 Extended ABI (as implemented by the Sun compiler) says that
5822 all complex floats are returned in registers (8 FP registers
5823 at most for '_Complex long double'). Return all complex integers
5824 in registers (4 at most for '_Complex long long').
5826 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5827 integers are returned like floats of the same size, that is in
5828 registers up to 8 bytes and in memory otherwise. Return all
5829 vector floats in memory like structure and unions; note that
5830 they always have BLKmode like the latter. */
5831 return (TYPE_MODE (type) == BLKmode
5832 || TYPE_MODE (type) == TFmode
5833 || (TREE_CODE (type) == VECTOR_TYPE
5834 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5836 /* Original SPARC 64-bit ABI says that structures and unions
5837 smaller than 32 bytes are returned in registers, as well as
5838 all other base types.
5840 Extended ABI (as implemented by the Sun compiler) says that all
5841 complex floats are returned in registers (8 FP registers at most
5842 for '_Complex long double'). Return all complex integers in
5843 registers (4 at most for '_Complex TItype').
5845 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5846 integers are returned like floats of the same size, that is in
5847 registers. Return all vector floats like structure and unions;
5848 note that they always have BLKmode like the latter. */
5849 return ((TYPE_MODE (type) == BLKmode
5850 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5853 /* Handle the TARGET_STRUCT_VALUE target hook.
5854 Return where to find the structure return value address. */
5857 sparc_struct_value_rtx (tree fndecl, int incoming)
5866 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5867 STRUCT_VALUE_OFFSET));
5869 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5870 STRUCT_VALUE_OFFSET));
5872 /* Only follow the SPARC ABI for fixed-size structure returns.
5873 Variable size structure returns are handled per the normal
5874 procedures in GCC. This is enabled by -mstd-struct-return. */
5876 && sparc_std_struct_return
5877 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5878 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5880 /* We must check and adjust the return address, as it is
5881 optional as to whether the return object is really
5883 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5884 rtx scratch = gen_reg_rtx (SImode);
5885 rtx endlab = gen_label_rtx ();
5887 /* Calculate the return object size */
5888 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5889 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5890 /* Construct a temporary return value */
5891 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5893 /* Implement SPARC 32-bit psABI callee struct-return checking requirements:
5896 Fetch the instruction where we will return to and see if
5897 it's an unimp instruction (the most significant 10 bits
5899 emit_move_insn (scratch, gen_rtx_MEM (SImode,
5900 plus_constant (ret_rtx, 8)));
5901 /* Assume the size is valid and pre-adjust */
5902 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5903 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5904 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5905 /* Assign stack temp:
5906 Write the address of the memory pointed to by temp_val into
5907 the memory pointed to by mem */
5908 emit_move_insn (mem, XEXP (temp_val, 0));
5909 emit_label (endlab);
5912 set_mem_alias_set (mem, struct_value_alias_set);
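
/* Background for the check above (a sketch of the psABI convention):
   a caller expecting a structure return emits

       call   foo
        nop
       unimp  SIZE           ! low 12 bits encode the expected size

   and a conforming callee skips the unimp by returning to %o7+12
   instead of %o7+8.  The code above loads the word at the return
   address + 8, compares its low bits against the actual size, and
   falls back to a local temporary if the caller did not provide a
   matching return slot.  */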
5917 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
5918 For v9, function return values are subject to the same rules as arguments,
5919 except that up to 32 bytes may be returned in registers. */
5922 sparc_function_value_1 (const_tree type, enum machine_mode mode,
5925 /* Beware that the two values are swapped here wrt function_arg. */
5926 int regbase = (outgoing
5927 ? SPARC_INCOMING_INT_ARG_FIRST
5928 : SPARC_OUTGOING_INT_ARG_FIRST);
5929 enum mode_class mclass = GET_MODE_CLASS (mode);
5932 /* Vector types deserve special treatment because they are polymorphic wrt
5933 their mode, depending upon whether VIS instructions are enabled. */
5934 if (type && TREE_CODE (type) == VECTOR_TYPE)
5936 HOST_WIDE_INT size = int_size_in_bytes (type);
5937 gcc_assert ((TARGET_ARCH32 && size <= 8)
5938 || (TARGET_ARCH64 && size <= 32));
5940 if (mode == BLKmode)
5941 return function_arg_vector_value (size,
5942 SPARC_FP_ARG_FIRST);
5944 mclass = MODE_FLOAT;
5947 if (TARGET_ARCH64 && type)
5949 /* Structures up to 32 bytes in size are returned in registers. */
5950 if (TREE_CODE (type) == RECORD_TYPE)
5952 HOST_WIDE_INT size = int_size_in_bytes (type);
5953 gcc_assert (size <= 32);
5955 return function_arg_record_value (type, mode, 0, 1, regbase);
5958 /* Unions up to 32 bytes in size are returned in integer registers. */
5959 else if (TREE_CODE (type) == UNION_TYPE)
5961 HOST_WIDE_INT size = int_size_in_bytes (type);
5962 gcc_assert (size <= 32);
5964 return function_arg_union_value (size, mode, 0, regbase);
5967 /* Objects that require it are returned in FP registers. */
5968 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5971 /* All other aggregate types are returned in an integer register in a
5972 mode corresponding to the size of the type. */
5973 else if (AGGREGATE_TYPE_P (type))
5975 /* All other aggregate types are passed in an integer register
5976 in a mode corresponding to the size of the type. */
5977 HOST_WIDE_INT size = int_size_in_bytes (type);
5978 gcc_assert (size <= 32);
5980 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5982 /* ??? We probably should have made the same ABI change in
5983 3.4.0 as the one we made for unions. The latter was
5984 required by the SCD though, while the former is not
5985 specified, so we favored compatibility and efficiency.
5987 Now we're stuck for aggregates larger than 16 bytes,
5988 because OImode vanished in the meantime. Let's not
5989 try to be unduly clever, and simply follow the ABI
5990 for unions in that case. */
5991 if (mode == BLKmode)
5992 return function_arg_union_value (size, mode, 0, regbase);
5997 /* This must match sparc_promote_function_mode.
5998 ??? Maybe 32-bit pointers should actually remain in Pmode? */
5999 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6003 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
6004 regno = SPARC_FP_ARG_FIRST;
6008 return gen_rtx_REG (mode, regno);
6011 /* Handle TARGET_FUNCTION_VALUE.
6013 On SPARC the value is found in the first "output" register, but the called
6014 function leaves it in the first "input" register. */
6017 sparc_function_value (const_tree valtype,
6018 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
6021 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
6024 /* Handle TARGET_LIBCALL_VALUE. */
6027 sparc_libcall_value (enum machine_mode mode,
6028 const_rtx fun ATTRIBUTE_UNUSED)
6030 return sparc_function_value_1 (NULL_TREE, mode, false);
6033 /* Handle FUNCTION_VALUE_REGNO_P.
6034 On SPARC, the first "output" reg is used for integer values, and
6035 the first floating point register is used for floating point values. */
6038 sparc_function_value_regno_p (const unsigned int regno)
6040 return (regno == 8 || regno == 32);
6043 /* Do what is necessary for `va_start'. We look at the current function
6044 to determine if stdarg or varargs is used and return the address of
6045 the first unnamed parameter. */
6048 sparc_builtin_saveregs (void)
6050 int first_reg = crtl->args.info.words;
6054 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
6055 emit_move_insn (gen_rtx_MEM (word_mode,
6056 gen_rtx_PLUS (Pmode,
6058 GEN_INT (FIRST_PARM_OFFSET (0)
6061 gen_rtx_REG (word_mode,
6062 SPARC_INCOMING_INT_ARG_FIRST + regno));
6064 address = gen_rtx_PLUS (Pmode,
6066 GEN_INT (FIRST_PARM_OFFSET (0)
6067 + UNITS_PER_WORD * first_reg));
6072 /* Implement `va_start' for stdarg. */
6075 sparc_va_start (tree valist, rtx nextarg)
6077 nextarg = expand_builtin_saveregs ();
6078 std_expand_builtin_va_start (valist, nextarg);
6081 /* Implement `va_arg' for stdarg. */
6084 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6087 HOST_WIDE_INT size, rsize, align;
6090 tree ptrtype = build_pointer_type (type);
6092 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6095 size = rsize = UNITS_PER_WORD;
6101 size = int_size_in_bytes (type);
6102 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6107 /* For SPARC64, objects requiring 16-byte alignment get it. */
6108 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
6109 align = 2 * UNITS_PER_WORD;
6111 /* SPARC-V9 ABI states that structures up to 16 bytes in size
6112 are left-justified in their slots. */
6113 if (AGGREGATE_TYPE_P (type))
6116 size = rsize = UNITS_PER_WORD;
6126 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
6127 size_int (align - 1));
6128 incr = fold_convert (sizetype, incr);
6129 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
6131 incr = fold_convert (ptr_type_node, incr);
6134 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
6137 if (BYTES_BIG_ENDIAN && size < rsize)
6138 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
6139 size_int (rsize - size));
6143 addr = fold_convert (build_pointer_type (ptrtype), addr);
6144 addr = build_va_arg_indirect_ref (addr);
6147 /* If the address isn't aligned properly for the type, we need a temporary.
6148 FIXME: This is inefficient, usually we can do this in registers. */
6149 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
6151 tree tmp = create_tmp_var (type, "va_arg_tmp");
6152 tree dest_addr = build_fold_addr_expr (tmp);
6153 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
6154 3, dest_addr, addr, size_int (rsize));
6155 TREE_ADDRESSABLE (tmp) = 1;
6156 gimplify_and_add (copy, pre_p);
6161 addr = fold_convert (ptrtype, addr);
6164 = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
6165 gimplify_assign (valist, incr, post_p);
6167 return build_va_arg_indirect_ref (addr);
6170 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
6171 Specify whether the vector mode is supported by the hardware. */
6174 sparc_vector_mode_supported_p (enum machine_mode mode)
6176 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
6179 /* Return the string to output an unconditional branch to LABEL, which is
6180 the operand number of the label.
6182 DEST is the destination insn (i.e. the label), INSN is the source. */
6185 output_ubranch (rtx dest, int label, rtx insn)
6187 static char string[64];
6188 bool v9_form = false;
6191 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
6193 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6194 - INSN_ADDRESSES (INSN_UID (insn)));
6195 /* Leave some instructions for "slop". */
6196 if (delta >= -260000 && delta < 260000)
6201 strcpy (string, "ba%*,pt\t%%xcc, ");
6203 strcpy (string, "b%*\t");
6205 p = strchr (string, '\0');
6216 /* Return the string to output a conditional branch to LABEL, which is
6217 the operand number of the label. OP is the conditional expression.
6218 XEXP (OP, 0) is assumed to be a condition code register (integer or
6219 floating point) and its mode specifies what kind of comparison we made.
6221 DEST is the destination insn (i.e. the label), INSN is the source.
6223 REVERSED is nonzero if we should reverse the sense of the comparison.
6225 ANNUL is nonzero if we should generate an annulling branch. */
6228 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
6231 static char string[64];
6232 enum rtx_code code = GET_CODE (op);
6233 rtx cc_reg = XEXP (op, 0);
6234 enum machine_mode mode = GET_MODE (cc_reg);
6235 const char *labelno, *branch;
6236 int spaces = 8, far;
6239 /* v9 branches are limited to +-1MB. If it is too far away,
6252 fbne,a,pn %fcc2, .LC29
6260 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6263 /* Reversal of FP compares takes care -- an ordered compare
6264 becomes an unordered compare and vice versa. */
6265 if (mode == CCFPmode || mode == CCFPEmode)
6266 code = reverse_condition_maybe_unordered (code);
6268 code = reverse_condition (code);
6271 /* Start by writing the branch condition. */
6272 if (mode == CCFPmode || mode == CCFPEmode)
6323 /* ??? !v9: FP branches cannot be preceded by another floating point
6324 insn. Because there is currently no concept of pre-delay slots,
6325 we can fix this only by always emitting a nop before a floating point branch. */
6330 strcpy (string, "nop\n\t");
6331 strcat (string, branch);
6344 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6356 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6377 strcpy (string, branch);
6379 spaces -= strlen (branch);
6380 p = strchr (string, '\0');
6382 /* Now add the annulling, the label, and a possible noop. */
6395 if (! far && insn && INSN_ADDRESSES_SET_P ())
6397 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6398 - INSN_ADDRESSES (INSN_UID (insn)));
6399 /* Leave some instructions for "slop". */
6400 if (delta < -260000 || delta >= 260000)
6404 if (mode == CCFPmode || mode == CCFPEmode)
6406 static char v9_fcc_labelno[] = "%%fccX, ";
6407 /* Set the char indicating the number of the fcc reg to use. */
6408 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6409 labelno = v9_fcc_labelno;
6412 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6416 else if (mode == CCXmode || mode == CCX_NOOVmode)
6418 labelno = "%%xcc, ";
6423 labelno = "%%icc, ";
6428 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6431 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6444 strcpy (p, labelno);
6445 p = strchr (p, '\0');
6448 strcpy (p, ".+12\n\t nop\n\tb\t");
6449 /* Skip the next insn if requested or
6450 if we know that it will be a nop. */
6451 if (annul || ! final_sequence)
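
/* For illustration (a rough sketch, spacing elided), typical strings
   returned by output_cbranch look like

     "be\t%l1"                  pre-V9 integer branch
     "bne,a,pn\t%%icc, %l1"     V9, annulled, predicted not taken
     "fbe,pt\t%%fcc2, %l1"      V9 floating-point branch on %fcc2

   while the "far" path branches around an unconditional jump so that
   targets beyond the +-1MB V9 branch range remain reachable.  */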
6465 /* Emit a library call comparison between floating point X and Y.
6466 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
6467 Return the new operator to be used in the comparison sequence.
6469 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6470 values as arguments instead of the TFmode registers themselves,
6471 that's why we cannot call emit_float_lib_cmp. */
6474 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6477 rtx slot0, slot1, result, tem, tem2, libfunc;
6478 enum machine_mode mode;
6479 enum rtx_code new_comparison;
6484 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
6488 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
6492 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
6496 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
6500 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
6504 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
6515 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
6528 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6529 emit_move_insn (slot0, x);
6536 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6537 emit_move_insn (slot1, y);
6540 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
6541 emit_library_call (libfunc, LCT_NORMAL,
6543 XEXP (slot0, 0), Pmode,
6544 XEXP (slot1, 0), Pmode);
6549 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
6550 emit_library_call (libfunc, LCT_NORMAL,
6552 x, TFmode, y, TFmode);
6557 /* Immediately move the result of the libcall into a pseudo
6558 register so reload doesn't clobber the value if it needs
6559 the return register for a spill reg. */
6560 result = gen_reg_rtx (mode);
6561 emit_move_insn (result, hard_libcall_value (mode, libfunc));
6566 return gen_rtx_NE (VOIDmode, result, const0_rtx);
6569 new_comparison = (comparison == UNORDERED ? EQ : NE);
6570 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
6573 new_comparison = (comparison == UNGT ? GT : NE);
6574 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
6576 return gen_rtx_NE (VOIDmode, result, const2_rtx);
6578 tem = gen_reg_rtx (mode);
6580 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6582 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6583 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
6586 tem = gen_reg_rtx (mode);
6588 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6590 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6591 tem2 = gen_reg_rtx (mode);
6593 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6595 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6596 new_comparison = (comparison == UNEQ ? EQ : NE);
6597 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
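
/* The decoding above relies on the comparison libcall returning
   0 (equal), 1 (less), 2 (greater) or 3 (unordered).  For example,
   UNGT tests result > 1, i.e. {greater, unordered}, and UNEQ tests
   (result + 1) & 2 == 0, i.e. {equal, unordered}.  */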
6603 /* Generate an unsigned DImode to FP conversion. This is the same code
6604 optabs would emit if we didn't have TFmode patterns. */
6607 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6609 rtx neglab, donelab, i0, i1, f0, in, out;
6612 in = force_reg (DImode, operands[1]);
6613 neglab = gen_label_rtx ();
6614 donelab = gen_label_rtx ();
6615 i0 = gen_reg_rtx (DImode);
6616 i1 = gen_reg_rtx (DImode);
6617 f0 = gen_reg_rtx (mode);
6619 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6621 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6622 emit_jump_insn (gen_jump (donelab));
6625 emit_label (neglab);
6627 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6628 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6629 emit_insn (gen_iordi3 (i0, i0, i1));
6630 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6631 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6633 emit_label (donelab);
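
/* The negative path above computes (IN >> 1) | (IN & 1): the OR folds
   the shifted-out bit back in as a sticky bit, so converting the halved
   value and then doubling it (F0 + F0) rounds the same way a direct
   unsigned conversion of IN would.  */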
/* Generate an FP to unsigned DImode conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.  */

void
sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
{
  rtx neglab, donelab, i0, i1, f0, in, out, limit;

  out = operands[0];
  in = force_reg (mode, operands[1]);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  limit = gen_reg_rtx (mode);
  f0 = gen_reg_rtx (mode);

  emit_move_insn (limit,
		  CONST_DOUBLE_FROM_REAL_VALUE (
		    REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
  emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode,
			  out,
			  gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
  emit_insn (gen_rtx_SET (VOIDmode,
			  i0,
			  gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
  emit_insn (gen_movdi (i1, const1_rtx));
  emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
  emit_insn (gen_xordi3 (out, i0, i1));

  emit_label (donelab);
}
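/* A worked example of the overflow path above: converting 2^63 + 10.0
   triggers the GE branch to neglab since the input reaches LIMIT = 2^63;
   we then convert f0 = IN - LIMIT = 10.0 to the integer 10 and xor in
   the sign bit built in i1 (1 << 63), yielding the unsigned 2^63 + 10.  */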
/* Return the string to output a conditional branch to LABEL, testing
   register REG.  LABEL is the operand number of the label; REG is the
   operand number of the reg.  OP is the conditional expression.  The mode
   of REG says what kind of comparison we made.

   DEST is the destination insn (i.e. the label), INSN is the source.

   REVERSED is nonzero if we should reverse the sense of the comparison.

   ANNUL is nonzero if we should generate an annulling branch.  */

const char *
output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
		 int annul, rtx insn)
{
  static char string[64];
  enum rtx_code code = GET_CODE (op);
  enum machine_mode mode = GET_MODE (XEXP (op, 0));
  rtx note;
  int far;
  char *p;

  /* Branches on a register are limited to +-128KB.  If one is too far
     away, switch to a sequence like

     brgez,a,pn %o1, .LC29

     or

     brlz,pt %o1, .+16
      nop
     ba,pt %xcc, .LC29  */

  far = get_attr_length (insn) >= 3;

  /* If not floating-point or if EQ or NE, we can just reverse the code.  */
  if (reversed ^ far)
    code = reverse_condition (code);

  /* Only 64 bit versions of these instructions exist.  */
  gcc_assert (mode == DImode);

  /* Start by writing the branch condition.  */

  switch (code)
    {
    case NE: strcpy (string, "brnz"); break;
    case EQ: strcpy (string, "brz"); break;
    case GE: strcpy (string, "brgez"); break;
    case LT: strcpy (string, "brlz"); break;
    case LE: strcpy (string, "brlez"); break;
    case GT: strcpy (string, "brgz"); break;
    default: gcc_unreachable ();
    }

  p = strchr (string, '\0');

  /* Now add the annulling, reg, label, and nop.  */
  if (annul && ! far)
    {
      strcpy (p, ",a");
      p += 2;
    }

  if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
    {
      strcpy (p,
	      ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
	      ? ",pt" : ",pn");
      p += 3;
    }

  *p = p < string + 8 ? '\t' : ' ';
  p++;
  *p++ = '%';
  *p++ = '0' + reg;
  *p++ = ',';
  *p++ = ' ';

  if (far)
    {
      int veryfar = 1, delta;

      if (INSN_ADDRESSES_SET_P ())
	{
	  delta = (INSN_ADDRESSES (INSN_UID (dest))
		   - INSN_ADDRESSES (INSN_UID (insn)));
	  /* Leave some instructions for "slop".  */
	  if (delta >= -260000 && delta < 260000)
	    veryfar = 0;
	}

      strcpy (p, ".+12\n\t nop\n\t");
      /* Skip the next insn if requested or
	 if we know that it will be a nop.  */
      if (annul || ! final_sequence)
	p[3] = '6';
      p += 12;
      if (veryfar)
	{
	  strcpy (p, "b\t");
	  p += 2;
	}
      else
	{
	  strcpy (p, "ba,pt\t%%xcc, ");
	  p += 13;
	}
    }

  *p++ = '%';
  *p++ = 'l';
  *p++ = '0' + label;
  *p++ = '%';
  *p++ = '#';
  *p = '\0';

  return string;
}
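/* For illustration, the strings built here look like "brz,pn\t%1, %l0%#"
   in the short case, or "brlz,pt\t%1, .+16\n\t nop\n\tba,pt\t%%xcc, %l0%#"
   once the target is beyond the +-128KB reach of branch-on-register (the
   plain, longer-range "b\t%l0%#" is used instead of "ba,pt" when the
   distance is unknown or beyond the slop limit -- the "veryfar" case).  */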
/* Return 1 if any of the registers of the instruction are %l[0-7] or
   %o[0-7].  Such instructions cannot be used in the delay slot of a
   return insn on V9.  If TEST is 0, also rename all %i[0-7] registers
   to their %o[0-7] counterparts.  */

int
epilogue_renumber (register rtx *where, int test)
{
  register const char *fmt;
  register int i;
  register enum rtx_code code;

  if (*where == 0)
    return 0;

  code = GET_CODE (*where);

  switch (code)
    {
    case REG:
      if (REGNO (*where) >= 8 && REGNO (*where) < 24)	/* oX or lX */
	return 1;
      if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
	*where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
      /* fallthrough */
    case SCRATCH:
    case CC0:
    case PC:
    case CONST_INT:
    case CONST_DOUBLE:
      return 0;

      /* Do not replace the frame pointer with the stack pointer because
	 it can cause the delayed instruction to load below the stack.
	 This occurs when instructions like:

	 (set (reg/i:SI 24 %i0)
	     (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
			(const_int -20 [0xffffffec])) 0))

	 are in the return delayed slot.  */
    case PLUS:
      if (GET_CODE (XEXP (*where, 0)) == REG
	  && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
	  && (GET_CODE (XEXP (*where, 1)) != CONST_INT
	      || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
	return 1;
      break;

    case MEM:
      if (SPARC_STACK_BIAS
	  && GET_CODE (XEXP (*where, 0)) == REG
	  && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
	return 1;
      break;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
	    if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
	      return 1;
	}
      else if (fmt[i] == 'e'
	       && epilogue_renumber (&(XEXP (*where, i)), test))
	return 1;
    }
  return 0;
}
/* Leaf functions and non-leaf functions have different needs.  */

static const int
reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;

static const int
reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;

static const int *const reg_alloc_orders[] = {
  reg_leaf_alloc_order,
  reg_nonleaf_alloc_order};

void
order_regs_for_local_alloc (void)
{
  static int last_order_nonleaf = 1;

  if (df_regs_ever_live_p (15) != last_order_nonleaf)
    {
      last_order_nonleaf = !last_order_nonleaf;
      memcpy ((char *) reg_alloc_order,
	      (const char *) reg_alloc_orders[last_order_nonleaf],
	      FIRST_PSEUDO_REGISTER * sizeof (int));
    }
}
/* Return 1 if REG and MEM are legitimate enough to allow the various
   mem<-->reg splits to be run.  */

int
sparc_splitdi_legitimate (rtx reg, rtx mem)
{
  /* Punt if we are here by mistake.  */
  gcc_assert (reload_completed);

  /* We must have an offsettable memory reference.  */
  if (! offsettable_memref_p (mem))
    return 0;

  /* If we have legitimate args for ldd/std, we do not want
     the split to happen.  */
  if ((REGNO (reg) % 2) == 0
      && mem_min_alignment (mem, 8))
    return 0;

  /* Success.  */
  return 1;
}
/* Return 1 if x and y are some kind of REG and they refer to
   different hard registers.  This test is guaranteed to be
   run after reload.  */

int
sparc_absnegfloat_split_legitimate (rtx x, rtx y)
{
  if (GET_CODE (x) != REG)
    return 0;
  if (GET_CODE (y) != REG)
    return 0;
  if (REGNO (x) == REGNO (y))
    return 0;
  return 1;
}
/* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
   This makes them candidates for using ldd and std insns.

   Note reg1 and reg2 *must* be hard registers.  */

int
registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  if (REGNO (reg1) % 2 != 0)
    return 0;

  /* Integer ldd is deprecated in SPARC V9.  */
  if (TARGET_V9 && REGNO (reg1) < 32)
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}
/* Return 1 if the addresses in mem1 and mem2 are suitable for use in
   an ldd or std insn.

   This can only happen when addr1 and addr2, the addresses in mem1
   and mem2, are consecutive memory locations (addr1 + 4 == addr2).
   addr1 must also be aligned on a 64-bit boundary.

   Also iff dependent_reg_rtx is not null it should not be used to
   compute the address for mem1, i.e. we cannot optimize a sequence
   like:
	ld [%o0], %o0
	ld [%o0 + 4], %o1
   to
	ldd [%o0], %o0
   nor:
	ld [%g3 + 4], %g3
	ld [%g3], %g2
   to
	ldd [%g3], %g2

   But, note that the transformation from:
	ld [%g2 + 4], %g3
	ld [%g2], %g2
   to
	ldd [%g2], %g2
   is perfectly fine.  Thus, the peephole2 patterns always pass us
   the destination register of the first load, never the second one.

   For stores we don't have a similar problem, so dependent_reg_rtx is
   NULL_RTX.  */

int
mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
{
  rtx addr1, addr2;
  unsigned int reg1;
  HOST_WIDE_INT offset1;

  /* The mems cannot be volatile.  */
  if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
    return 0;

  /* MEM1 should be aligned on a 64-bit boundary.  */
  if (MEM_ALIGN (mem1) < 64)
    return 0;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  /* Extract a register number and offset (if used) from the first addr.  */
  if (GET_CODE (addr1) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr1, 0)) != REG)
	return 0;
      else
	{
	  reg1 = REGNO (XEXP (addr1, 0));
	  /* The offset must be constant!  */
	  if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
	    return 0;
	  offset1 = INTVAL (XEXP (addr1, 1));
	}
    }
  else if (GET_CODE (addr1) != REG)
    return 0;
  else
    {
      reg1 = REGNO (addr1);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset1 = 0;
    }

  /* Make sure the second address is a (mem (plus (reg) (const_int).  */
  if (GET_CODE (addr2) != PLUS)
    return 0;

  if (GET_CODE (XEXP (addr2, 0)) != REG
      || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
    return 0;

  if (reg1 != REGNO (XEXP (addr2, 0)))
    return 0;

  if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
    return 0;

  /* The first offset must be evenly divisible by 8 to ensure the
     address is 64 bit aligned.  */
  if (offset1 % 8 != 0)
    return 0;

  /* The offset for the second addr must be 4 more than the first addr.  */
  if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
    return 0;

  /* All the tests passed.  addr1 and addr2 are valid for ldd and std
     instructions.  */
  return 1;
}
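/* For example, "ld [%g2+8], %o0; ld [%g2+12], %o1" passes all of the
   checks above (same base register, first offset a multiple of 8,
   second offset equal to the first plus 4) and may become
   "ldd [%g2+8], %o0", assuming the register checks in the surrounding
   routines also succeed.  */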
/* Return 1 if reg is a pseudo, or is the first register in
   a hard register pair.  This makes it suitable for use in
   ldd and std insns.  */

int
register_ok_for_ldd (rtx reg)
{
  /* We might have been passed a SUBREG.  */
  if (!REG_P (reg))
    return 0;

  if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
    return (REGNO (reg) % 2 == 0);

  return 1;
}
/* Return 1 if OP is a memory whose address is known to be aligned to an
   8-byte boundary, or a pseudo during reload.  This makes it suitable
   for use in ldd and std insns.  */

int
memory_ok_for_ldd (rtx op)
{
  if (MEM_P (op))
    {
      /* In 64-bit mode, we assume that the address is word-aligned.  */
      if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
	return 0;

      if ((reload_in_progress || reload_completed)
	  && !strict_memory_address_p (Pmode, XEXP (op, 0)))
	return 0;
    }
  else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
    {
      if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
	return 0;
    }
  else
    return 0;

  return 1;
}
/* Print operand X (an rtx) in assembler syntax to file FILE.
   CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
   For `%' followed by punctuation, CODE is the punctuation and X is null.  */

void
print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case '#':
      /* Output an insn in a delay slot.  */
      if (final_sequence)
	sparc_indent_opcode = 1;
      else
	fputs ("\n\t nop", file);
      return;
    case '*':
      /* Output an annul flag if there's nothing for the delay slot and we
	 are optimizing.  This is always used with '(' below.
	 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
	 this is a dbx bug.  So, we only do this when optimizing.
	 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
	 Always emit a nop in case the next instruction is a branch.  */
      if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
	fputs (",a", file);
      return;
    case '(':
      /* Output a 'nop' if there's nothing for the delay slot and we are
	 not optimizing.  This is always used with '*' above.  */
      if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
	fputs ("\n\t nop", file);
      else if (final_sequence)
	sparc_indent_opcode = 1;
      return;
    case ')':
      /* Output the right displacement from the saved PC on function return.
	 The caller may have placed an "unimp" insn immediately after the call
	 so we have to account for it.  This insn is used in the 32-bit ABI
	 when calling a function that returns a non zero-sized structure.  The
	 64-bit ABI doesn't have it.  Be careful to have this test be the same
	 as that for the call.  The exception is when sparc_std_struct_return
	 is enabled, the psABI is followed exactly and the adjustment is made
	 by the code in sparc_struct_value_rtx.  The call emitted is the same
	 when sparc_std_struct_return is enabled.  */
      if (!TARGET_ARCH64
	  && cfun->returns_struct
	  && !sparc_std_struct_return
	  && DECL_SIZE (DECL_RESULT (current_function_decl))
	  && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
	      == INTEGER_CST
	  && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
	fputs ("12", file);
      else
	fputc ('8', file);
      return;
    case '_':
      /* Output the Embedded Medium/Anywhere code model base register.  */
      fputs (EMBMEDANY_BASE_REG, file);
      return;
    case '&':
      /* Print some local dynamic TLS name.  */
      assemble_name (file, get_some_local_dynamic_name ());
      return;

    case 'Y':
      /* Adjust the operand to take into account a RESTORE operation.  */
      if (GET_CODE (x) == CONST_INT)
	break;
      else if (GET_CODE (x) != REG)
	output_operand_lossage ("invalid %%Y operand");
      else if (REGNO (x) < 8)
	fputs (reg_names[REGNO (x)], file);
      else if (REGNO (x) >= 24 && REGNO (x) < 32)
	fputs (reg_names[REGNO (x)-16], file);
      else
	output_operand_lossage ("invalid %%Y operand");
      return;
    case 'L':
      /* Print out the low order register name of a register pair.  */
      if (WORDS_BIG_ENDIAN)
	fputs (reg_names[REGNO (x)+1], file);
      else
	fputs (reg_names[REGNO (x)], file);
      return;
    case 'H':
      /* Print out the high order register name of a register pair.  */
      if (WORDS_BIG_ENDIAN)
	fputs (reg_names[REGNO (x)], file);
      else
	fputs (reg_names[REGNO (x)+1], file);
      return;
    case 'R':
      /* Print out the second register name of a register pair or quad.
	 I.e., R (%o0) => %o1.  */
      fputs (reg_names[REGNO (x)+1], file);
      return;
    case 'S':
      /* Print out the third register name of a register quad.
	 I.e., S (%o0) => %o2.  */
      fputs (reg_names[REGNO (x)+2], file);
      return;
    case 'T':
      /* Print out the fourth register name of a register quad.
	 I.e., T (%o0) => %o3.  */
      fputs (reg_names[REGNO (x)+3], file);
      return;
    case 'x':
      /* Print a condition code register.  */
      if (REGNO (x) == SPARC_ICC_REG)
	{
	  /* We don't handle CC[X]_NOOVmode because they're not supposed
	     to occur here.  */
	  if (GET_MODE (x) == CCmode)
	    fputs ("%icc", file);
	  else if (GET_MODE (x) == CCXmode)
	    fputs ("%xcc", file);
	  else
	    gcc_unreachable ();
	}
      else
	/* %fccN register */
	fputs (reg_names[REGNO (x)], file);
      return;
    case 'm':
      /* Print the operand's address only.  */
      output_address (XEXP (x, 0));
      return;
    case 'r':
      /* In this case we need a register.  Use %g0 if the
	 operand is const0_rtx.  */
      if (x == const0_rtx
	  || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
	{
	  fputs ("%g0", file);
	  return;
	}
      else
	break;

    case 'A':
      switch (GET_CODE (x))
	{
	case IOR: fputs ("or", file); break;
	case AND: fputs ("and", file); break;
	case XOR: fputs ("xor", file); break;
	default: output_operand_lossage ("invalid %%A operand");
	}
      return;

    case 'B':
      switch (GET_CODE (x))
	{
	case IOR: fputs ("orn", file); break;
	case AND: fputs ("andn", file); break;
	case XOR: fputs ("xnor", file); break;
	default: output_operand_lossage ("invalid %%B operand");
	}
      return;

      /* These are used by the conditional move instructions.  */
    case 'c':
    case 'C':
      {
	enum rtx_code rc = GET_CODE (x);

	if (code == 'c')
	  {
	    enum machine_mode mode = GET_MODE (XEXP (x, 0));
	    if (mode == CCFPmode || mode == CCFPEmode)
	      rc = reverse_condition_maybe_unordered (GET_CODE (x));
	    else
	      rc = reverse_condition (GET_CODE (x));
	  }
	switch (rc)
	  {
	  case NE: fputs ("ne", file); break;
	  case EQ: fputs ("e", file); break;
	  case GE: fputs ("ge", file); break;
	  case GT: fputs ("g", file); break;
	  case LE: fputs ("le", file); break;
	  case LT: fputs ("l", file); break;
	  case GEU: fputs ("geu", file); break;
	  case GTU: fputs ("gu", file); break;
	  case LEU: fputs ("leu", file); break;
	  case LTU: fputs ("lu", file); break;
	  case LTGT: fputs ("lg", file); break;
	  case UNORDERED: fputs ("u", file); break;
	  case ORDERED: fputs ("o", file); break;
	  case UNLT: fputs ("ul", file); break;
	  case UNLE: fputs ("ule", file); break;
	  case UNGT: fputs ("ug", file); break;
	  case UNGE: fputs ("uge", file); break;
	  case UNEQ: fputs ("ue", file); break;
	  default: output_operand_lossage (code == 'c'
					   ? "invalid %%c operand"
					   : "invalid %%C operand");
	  }
	return;
      }

      /* These are used by the movr instruction pattern.  */
    case 'd':
    case 'D':
      {
	enum rtx_code rc = (code == 'd'
			    ? reverse_condition (GET_CODE (x))
			    : GET_CODE (x));
	switch (rc)
	  {
	  case NE: fputs ("ne", file); break;
	  case EQ: fputs ("e", file); break;
	  case GE: fputs ("gez", file); break;
	  case LT: fputs ("lz", file); break;
	  case LE: fputs ("lez", file); break;
	  case GT: fputs ("gz", file); break;
	  default: output_operand_lossage (code == 'd'
					   ? "invalid %%d operand"
					   : "invalid %%D operand");
	  }
	return;
      }

    case 'b':
      {
	/* Print a sign-extended character.  */
	int i = trunc_int_for_mode (INTVAL (x), QImode);
	fprintf (file, "%d", i);
	return;
      }

    case 'f':
      /* Operand must be a MEM; write its address.  */
      if (GET_CODE (x) != MEM)
	output_operand_lossage ("invalid %%f operand");
      output_address (XEXP (x, 0));
      return;

    case 's':
      {
	/* Print a sign-extended 32-bit value.  */
	HOST_WIDE_INT i;
	if (GET_CODE(x) == CONST_INT)
	  i = INTVAL (x);
	else if (GET_CODE(x) == CONST_DOUBLE)
	  i = CONST_DOUBLE_LOW (x);
	else
	  {
	    output_operand_lossage ("invalid %%s operand");
	    return;
	  }
	i = trunc_int_for_mode (i, SImode);
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
	return;
      }

    case 0:
      /* Do nothing special.  */
      break;

    default:
      /* Undocumented flag.  */
      output_operand_lossage ("invalid operand output code");
    }

  if (GET_CODE (x) == REG)
    fputs (reg_names[REGNO (x)], file);
  else if (GET_CODE (x) == MEM)
    {
      fputc ('[', file);
      /* Poor Sun assembler doesn't understand absolute addressing.  */
      if (CONSTANT_P (XEXP (x, 0)))
	fputs ("%g0+", file);
      output_address (XEXP (x, 0));
      fputc (']', file);
    }
  else if (GET_CODE (x) == HIGH)
    {
      fputs ("%hi(", file);
      output_addr_const (file, XEXP (x, 0));
      fputc (')', file);
    }
  else if (GET_CODE (x) == LO_SUM)
    {
      print_operand (file, XEXP (x, 0), 0);
      if (TARGET_CM_MEDMID)
	fputs ("+%l44(", file);
      else
	fputs ("+%lo(", file);
      output_addr_const (file, XEXP (x, 1));
      fputc (')', file);
    }
  else if (GET_CODE (x) == CONST_DOUBLE
	   && (GET_MODE (x) == VOIDmode
	       || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
    {
      if (CONST_DOUBLE_HIGH (x) == 0)
	fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
      else if (CONST_DOUBLE_HIGH (x) == -1
	       && CONST_DOUBLE_LOW (x) < 0)
	fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
      else
	output_operand_lossage ("long long constant not a valid immediate operand");
    }
  else if (GET_CODE (x) == CONST_DOUBLE)
    output_operand_lossage ("floating point constant not a valid immediate operand");
  else
    output_addr_const (file, x);
}
/* Target hook for assembling integer objects.  The sparc version has
   special handling for aligned DI-mode objects.  */

static bool
sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  /* ??? We only output .xword's for symbols and only then in environments
     where the assembler can handle them.  */
  if (aligned_p && size == 8
      && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
    {
      if (TARGET_V9)
	{
	  assemble_integer_with_op ("\t.xword\t", x);
	  return true;
	}
      else
	{
	  assemble_aligned_integer (4, const0_rtx);
	  assemble_aligned_integer (4, x);
	  return true;
	}
    }
  return default_assemble_integer (x, size, aligned_p);
}
/* Return the value of a code used in the .proc pseudo-op that says
   what kind of result this function returns.  For non-C types, we pick
   the closest C type.  */

#ifndef SHORT_TYPE_SIZE
#define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
#endif

#ifndef INT_TYPE_SIZE
#define INT_TYPE_SIZE BITS_PER_WORD
#endif

#ifndef LONG_TYPE_SIZE
#define LONG_TYPE_SIZE BITS_PER_WORD
#endif

#ifndef LONG_LONG_TYPE_SIZE
#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
#endif

#ifndef FLOAT_TYPE_SIZE
#define FLOAT_TYPE_SIZE BITS_PER_WORD
#endif

#ifndef DOUBLE_TYPE_SIZE
#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
#endif

#ifndef LONG_DOUBLE_TYPE_SIZE
#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
#endif

unsigned long
sparc_type_code (register tree type)
{
  register unsigned long qualifiers = 0;
  register unsigned shift;

  /* Only the first 30 bits of the qualifier are valid.  We must refrain from
     setting more, since some assemblers will give an error for this.  Also,
     we must be careful to avoid shifts of 32 bits or more to avoid getting
     unpredictable results.  */

  for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
    {
      switch (TREE_CODE (type))
	{
	case ERROR_MARK:
	  return qualifiers;

	case ARRAY_TYPE:
	  qualifiers |= (3 << shift);
	  break;

	case FUNCTION_TYPE:
	  qualifiers |= (2 << shift);
	  break;

	case POINTER_TYPE:
	case REFERENCE_TYPE:
	case OFFSET_TYPE:
	  qualifiers |= (1 << shift);
	  break;

	case RECORD_TYPE:
	  return (qualifiers | 8);

	case UNION_TYPE:
	case QUAL_UNION_TYPE:
	  return (qualifiers | 9);

	case ENUMERAL_TYPE:
	  return (qualifiers | 10);

	case VOID_TYPE:
	  return (qualifiers | 16);

	case INTEGER_TYPE:
	  /* If this is a range type, consider it to be the underlying
	     type.  */
	  if (TREE_TYPE (type) != 0)
	    break;

	  /* Carefully distinguish all the standard types of C,
	     without messing up if the language is not C.  We do this by
	     testing TYPE_PRECISION and TYPE_UNSIGNED.  The old code used to
	     look at both the names and the above fields, but that's redundant.
	     Any type whose size is between two C types will be considered
	     to be the wider of the two types.  Also, we do not have a
	     special code to use for "long long", so anything wider than
	     long is treated the same.  Note that we can't distinguish
	     between "int" and "long" in this code if they are the same
	     size, but that's fine, since neither can the assembler.  */

	  if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
	    return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));

	  else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
	    return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));

	  else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
	    return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));

	  else
	    return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));

	case REAL_TYPE:
	  /* If this is a range type, consider it to be the underlying
	     type.  */
	  if (TREE_TYPE (type) != 0)
	    break;

	  /* Carefully distinguish all the standard types of C,
	     without messing up if the language is not C.  */

	  if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
	    return (qualifiers | 6);

	  else
	    return (qualifiers | 7);

	case COMPLEX_TYPE:	/* GNU Fortran COMPLEX type.  */
	  /* ??? We need to distinguish between double and float complex types,
	     but I don't know how yet because I can't reach this code from
	     existing front-ends.  */
	  return (qualifiers | 7);	/* Who knows?  */

	case VECTOR_TYPE:
	case BOOLEAN_TYPE:	/* Boolean truth value type.  */
	case LANG_TYPE:		/* ? */
	  return qualifiers;

	default:
	  gcc_unreachable ();		/* Not a type!  */
	}
    }

  return qualifiers;
}
/* Nested function support.  */

/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
   (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
   (to store insns).  This is a bit excessive.  Perhaps a different
   mechanism would be better here.

   Emit enough FLUSH insns to synchronize the data and instruction caches.  */

static void
sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
{
  /* SPARC 32-bit trampoline:

	sethi	%hi(fn), %g1
	sethi	%hi(static), %g2
	jmp	%g1+%lo(fn)
	or	%g2, %lo(static), %g2

    SETHI i,r  = 00rr rrr1 00ii iiii iiii iiii iiii iiii
    JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
   */

  emit_move_insn
    (adjust_address (m_tramp, SImode, 0),
     expand_binop (SImode, ior_optab,
		   expand_shift (RSHIFT_EXPR, SImode, fnaddr,
				 size_int (10), 0, 1),
		   GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
		   NULL_RTX, 1, OPTAB_DIRECT));

  emit_move_insn
    (adjust_address (m_tramp, SImode, 4),
     expand_binop (SImode, ior_optab,
		   expand_shift (RSHIFT_EXPR, SImode, cxt,
				 size_int (10), 0, 1),
		   GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
		   NULL_RTX, 1, OPTAB_DIRECT));

  emit_move_insn
    (adjust_address (m_tramp, SImode, 8),
     expand_binop (SImode, ior_optab,
		   expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
		   GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
		   NULL_RTX, 1, OPTAB_DIRECT));

  emit_move_insn
    (adjust_address (m_tramp, SImode, 12),
     expand_binop (SImode, ior_optab,
		   expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
		   GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
		   NULL_RTX, 1, OPTAB_DIRECT));

  /* On UltraSPARC a flush flushes an entire cache line.  The trampoline is
     aligned on a 16 byte boundary so one flush clears it all.  */
  emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
  if (sparc_cpu != PROCESSOR_ULTRASPARC
      && sparc_cpu != PROCESSOR_ULTRASPARC3
      && sparc_cpu != PROCESSOR_NIAGARA
      && sparc_cpu != PROCESSOR_NIAGARA2)
    emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));

  /* Call __enable_execute_stack after writing onto the stack to make sure
     the stack address is accessible.  */
#ifdef ENABLE_EXECUTE_STACK
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
		     LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
}
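/* As a concrete illustration (hypothetical address): for a nested
   function at 0x00012344 the first word built above is
   0x03000000 | (0x00012344 >> 10), i.e. "sethi %hi(0x00012344), %g1",
   and the third word is 0x81c06000 | (0x00012344 & 0x3ff), i.e.
   "jmp %g1+%lo(0x00012344)".  */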
/* The 64-bit version is simpler because it makes more sense to load the
   values as "immediate" data out of the trampoline.  It's also easier since
   we can read the PC without clobbering a register.  */

static void
sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
{
  /* SPARC 64-bit trampoline:

	rd	%pc, %g1
	ldx	[%g1+24], %g5
	jmp	%g5
	ldx	[%g1+16], %g5
	+16 bytes data
   */

  emit_move_insn (adjust_address (m_tramp, SImode, 0),
		  GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
  emit_move_insn (adjust_address (m_tramp, SImode, 4),
		  GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
  emit_move_insn (adjust_address (m_tramp, SImode, 8),
		  GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
  emit_move_insn (adjust_address (m_tramp, SImode, 12),
		  GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
  emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
  emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
  emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));

  if (sparc_cpu != PROCESSOR_ULTRASPARC
      && sparc_cpu != PROCESSOR_ULTRASPARC3
      && sparc_cpu != PROCESSOR_NIAGARA
      && sparc_cpu != PROCESSOR_NIAGARA2)
    emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));

  /* Call __enable_execute_stack after writing onto the stack to make sure
     the stack address is accessible.  */
#ifdef ENABLE_EXECUTE_STACK
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
		     LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
}
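/* The four opcode words stored above decode to "rd %pc, %g1",
   "ldx [%g1+24], %g5", "jmp %g5" and "ldx [%g1+16], %g5": the jump
   target (FNADDR, at offset 24) is consumed before the delay-slot load
   overwrites %g5 with the static chain (CXT, at offset 16).  */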
/* Worker for TARGET_TRAMPOLINE_INIT.  */

static void
sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
  cxt = force_reg (Pmode, cxt);
  if (TARGET_ARCH64)
    sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
  else
    sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type insn_type;

  if (! recog_memoized (insn))
    return 0;

  insn_type = get_attr_type (insn);

  if (REG_NOTE_KIND (link) == 0)
    {
      /* Data dependency; DEP_INSN writes a register that INSN reads some
	 cycles later.  */

      /* if a load, then the dependence must be on the memory address;
	 add an extra "cycle".  Note that the cost could be two cycles
	 if the reg was written late in an instruction group; we cannot tell
	 here.  */
      if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
	return cost + 3;

      /* Get the delay only if the address of the store is the dependence.  */
      if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
	{
	  rtx pat = PATTERN(insn);
	  rtx dep_pat = PATTERN (dep_insn);

	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    return cost;  /* This should not happen!  */

	  /* The dependency between the two instructions was on the data that
	     is being stored.  Assume that this implies that the address of the
	     store is not dependent.  */
	  if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
	    return cost;

	  return cost + 3;  /* An approximation.  */
	}

      /* A shift instruction cannot receive its data from an instruction
	 in the same cycle; add a one cycle penalty.  */
      if (insn_type == TYPE_SHIFT)
	return cost + 3;   /* Split before cascade into shift.  */
    }
  else
    {
      /* Anti- or output- dependency; DEP_INSN reads/writes a register that
	 INSN writes some cycles later.  */

      /* These are only significant for the fpu unit; writing a fp reg before
	 the fpu has finished with it stalls the processor.  */

      /* Reusing an integer register causes no problems.  */
      if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
	return 0;
    }

  return cost;
}
static int
hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type insn_type, dep_type;
  rtx pat = PATTERN(insn);
  rtx dep_pat = PATTERN (dep_insn);

  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  insn_type = get_attr_type (insn);
  dep_type = get_attr_type (dep_insn);

  switch (REG_NOTE_KIND (link))
    {
    case 0:
      /* Data dependency; DEP_INSN writes a register that INSN reads some
	 cycles later.  */

      switch (insn_type)
	{
	case TYPE_STORE:
	case TYPE_FPSTORE:
	  /* Get the delay iff the address of the store is the dependence.  */
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    return cost;

	  if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
	    return cost;
	  return cost + 3;

	case TYPE_LOAD:
	case TYPE_SLOAD:
	case TYPE_FPLOAD:
	  /* If a load, then the dependence must be on the memory address.  If
	     the addresses aren't equal, then it might be a false dependency */
	  if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
	    {
	      if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
		  || GET_CODE (SET_DEST (dep_pat)) != MEM
		  || GET_CODE (SET_SRC (pat)) != MEM
		  || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
				    XEXP (SET_SRC (pat), 0)))
		return cost + 2;

	      return cost + 8;
	    }
	  break;

	case TYPE_BRANCH:
	  /* Compare to branch latency is 0.  There is no benefit from
	     separating compare and branch.  */
	  if (dep_type == TYPE_COMPARE)
	    return 0;
	  /* Floating point compare to branch latency is less than
	     compare to conditional move.  */
	  if (dep_type == TYPE_FPCMP)
	    return cost - 1;
	  break;

	default:
	  break;
	}
      break;

    case REG_DEP_ANTI:
      /* Anti-dependencies only penalize the fpu unit.  */
      if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
	return 0;
      break;

    default:
      break;
    }

  return cost;
}
static int
sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
{
  switch (sparc_cpu)
    {
    case PROCESSOR_SUPERSPARC:
      cost = supersparc_adjust_cost (insn, link, dep, cost);
      break;
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      cost = hypersparc_adjust_cost (insn, link, dep, cost);
      break;
    default:
      break;
    }
  return cost;
}
static void
sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
		  int sched_verbose ATTRIBUTE_UNUSED,
		  int max_ready ATTRIBUTE_UNUSED)
{
}

static int
sparc_use_sched_lookahead (void)
{
  if (sparc_cpu == PROCESSOR_NIAGARA
      || sparc_cpu == PROCESSOR_NIAGARA2)
    return 0;
  if (sparc_cpu == PROCESSOR_ULTRASPARC
      || sparc_cpu == PROCESSOR_ULTRASPARC3)
    return 4;
  if ((1 << sparc_cpu) &
      ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
       (1 << PROCESSOR_SPARCLITE86X)))
    return 3;
  return 0;
}
static int
sparc_issue_rate (void)
{
  switch (sparc_cpu)
    {
    case PROCESSOR_NIAGARA:
    case PROCESSOR_NIAGARA2:
    default:
      return 1;
    case PROCESSOR_V9:
      /* Assume V9 processors are capable of at least dual-issue.  */
      return 2;
    case PROCESSOR_SUPERSPARC:
      return 3;
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      return 2;
    case PROCESSOR_ULTRASPARC:
    case PROCESSOR_ULTRASPARC3:
      return 4;
    }
}
static int
set_extends (rtx insn)
{
  register rtx pat = PATTERN (insn);

  switch (GET_CODE (SET_SRC (pat)))
    {
      /* Load and some shift instructions zero extend.  */
    case MEM:
    case ZERO_EXTEND:
      /* sethi clears the high bits */
    case HIGH:
      /* LO_SUM is used with sethi.  sethi cleared the high
	 bits and the values used with lo_sum are positive */
    case LO_SUM:
      /* Store flag stores 0 or 1 */
    case LT: case LTU:
    case GT: case GTU:
    case LE: case LEU:
    case GE: case GEU:
    case EQ:
    case NE:
      return 1;
    case AND:
      {
	rtx op0 = XEXP (SET_SRC (pat), 0);
	rtx op1 = XEXP (SET_SRC (pat), 1);
	if (GET_CODE (op1) == CONST_INT)
	  return INTVAL (op1) >= 0;
	if (GET_CODE (op0) != REG)
	  return 0;
	if (sparc_check_64 (op0, insn) == 1)
	  return 1;
	return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
      }
    case IOR:
    case XOR:
      {
	rtx op0 = XEXP (SET_SRC (pat), 0);
	rtx op1 = XEXP (SET_SRC (pat), 1);
	if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
	  return 0;
	if (GET_CODE (op1) == CONST_INT)
	  return INTVAL (op1) >= 0;
	return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
      }
    case LSHIFTRT:
      return GET_MODE (SET_SRC (pat)) == SImode;
      /* Positive integers leave the high bits zero.  */
    case CONST_DOUBLE:
      return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
    case CONST_INT:
      return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
    case ASHIFTRT:
    case SIGN_EXTEND:
      return - (GET_MODE (SET_SRC (pat)) == SImode);
    case ASHIFT:
      return sparc_check_64 (SET_SRC (pat), insn);
    default:
      return 0;
    }
}
/* We _ought_ to have only one kind per function, but...  */
static GTY(()) rtx sparc_addr_diff_list;
static GTY(()) rtx sparc_addr_list;

void
sparc_defer_case_vector (rtx lab, rtx vec, int diff)
{
  vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
  if (diff)
    sparc_addr_diff_list
      = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
  else
    sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
}
static void
sparc_output_addr_vec (rtx vec)
{
  rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
  int idx, vlen = XVECLEN (body, 0);

#ifdef ASM_OUTPUT_ADDR_VEC_START
  ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
#endif

#ifdef ASM_OUTPUT_CASE_LABEL
  ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
			 NEXT_INSN (lab));
#else
  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
#endif

  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_VEC_ELT
	(asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
    }

#ifdef ASM_OUTPUT_ADDR_VEC_END
  ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
#endif
}
static void
sparc_output_addr_diff_vec (rtx vec)
{
  rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
  rtx base = XEXP (XEXP (body, 0), 0);
  int idx, vlen = XVECLEN (body, 1);

#ifdef ASM_OUTPUT_ADDR_VEC_START
  ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
#endif

#ifdef ASM_OUTPUT_CASE_LABEL
  ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
			 NEXT_INSN (lab));
#else
  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
#endif

  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_DIFF_ELT
	(asm_out_file,
	 body,
	 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
	 CODE_LABEL_NUMBER (base));
    }

#ifdef ASM_OUTPUT_ADDR_VEC_END
  ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
#endif
}
static void
sparc_output_deferred_case_vectors (void)
{
  rtx t;
  int align;

  if (sparc_addr_list == NULL_RTX
      && sparc_addr_diff_list == NULL_RTX)
    return;

  /* Align to cache line in the function's code section.  */
  switch_to_section (current_function_section ());

  align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
  if (align > 0)
    ASM_OUTPUT_ALIGN (asm_out_file, align);

  for (t = sparc_addr_list; t ; t = XEXP (t, 1))
    sparc_output_addr_vec (XEXP (t, 0));
  for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
    sparc_output_addr_diff_vec (XEXP (t, 0));

  sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
}
/* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
   unknown.  Return 1 if the high bits are zero, -1 if the register is
   sign extended.  */
int
sparc_check_64 (rtx x, rtx insn)
{
  /* If a register is set only once it is safe to ignore insns this
     code does not know how to handle.  The loop will either recognize
     the single set and return the correct value or fail to recognize
     it and return 0.  */
  int set_once = 0;
  rtx y = x;

  gcc_assert (GET_CODE (x) == REG);

  if (GET_MODE (x) == DImode)
    y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);

  if (flag_expensive_optimizations
      && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
    set_once = 1;

  if (insn == 0)
    {
      if (set_once)
	insn = get_last_insn_anywhere ();
      else
	return 0;
    }

  while ((insn = PREV_INSN (insn)))
    {
      switch (GET_CODE (insn))
	{
	case JUMP_INSN:
	case NOTE:
	  break;
	case CODE_LABEL:
	case CALL_INSN:
	default:
	  if (! set_once)
	    return 0;
	  break;
	case INSN:
	  {
	    rtx pat = PATTERN (insn);
	    if (GET_CODE (pat) != SET)
	      return 0;
	    if (rtx_equal_p (x, SET_DEST (pat)))
	      return set_extends (insn);
	    if (y && rtx_equal_p (y, SET_DEST (pat)))
	      return set_extends (insn);
	    if (reg_overlap_mentioned_p (SET_DEST (pat), y))
	      return 0;
	  }
	}
    }
  return 0;
}
/* Returns assembly code to perform a DImode shift using
   a 64-bit global or out register on SPARC-V8+.  */
const char *
output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
{
  static char asm_code[60];

  /* The scratch register is only required when the destination
     register is not a 64-bit global or out register.  */
  if (which_alternative != 2)
    operands[3] = operands[0];

  /* We can only shift by constants <= 63.  */
  if (GET_CODE (operands[2]) == CONST_INT)
    operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);

  if (GET_CODE (operands[1]) == CONST_INT)
    {
      output_asm_insn ("mov\t%1, %3", operands);
    }
  else
    {
      output_asm_insn ("sllx\t%H1, 32, %3", operands);
      if (sparc_check_64 (operands[1], insn) <= 0)
	output_asm_insn ("srl\t%L1, 0, %L1", operands);
      output_asm_insn ("or\t%L1, %3, %3", operands);
    }

  strcpy(asm_code, opcode);

  if (which_alternative != 2)
    return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
  else
    return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
}
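/* A sketch of what the above emits for a left shift when the input is a
   register pair and no separate scratch is needed (%3 is then %0):

	sllx	%H1, 32, %3	! high word into the upper half
	srl	%L1, 0, %L1	! zero-extend the low word if unknown
	or	%L1, %3, %3	! full 64-bit value in %3
	sllx	%3, %2, %L0	! the requested shift (opcode varies)
	srlx	%L0, 32, %H0	! split the result back into a pair  */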
/* Output rtl to increment the profiler label LABELNO
   for profiling a function entry.  */

void
sparc_profile_hook (int labelno)
{
  char buf[32];
  rtx lab, fun;

  fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
  if (NO_PROFILE_COUNTERS)
    {
      emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
    }
  else
    {
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
    }
}
/* Solaris implementation of TARGET_ASM_NAMED_SECTION.  */

static void
sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
				     tree decl ATTRIBUTE_UNUSED)
{
  fprintf (asm_out_file, "\t.section\t\"%s\"", name);

  if (!(flags & SECTION_DEBUG))
    fputs (",#alloc", asm_out_file);
  if (flags & SECTION_WRITE)
    fputs (",#write", asm_out_file);
  if (flags & SECTION_TLS)
    fputs (",#tls", asm_out_file);
  if (flags & SECTION_CODE)
    fputs (",#execinstr", asm_out_file);

  /* ??? Handle SECTION_BSS.  */

  fputc ('\n', asm_out_file);
}
/* We do not allow indirect calls to be optimized into sibling calls.

   We cannot use sibling calls when delayed branches are disabled
   because they will likely require the call delay slot to be filled.

   Also, on SPARC 32-bit we cannot emit a sibling call when the
   current function returns a structure.  This is because the "unimp
   after call" convention would cause the callee to return to the
   wrong place.  The generic code already disallows cases where the
   function being called returns a structure.

   It may seem strange how this last case could occur.  Usually there
   is code after the call which jumps to epilogue code which dumps the
   return value into the struct return area.  That ought to invalidate
   the sibling call right?  Well, in the C++ case we can end up passing
   the pointer to the struct return area to a constructor (which returns
   void) and then nothing else happens.  Such a sibling call would look
   valid without the added check here.

   VxWorks PIC PLT entries require the global pointer to be initialized
   on entry.  We therefore can't emit sibling calls to them.  */

static bool
sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  return (decl
	  && flag_delayed_branch
	  && (TARGET_ARCH64 || ! cfun->returns_struct)
	  && !(TARGET_VXWORKS_RTP
	       && flag_pic
	       && !targetm.binds_local_p (decl)));
}
/* libfunc renaming.  */
#include "config/gofast.h"

static void
sparc_init_libfuncs (void)
{
  if (TARGET_ARCH32)
    {
      /* Use the subroutines that Sun's library provides for integer
	 multiply and divide.  The `*' prevents an underscore from
	 being prepended by the compiler.  .umul is a little faster
	 than .mul.  */
      set_optab_libfunc (smul_optab, SImode, "*.umul");
      set_optab_libfunc (sdiv_optab, SImode, "*.div");
      set_optab_libfunc (udiv_optab, SImode, "*.udiv");
      set_optab_libfunc (smod_optab, SImode, "*.rem");
      set_optab_libfunc (umod_optab, SImode, "*.urem");

      /* TFmode arithmetic.  These names are part of the SPARC 32bit ABI.  */
      set_optab_libfunc (add_optab, TFmode, "_Q_add");
      set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
      set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
      set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
      set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");

      /* We can define the TFmode sqrt optab only if TARGET_FPU.  This
	 is because with soft-float, the SFmode and DFmode sqrt
	 instructions will be absent, and the compiler will notice and
	 try to use the TFmode sqrt instruction for calls to the
	 builtin function sqrt, but this fails.  */
      if (TARGET_FPU)
	set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");

      set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
      set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
      set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
      set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
      set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
      set_optab_libfunc (le_optab, TFmode, "_Q_fle");

      set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
      set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
      set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
      set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");

      set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
      set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
      set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
      set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");

      if (DITF_CONVERSION_LIBFUNCS)
	{
	  set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
	  set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
	  set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
	  set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
	}

      if (SUN_CONVERSION_LIBFUNCS)
	{
	  set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
	  set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
	  set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
	  set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
	}
    }
  if (TARGET_ARCH64)
    {
      /* In the SPARC 64bit ABI, SImode multiply and divide functions
	 do not exist in the library.  Make sure the compiler does not
	 emit calls to them by accident.  (It should always use the
	 hardware instructions.)  */
      set_optab_libfunc (smul_optab, SImode, 0);
      set_optab_libfunc (sdiv_optab, SImode, 0);
      set_optab_libfunc (udiv_optab, SImode, 0);
      set_optab_libfunc (smod_optab, SImode, 0);
      set_optab_libfunc (umod_optab, SImode, 0);

      if (SUN_INTEGER_MULTIPLY_64)
	{
	  set_optab_libfunc (smul_optab, DImode, "__mul64");
	  set_optab_libfunc (sdiv_optab, DImode, "__div64");
	  set_optab_libfunc (udiv_optab, DImode, "__udiv64");
	  set_optab_libfunc (smod_optab, DImode, "__rem64");
	  set_optab_libfunc (umod_optab, DImode, "__urem64");
	}

      if (SUN_CONVERSION_LIBFUNCS)
	{
	  set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
	  set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
	  set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
	  set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
	}
    }

  gofast_maybe_init_libfuncs ();
}
#define def_builtin(NAME, CODE, TYPE) \
  add_builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
		       NULL_TREE)

/* Implement the TARGET_INIT_BUILTINS target hook.
   Create builtin functions for special SPARC instructions.  */

static void
sparc_init_builtins (void)
{
  if (TARGET_VIS)
    sparc_vis_init_builtins ();
}
/* Create builtin functions for VIS 1.0 instructions.  */

static void
sparc_vis_init_builtins (void)
{
  tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
  tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
  tree v4hi = build_vector_type (intHI_type_node, 4);
  tree v2hi = build_vector_type (intHI_type_node, 2);
  tree v2si = build_vector_type (intSI_type_node, 2);

  tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
  tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
  tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
  tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
  tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
  tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
  tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
  tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
  tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
  tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
  tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
  tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
  tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
							 v8qi, v8qi,
							 intDI_type_node, 0);
  tree di_ftype_di_di = build_function_type_list (intDI_type_node,
						  intDI_type_node,
						  intDI_type_node, 0);
  tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
						    ptr_type_node,
						    intSI_type_node, 0);
  tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
						    ptr_type_node,
						    intDI_type_node, 0);

  /* Packing and expanding vectors.  */
  def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
  def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
	       v8qi_ftype_v2si_v8qi);
  def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
	       v2hi_ftype_v2si);
  def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
  def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
	       v8qi_ftype_v4qi_v4qi);

  /* Multiplications.  */
  def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
	       v4hi_ftype_v4qi_v4hi);
  def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
	       v4hi_ftype_v4qi_v2hi);
  def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
	       v4hi_ftype_v4qi_v2hi);
  def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
	       v4hi_ftype_v8qi_v4hi);
  def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
	       v4hi_ftype_v8qi_v4hi);
  def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
	       v2si_ftype_v4qi_v2hi);
  def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
	       v2si_ftype_v4qi_v2hi);

  /* Data aligning.  */
  def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
	       v4hi_ftype_v4hi_v4hi);
  def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
	       v8qi_ftype_v8qi_v8qi);
  def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
	       v2si_ftype_v2si_v2si);
  def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
	       di_ftype_di_di);

  if (TARGET_ARCH64)
    def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
		 ptr_ftype_ptr_di);
  else
    def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
		 ptr_ftype_ptr_si);

  /* Pixel distance.  */
  def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
	       di_ftype_v8qi_v8qi_di);
}
/* Handle TARGET_EXPAND_BUILTIN target hook.
   Expand builtin functions for sparc intrinsics.  */

static rtx
sparc_expand_builtin (tree exp, rtx target,
		      rtx subtarget ATTRIBUTE_UNUSED,
		      enum machine_mode tmode ATTRIBUTE_UNUSED,
		      int ignore ATTRIBUTE_UNUSED)
{
  tree arg;
  call_expr_arg_iterator iter;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int icode = DECL_FUNCTION_CODE (fndecl);
  rtx pat, op[4];
  enum machine_mode mode[4];
  int arg_count = 0;

  mode[0] = insn_data[icode].operand[0].mode;
  if (!target
      || GET_MODE (target) != mode[0]
      || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
    op[0] = gen_reg_rtx (mode[0]);
  else
    op[0] = target;

  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      arg_count++;
      mode[arg_count] = insn_data[icode].operand[arg_count].mode;
      op[arg_count] = expand_normal (arg);

      if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
							      mode[arg_count]))
	op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
    }

  switch (arg_count)
    {
    case 1:
      pat = GEN_FCN (icode) (op[0], op[1]);
      break;
    case 2:
      pat = GEN_FCN (icode) (op[0], op[1], op[2]);
      break;
    case 3:
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
      break;
    default:
      gcc_unreachable ();
    }

  if (!pat)
    return NULL_RTX;

  emit_insn (pat);

  return op[0];
}
static int
sparc_vis_mul8x16 (int e8, int e16)
{
  return (e8 * e16 + 128) / 256;
}
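/* E8 is treated as an unsigned 8-bit fixed-point fraction and E16 as a
   16-bit value; the +128 implements round-to-nearest.  For example,
   e8 = 128 ("0.5") and e16 = 1000 gives (128000 + 128) / 256 = 500.  */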
/* Multiply the vector elements in ELTS0 to the elements in ELTS1 as specified
   by FNCODE.  All of the elements in ELTS0 and ELTS1 lists must be integer
   constants.  A tree list with the results of the multiplications is returned,
   and each element in the list is of INNER_TYPE.  */

static tree
sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
{
  tree n_elts = NULL_TREE;
  int scale;

  switch (fncode)
    {
    case CODE_FOR_fmul8x16_vis:
      for (; elts0 && elts1;
	   elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
	{
	  int val
	    = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
				 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
	  n_elts = tree_cons (NULL_TREE,
			      build_int_cst (inner_type, val),
			      n_elts);
	}
      break;

    case CODE_FOR_fmul8x16au_vis:
      scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));

      for (; elts0; elts0 = TREE_CHAIN (elts0))
	{
	  int val
	    = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
				 scale);
	  n_elts = tree_cons (NULL_TREE,
			      build_int_cst (inner_type, val),
			      n_elts);
	}
      break;

    case CODE_FOR_fmul8x16al_vis:
      scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));

      for (; elts0; elts0 = TREE_CHAIN (elts0))
	{
	  int val
	    = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
				 scale);
	  n_elts = tree_cons (NULL_TREE,
			      build_int_cst (inner_type, val),
			      n_elts);
	}
      break;

    default:
      gcc_unreachable ();
    }

  return nreverse (n_elts);
}
/* Handle TARGET_FOLD_BUILTIN target hook.
   Fold builtin functions for SPARC intrinsics.  If IGNORE is true the
   result of the function call is ignored.  NULL_TREE is returned if the
   function could not be folded.  */

static tree
sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
		    tree *args, bool ignore)
{
  tree arg0, arg1, arg2;
  tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
  enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);

  if (ignore
      && icode != CODE_FOR_alignaddrsi_vis
      && icode != CODE_FOR_alignaddrdi_vis)
    return fold_convert (rtype, integer_zero_node);

  switch (icode)
    {
    case CODE_FOR_fexpand_vis:
      arg0 = args[0];
      STRIP_NOPS (arg0);

      if (TREE_CODE (arg0) == VECTOR_CST)
	{
	  tree inner_type = TREE_TYPE (rtype);
	  tree elts = TREE_VECTOR_CST_ELTS (arg0);
	  tree n_elts = NULL_TREE;

	  for (; elts; elts = TREE_CHAIN (elts))
	    {
	      unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
	      n_elts = tree_cons (NULL_TREE,
				  build_int_cst (inner_type, val),
				  n_elts);
	    }
	  return build_vector (rtype, nreverse (n_elts));
	}
      break;

    case CODE_FOR_fmul8x16_vis:
    case CODE_FOR_fmul8x16au_vis:
    case CODE_FOR_fmul8x16al_vis:
      arg0 = args[0];
      arg1 = args[1];
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);

      if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
	{
	  tree inner_type = TREE_TYPE (rtype);
	  tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
	  tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
	  tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
						  elts1);

	  return build_vector (rtype, n_elts);
	}
      break;

    case CODE_FOR_fpmerge_vis:
      arg0 = args[0];
      arg1 = args[1];
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);

      if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
	{
	  tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
	  tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
	  tree n_elts = NULL_TREE;

	  for (; elts0 && elts1;
	       elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
	    {
	      n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
	      n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
	    }

	  return build_vector (rtype, nreverse (n_elts));
	}
      break;

    case CODE_FOR_pdist_vis:
      arg0 = args[0];
      arg1 = args[1];
      arg2 = args[2];
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);
      STRIP_NOPS (arg2);

      if (TREE_CODE (arg0) == VECTOR_CST
	  && TREE_CODE (arg1) == VECTOR_CST
	  && TREE_CODE (arg2) == INTEGER_CST)
	{
	  int overflow = 0;
	  unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
	  HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
	  tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
	  tree elts1 = TREE_VECTOR_CST_ELTS (arg1);

	  for (; elts0 && elts1;
	       elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
	    {
	      unsigned HOST_WIDE_INT
		low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
		low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
	      HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
	      HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));

	      unsigned HOST_WIDE_INT l;
	      HOST_WIDE_INT h;

	      overflow |= neg_double (low1, high1, &l, &h);
	      overflow |= add_double (low0, high0, l, h, &l, &h);
	      if (h < 0)
		overflow |= neg_double (l, h, &l, &h);

	      overflow |= add_double (low, high, l, h, &low, &high);
	    }

	  gcc_assert (overflow == 0);

	  return build_int_cst_wide (rtype, low, high);
	}

    default:
      break;
    }

  return NULL_TREE;
}
/* ??? This duplicates information provided to the compiler by the
   ??? scheduler description.  Some day, teach genautomata to output
   ??? the latencies and then CSE will just use that.  */

static bool
sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
		 bool speed ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
	{
	  *total = 0;
	  return true;
	}
      /* FALLTHRU */

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode
	  && ((CONST_DOUBLE_HIGH (x) == 0
	       && CONST_DOUBLE_LOW (x) < 0x1000)
	      || (CONST_DOUBLE_HIGH (x) == -1
		  && CONST_DOUBLE_LOW (x) < 0
		  && CONST_DOUBLE_LOW (x) >= -0x1000)))
	*total = 0;
      else
	*total = 8;
      return true;

    case MEM:
      /* If outer-code was a sign or zero extension, a cost
	 of COSTS_N_INSNS (1) was already added in.  This is
	 why we are subtracting it back out.  */
      if (outer_code == ZERO_EXTEND)
	{
	  *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
	}
      else if (outer_code == SIGN_EXTEND)
	{
	  *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
	}
      else if (float_mode_p)
	{
	  *total = sparc_costs->float_load;
	}
      else
	{
	  *total = sparc_costs->int_load;
	}

      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
	*total = sparc_costs->float_plusminus;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (float_mode_p)
	*total = sparc_costs->float_mul;
      else if (! TARGET_HARD_MUL)
	*total = COSTS_N_INSNS (25);
      else
	{
	  int bit_cost;

	  bit_cost = 0;
	  if (sparc_costs->int_mul_bit_factor)
	    {
	      int nbits;

	      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
		{
		  unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
		  for (nbits = 0; value != 0; value &= value - 1)
		    nbits++;
		}
	      else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
		       && GET_MODE (XEXP (x, 1)) == VOIDmode)
		{
		  rtx x1 = XEXP (x, 1);
		  unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
		  unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);

		  for (nbits = 0; value1 != 0; value1 &= value1 - 1)
		    nbits++;
		  for (; value2 != 0; value2 &= value2 - 1)
		    nbits++;
		}
	      else
		nbits = 7;

	      if (nbits < 3)
		nbits = 3;
	      bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
	      bit_cost = COSTS_N_INSNS (bit_cost);
	    }

	  if (mode == DImode)
	    *total = sparc_costs->int_mulX + bit_cost;
	  else
	    *total = sparc_costs->int_mul + bit_cost;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (float_mode_p)
	{
	  if (mode == DFmode)
	    *total = sparc_costs->float_div_df;
	  else
	    *total = sparc_costs->float_div_sf;
	}
      else
	{
	  if (mode == DImode)
	    *total = sparc_costs->int_divX;
	  else
	    *total = sparc_costs->int_div;
	}
      return false;

    case NEG:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ABS:
    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
      *total = sparc_costs->float_move;
      return false;

    case SQRT:
      if (mode == DFmode)
	*total = sparc_costs->float_sqrt_df;
      else
	*total = sparc_costs->float_sqrt_sf;
      return false;

    case COMPARE:
      if (float_mode_p)
	*total = sparc_costs->float_cmp;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
	*total = sparc_costs->float_cmove;
      else
	*total = sparc_costs->int_cmove;
      return false;

    case IOR:
      /* Handle the NAND vector patterns.  */
      if (sparc_vector_mode_supported_p (GET_MODE (x))
	  && GET_CODE (XEXP (x, 0)) == NOT
	  && GET_CODE (XEXP (x, 1)) == NOT)
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      else
	return false;

    default:
      return false;
    }
}
/* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
   This is achieved by means of a manual dynamic stack space allocation in
   the current frame.  We make the assumption that SEQ doesn't contain any
   function calls, with the possible exception of calls to the PIC helper.  */

static void
emit_and_preserve (rtx seq, rtx reg, rtx reg2)
{
  /* We must preserve the lowest 16 words for the register save area.  */
  HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
  /* We really need only 2 words of fresh stack space.  */
  HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);

  rtx slot
    = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
					     SPARC_STACK_BIAS + offset));

  emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
  emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
  if (reg2)
    emit_insn (gen_rtx_SET (VOIDmode,
			    adjust_address (slot, word_mode, UNITS_PER_WORD),
			    reg2));
  emit_insn (seq);
  if (reg2)
    emit_insn (gen_rtx_SET (VOIDmode,
			    reg2,
			    adjust_address (slot, word_mode, UNITS_PER_WORD)));
  emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
  emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
}
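/* The resulting stack layout, for reference (SIZE is the figure above
   rounded up by SPARC_STACK_ALIGN):

	[%sp + SPARC_STACK_BIAS + 0]			register save area
	[%sp + SPARC_STACK_BIAS + 16*UNITS_PER_WORD]	REG spill slot
	[%sp + SPARC_STACK_BIAS + 17*UNITS_PER_WORD]	REG2 spill slot  */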
8970 /* Output the assembler code for a thunk function. THUNK_DECL is the
8971 declaration for the thunk function itself, FUNCTION is the decl for
8972 the target function. DELTA is an immediate constant offset to be
8973 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8974 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8977 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8978 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8981 rtx this_rtx, insn, funexp;
8982 unsigned int int_arg_first;
8984 reload_completed = 1;
8985 epilogue_completed = 1;
8987 emit_note (NOTE_INSN_PROLOGUE_END);
8989 if (flag_delayed_branch)
8991 /* We will emit a regular sibcall below, so we need to instruct
8992 output_sibcall that we are in a leaf function. */
8993 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8995 /* This will cause final.c to invoke leaf_renumber_regs so we
8996 must behave as if we were in a not-yet-leafified function. */
8997 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
9001 /* We will emit the sibcall manually below, so we will need to
9002 manually spill non-leaf registers. */
9003 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
9005 /* We really are in a leaf function. */
9006 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;

  /* Find the "this" pointer.  Normally in %o0, but in ARCH64 if the function
     returns a structure, the structure return pointer is there instead.  */
  if (TARGET_ARCH64
      && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
  else
    this_rtx = gen_rtx_REG (Pmode, int_arg_first);

  /* Add DELTA.  When possible use a plain add, otherwise load it into
     a register first.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);

      if (! SPARC_SIMM13_P (delta))
	{
	  rtx scratch = gen_rtx_REG (Pmode, 1);
	  emit_move_insn (scratch, delta_rtx);
	  delta_rtx = scratch;
	}

      /* THIS_RTX += DELTA.  */
      emit_insn (gen_add2_insn (this_rtx, delta_rtx));
    }
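
/* Note (added as an illustration, not from the original sources):
   SPARC_SIMM13_P accepts the signed 13-bit range -4096..4095, so a DELTA
   such as 8 is folded directly into the add, while e.g. DELTA = 0x2000
   must first be moved into the scratch register %g1 (register 1) above.  */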

  /* Add the word at address (*THIS_RTX + VCALL_OFFSET).  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx scratch = gen_rtx_REG (Pmode, 1);

      gcc_assert (vcall_offset < 0);

      /* SCRATCH = *THIS_RTX.  */
      emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));

      /* Prepare for adding VCALL_OFFSET.  The difficulty is that we
	 may not have any available scratch register at this point.  */
      if (SPARC_SIMM13_P (vcall_offset))
	;
      /* This is the case if ARCH64 (unless -ffixed-g5 is passed).  */
      else if (! fixed_regs[5]
	       /* The below sequence is made up of at least 2 insns,
		  while the default method may need only one.  */
	       && vcall_offset < -8192)
	{
	  rtx scratch2 = gen_rtx_REG (Pmode, 5);
	  emit_move_insn (scratch2, vcall_offset_rtx);
	  vcall_offset_rtx = scratch2;
	}
      else
	{
	  rtx increment = GEN_INT (-4096);

	  /* VCALL_OFFSET is a negative number whose typical range can be
	     estimated as -32768..0 in 32-bit mode.  In almost all cases
	     it is therefore cheaper to emit multiple add insns than to
	     spill and load the constant into a register.  */
	  while (! SPARC_SIMM13_P (vcall_offset))
	    {
	      emit_insn (gen_add2_insn (scratch, increment));
	      vcall_offset += 4096;
	    }
	  vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
	}

      /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET).  */
      emit_move_insn (scratch, gen_rtx_MEM (Pmode,
					    gen_rtx_PLUS (Pmode,
							  scratch,
							  vcall_offset_rtx)));

      /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET).  */
      emit_insn (gen_add2_insn (this_rtx, scratch));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);

  if (flag_delayed_branch)
    {
      funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
      insn = emit_call_insn (gen_sibcall (funexp));
      SIBLING_CALL_P (insn) = 1;
    }
  else
    {
      /* The hoops we have to jump through in order to generate a sibcall
	 without using delay slots...  */
      rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);

      if (flag_pic)
	{
	  spill_reg = gen_rtx_REG (word_mode, 15);  /* %o7 */
	  spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
	  start_sequence ();
	  /* Delay emitting the PIC helper function because it needs to
	     change the section and we are emitting assembly code.  */
	  load_pic_register ();  /* clobbers %o7 */
	  scratch = legitimize_pic_address (funexp, scratch);
	  seq = get_insns ();
	  end_sequence ();
	  emit_and_preserve (seq, spill_reg, spill_reg2);
	}
      else if (TARGET_ARCH32)
	{
	  emit_insn (gen_rtx_SET (VOIDmode,
				  scratch,
				  gen_rtx_HIGH (SImode, funexp)));
	  emit_insn (gen_rtx_SET (VOIDmode,
				  scratch,
				  gen_rtx_LO_SUM (SImode, scratch, funexp)));
	}
      else  /* TARGET_ARCH64 */
	{
	  switch (sparc_cmodel)
	    {
	    case CM_MEDLOW:
	    case CM_MEDMID:
	      /* The destination can serve as a temporary.  */
	      sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
	      break;

	    case CM_MEDANY:
	    case CM_EMBMEDANY:
	      /* The destination cannot serve as a temporary.  */
	      spill_reg = gen_rtx_REG (DImode, 15);  /* %o7 */
	      start_sequence ();
	      sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
	      seq = get_insns ();
	      end_sequence ();
	      emit_and_preserve (seq, spill_reg, 0);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}

      emit_jump_insn (gen_indirect_jump (scratch));
    }

  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worthwhile.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_alloc ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}
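
/* Illustrative sketch (added; not from the original sources): in C-like
   pseudocode, the code emitted by sparc_output_mi_thunk behaves as

       retval thunk (void *this, ...)
       {
	 this = (char *) this + DELTA;
	 if (VCALL_OFFSET)
	   this = (char *) this
		  + *(ptrdiff_t *) (*(char **) this + VCALL_OFFSET);
	 return function (this, ...);	// emitted as a sibling call
       }

   with all arithmetic done in scratch registers, so that the argument
   registers reach FUNCTION unchanged apart from the adjusted `this'.  */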

/* Return true if sparc_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
			   HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
			   HOST_WIDE_INT vcall_offset,
			   const_tree function ATTRIBUTE_UNUSED)
{
  /* Bound the loop used in the default method above.  */
  return (vcall_offset >= -32768 || ! fixed_regs[5]);
}
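
/* Example (added as an illustration, not from the original sources): a
   vcall offset of -40000 combined with -ffixed-g5 is refused here, because
   without %g5 the fallback loop above needs one add per 4096 bytes of
   offset; for the accepted range down to -32768 that is at most 7 adds.  */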

/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
sparc_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}

/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in local-dynamic base patterns.  */

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}

static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (x
      && GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}

/* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
   This is called from dwarf2out.c to emit call frame instructions
   for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs.  */

static void
sparc_dwarf_handle_frame_unspec (const char *label,
				 rtx pattern ATTRIBUTE_UNUSED,
				 int index ATTRIBUTE_UNUSED)
{
  gcc_assert (index == UNSPECV_SAVEW);
  dwarf2out_window_save (label);
}

/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void
sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.word\t%r_tls_dtpoff32(", file);
      break;
    case 8:
      fputs ("\t.xword\t%r_tls_dtpoff64(", file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs (")", file);
}
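
/* Example (added as an illustration, not from the original sources): for a
   local-dynamic TLS variable `foo' and SIZE == 4, the code above emits

	.word	%r_tls_dtpoff32(foo)

   which resolves to foo's offset from the start of its module's TLS
   block.  */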

/* Do whatever processing is required at the end of a file.  */

static void
sparc_file_end (void)
{
  /* If we need to emit the special PIC helper function, do so now.  */
  if (pic_helper_needed)
    {
      unsigned int regno = REGNO (pic_offset_table_rtx);
      const char *pic_name = reg_names[regno];
      char name[32];
#ifdef DWARF2_UNWIND_INFO
      bool do_cfi;
#endif

      get_pc_thunk_name (name, regno);
      if (USE_HIDDEN_LINKONCE)
	{
	  tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
				  get_identifier (name),
				  build_function_type (void_type_node,
						       void_list_node));
	  DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
					   NULL_TREE, void_type_node);
	  TREE_STATIC (decl) = 1;
	  make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
	  DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
	  DECL_VISIBILITY_SPECIFIED (decl) = 1;
	  allocate_struct_function (decl, true);
	  current_function_decl = decl;
	  init_varasm_status ();
	  assemble_start_function (decl, name);
	}
      else
	{
	  const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
	  switch_to_section (text_section);
	  ASM_OUTPUT_ALIGN (asm_out_file, align);
	  ASM_OUTPUT_LABEL (asm_out_file, name);
	}

#ifdef DWARF2_UNWIND_INFO
      do_cfi = dwarf2out_do_cfi_asm ();
      if (do_cfi)
	fprintf (asm_out_file, "\t.cfi_startproc\n");
#endif
      if (flag_delayed_branch)
	fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
		 pic_name, pic_name);
      else
	fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
		 pic_name, pic_name);
#ifdef DWARF2_UNWIND_INFO
      if (do_cfi)
	fprintf (asm_out_file, "\t.cfi_endproc\n");
#endif
    }

  if (NEED_INDICATE_EXEC_STACK)
    file_end_indicate_exec_stack ();
}
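
/* Example (added as an illustration, not from the original sources):
   assuming the usual helper naming and the GOT pointer in %l7, the
   delayed-branch form emitted above reads

	__sparc_get_pc_thunk.l7:
		jmp	%o7+8
		 add	%o7, %l7, %l7

   i.e. it returns to the caller while adding the call site's PC (in %o7)
   to the GOT displacement the caller preloaded into %l7.  */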

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
sparc_mangle_type (const_tree type)
{
  if (!TARGET_64BIT
      && TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
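
/* Example (added as an illustration, not from the original sources): on
   32-bit SPARC with -mlong-double-128, `long double' mangles as "g" (the
   letter otherwise used for __float128), so f(long double) becomes _Z1fg
   rather than the default _Z1fe.  */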

/* Expand code to perform a 8 or 16-bit compare and swap by doing 32-bit
   compare and swap on the word containing the byte or half-word.  */

void
sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
{
  rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
  rtx addr = gen_reg_rtx (Pmode);
  rtx off = gen_reg_rtx (SImode);
  rtx oldv = gen_reg_rtx (SImode);
  rtx newv = gen_reg_rtx (SImode);
  rtx oldvalue = gen_reg_rtx (SImode);
  rtx newvalue = gen_reg_rtx (SImode);
  rtx res = gen_reg_rtx (SImode);
  rtx resv = gen_reg_rtx (SImode);
  rtx memsi, val, mask, end_label, loop_label, cc;

  emit_insn (gen_rtx_SET (VOIDmode, addr,
			  gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));

  if (Pmode != SImode)
    addr1 = gen_lowpart (SImode, addr1);
  emit_insn (gen_rtx_SET (VOIDmode, off,
			  gen_rtx_AND (SImode, addr1, GEN_INT (3))));

  memsi = gen_rtx_MEM (SImode, addr);
  set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
  MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);

  val = force_reg (SImode, memsi);

  emit_insn (gen_rtx_SET (VOIDmode, off,
			  gen_rtx_XOR (SImode, off,
				       GEN_INT (GET_MODE (mem) == QImode
						? 3 : 2))));

  emit_insn (gen_rtx_SET (VOIDmode, off,
			  gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));

  if (GET_MODE (mem) == QImode)
    mask = force_reg (SImode, GEN_INT (0xff));
  else
    mask = force_reg (SImode, GEN_INT (0xffff));

  emit_insn (gen_rtx_SET (VOIDmode, mask,
			  gen_rtx_ASHIFT (SImode, mask, off)));

  emit_insn (gen_rtx_SET (VOIDmode, val,
			  gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
				       val)));

  oldval = gen_lowpart (SImode, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, oldv,
			  gen_rtx_ASHIFT (SImode, oldval, off)));

  newval = gen_lowpart_common (SImode, newval);
  emit_insn (gen_rtx_SET (VOIDmode, newv,
			  gen_rtx_ASHIFT (SImode, newval, off)));

  emit_insn (gen_rtx_SET (VOIDmode, oldv,
			  gen_rtx_AND (SImode, oldv, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, newv,
			  gen_rtx_AND (SImode, newv, mask)));

  end_label = gen_label_rtx ();
  loop_label = gen_label_rtx ();
  emit_label (loop_label);

  emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
			  gen_rtx_IOR (SImode, oldv, val)));

  emit_insn (gen_rtx_SET (VOIDmode, newvalue,
			  gen_rtx_IOR (SImode, newv, val)));

  emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));

  emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);

  emit_insn (gen_rtx_SET (VOIDmode, resv,
			  gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
				       res)));

  cc = gen_compare_reg_1 (NE, resv, val);
  emit_insn (gen_rtx_SET (VOIDmode, val, resv));

  /* Use cbranchcc4 to separate the compare and branch!  */
  emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
				  cc, const0_rtx, loop_label));

  emit_label (end_label);

  emit_insn (gen_rtx_SET (VOIDmode, res,
			  gen_rtx_AND (SImode, res, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, res,
			  gen_rtx_LSHIFTRT (SImode, res, off)));

  emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
}
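
/* Illustrative sketch (added; not from the original sources): for a QImode
   MEM the expansion above implements, in C-like pseudocode,

       word = addr & -4;
       off = ((addr & 3) ^ 3) * 8;	// big-endian byte to bit offset
       mask = 0xff << off;
       bg = *word & ~mask;		// the bytes around the target byte
       for (;;)
	 {
	   res = CAS (word, (oldval << off) | bg, (newval << off) | bg);
	   if (res == ((oldval << off) | bg))
	     break;			// swap succeeded
	   if ((res & ~mask) == bg)
	     break;			// our byte differed: genuine failure
	   bg = res & ~mask;		// unrelated bytes changed: retry
	 }
       result = (res & mask) >> off;

   so the loop only retries when bytes outside the operand's lane were
   modified concurrently.  */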

/* Implement TARGET_FRAME_POINTER_REQUIRED.  */

static bool
sparc_frame_pointer_required (void)
{
  return !(leaf_function_p () && only_leaf_regs_used ());
}

/* The way this is structured, we can't eliminate SFP in favor of SP
   if the frame pointer is required: we want to use the SFP->HFP elimination
   in that case.  But the test in update_eliminables doesn't know we are
   assuming below that we only do the former elimination.  */

static bool
sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == HARD_FRAME_POINTER_REGNUM
	  || !targetm.frame_pointer_required ());
}
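
/* Note (added as an illustration, not from the original sources): the two
   eliminations thus permitted are SFP->HFP, which is always allowed, and
   SFP->SP, which is allowed only when no frame pointer is required; a
   function that needs a frame pointer therefore keeps its frame accesses
   %fp-relative, while the others get them rewritten to be %sp-relative.  */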

#include "gt-sparc.h"