/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "insn-attr.h"
#include "target-def.h"
#include "cfglayout.h"
#include "langhooks.h"
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};
static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};
static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
const struct processor_costs *sparc_costs = &cypress_costs;
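/* A note on units in the tables above: COSTS_N_INSNS (N) expresses a
   latency of N simple instructions (rtl.h defines it in terms of the
   cost of one fast insn), so e.g. the niagara table prices an fdivd
   at roughly 83 ordinary instructions.  */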
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out
   whether somebody branches between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;
/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) alias_set_type sparc_sr_alias_set;

/* The alias set for the structure return value.  */
static GTY(()) alias_set_type struct_value_alias_set;
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};
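/* An illustration of the table above: in a leaf function no fresh
   register window is allocated, so the incoming arguments %i0-%i5
   (hard regs 24-29) really live in the caller's %o0-%o5 (hard regs
   8-13); hence entry 24 maps to 8, %i7 (31) maps to %o7 (15), and
   the %l registers, which would require a window, are marked -1.  */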
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};
struct GTY(()) machine_function
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  bool leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static bool sparc_handle_option (size_t, const char *, int);
static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
                                tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void emit_pic_helper (void);
static void load_pic_register (bool);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef OBJECT_FORMAT_ELF
static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
#endif
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, tree, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
                                       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *, bool);
static bool sparc_promote_prototypes (const_tree);
static rtx sparc_struct_value_rtx (tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
                                     enum machine_mode, const_tree, bool);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
                                    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
#ifdef SUBTARGET_ATTRIBUTE_TABLE
const struct attribute_spec sparc_attribute_table[];
#endif
/* Option handling.  */

enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch     name,           tune    arch */
  { (char *)0,  "default",      1,      1 },
  { (char *)0,  "-mcpu=",       1,      1 },
  { (char *)0,  "-mtune=",      1,      0 },
  { 0, 0, 0, 0 }
};

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Whether an FPU option was specified.  */
static bool fpu_option_set = false;
/* Initialize the GCC target structure.  */

/* The sparc default is to use .half rather than .short for aligned
   HI objects.  Use .word instead of .long on non-ELF systems.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin
#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec
#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;
/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mfpu:
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}
/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { TARGET_CPU_niagara, "niagara" },
    { TARGET_CPU_niagara2, "niagara2" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.
       The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
    /* Although insns using %y are deprecated, it is a clear win on current
       ultrasparcs.  */
      |MASK_DEPRECATED_V8_INSNS},
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    { "niagara2", PROCESSOR_NIAGARA2, MASK_ISA, MASK_V9},
    { 0, (enum processor_type) 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;
#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
           DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64-bit archs to use 128-bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }
  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
        {
          for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
            if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
              break;
          if (cmodel->name == NULL)
            error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
          else
            sparc_cmodel = cmodel->value;
        }
      else
        error ("-mcmodel= is not supported on 32 bit systems");
    }
  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  gcc_assert (def->name);
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string != (char *)0)
        {
          for (cpu = &cpu_table[0]; cpu->name; ++cpu)
            if (! strcmp (sel->string, cpu->name))
              {
                if (sel->set_tune_p)
                  sparc_cpu = cpu->processor;

                if (sel->set_arch_p)
                  {
                    target_flags &= ~cpu->disable;
                    target_flags |= cpu->enable;
                  }
                break;
              }

          if (! cpu->name)
            error ("bad value (%s) for %s switch", sel->string, sel->name);
        }
    }
  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;
  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
          || sparc_cpu == PROCESSOR_ULTRASPARC3
          || sparc_cpu == PROCESSOR_NIAGARA
          || sparc_cpu == PROCESSOR_NIAGARA2))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;
  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire unique alias sets for our private stuff.  */
  sparc_sr_alias_set = new_alias_set ();
  struct_value_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;
  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    }
#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
  if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
    set_param_value ("simultaneous-prefetches",
                     ((sparc_cpu == PROCESSOR_ULTRASPARC
                       || sparc_cpu == PROCESSOR_NIAGARA
                       || sparc_cpu == PROCESSOR_NIAGARA2)
                      ? 2
                      : (sparc_cpu == PROCESSOR_ULTRASPARC3
                         ? 8 : 3)));
  if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
    set_param_value ("l1-cache-line-size",
                     ((sparc_cpu == PROCESSOR_ULTRASPARC
                       || sparc_cpu == PROCESSOR_ULTRASPARC3
                       || sparc_cpu == PROCESSOR_NIAGARA
                       || sparc_cpu == PROCESSOR_NIAGARA2)
                      ? 64 : 32));
}
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
          || code == LE || code == GT);
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
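/* Taken together, the three predicates above classify an SF bit pattern I
   by how the integer unit can materialize it: a sign-extended 13-bit
   immediate is one mov; a value whose low 10 bits are zero is one sethi
   (which sets the upper 22 bits); anything else takes the two-insn
   sethi/or (high/losum) sequence.  */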
/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
        return false;

      if (!reload_in_progress)
        {
          operands[0] = validize_mem (operands[0]);
          operands[1] = force_reg (mode, operands[1]);
        }
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != HIGH
      && sparc_tls_referenced_p (operands [1]))
    {
      rtx sym = operands[1];
      rtx addend = NULL;

      if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
        {
          addend = XEXP (XEXP (sym, 0), 1);
          sym = XEXP (XEXP (sym, 0), 0);
        }

      gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));

      sym = legitimize_tls_address (sym);
      if (addend)
        {
          sym = gen_rtx_PLUS (mode, sym, addend);
          sym = force_operand (sym, operands[0]);
        }
      operands[1] = sym;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
        operands[1] = legitimize_pic_address (operands[1], mode, 0);

      /* VxWorks does not impose a fixed gap between segments; the run-time
         gap can be different from the object-file gap.  We therefore can't
         assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
         are absolutely sure that X is in the same segment as the GOT.
         Unfortunately, the flexibility of linker scripts means that we
         can't be sure of that in general, so assume that _G_O_T_-relative
         accesses are never valid on VxWorks.  */
      if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
        {
          if (mode == SImode)
            {
              emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
              return true;
            }

          if (mode == DImode)
            {
              gcc_assert (TARGET_ARCH64);
              emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
              return true;
            }
        }

      if (symbolic_operand (operands[1], mode))
        {
          operands[1] = legitimize_pic_address (operands[1],
                                                mode,
                                                (reload_in_progress ?
                                                 operands[0] : NULL_RTX));
          return false;
        }
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
          || SCALAR_FLOAT_MODE_P (mode)
          || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
         not storing directly into memory.  So fix this up to avoid
         crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
        operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
          && const_zero_operand (operands[1], mode))
        return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
          /* We are able to build any SF constant in integer registers
             with at most 2 instructions.  */
          && (mode == SFmode
              /* And any DF constant in integer registers.  */
              || (mode == DFmode
                  && (reload_completed || reload_in_progress))))
        return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
        operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
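/* The expected callers of sparc_expand_move are the mov<mode> expanders
   in sparc.md, roughly along the lines of

       if (sparc_expand_move (SImode, operands))
         DONE;

   i.e. a true return value means all needed insns have already been
   emitted.  */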
/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
                  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
         this way CSE can see everything and reuse intermediate
         values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              GEN_INT (INTVAL (op1)
                                       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
                              op0,
                              gen_rtx_IOR (mode, temp,
                                           GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
                              op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
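/* A worked example of the CONST_INT path above: 0x12345678 is neither a
   simm13 nor a sethi-only value, so it becomes

       sethi  %hi(0x12345678), %temp   ! %temp = 0x12345400 (top 22 bits)
       or     %temp, 0x278, %op0       ! 0x12345678 & 0x3ff == 0x278

   which is exactly the masked SET plus IOR emitted above.  */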
/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }
  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 4TB of the virtual address
         space.

         sethi  %hi(symbol), %temp1
         or     %temp1, %lo(symbol), %reg  */
      if (temp)
        temp1 = temp;  /* op0 is allowed.  */
      else
        temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;
    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 16TB of the virtual address
         space.

         sethi  %h44(symbol), %temp1
         or     %temp1, %m44(symbol), %temp2
         sllx   %temp2, 12, %temp3
         or     %temp3, %l44(symbol), %reg  */
      if (temp)
        {
          temp1 = op0;
          temp2 = op0;
          temp3 = temp;  /* op0 is allowed.  */
        }
      else
        {
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
        }

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
                              gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;
    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable can be placed anywhere in the virtual address
         space.

         sethi  %hh(symbol), %temp1
         sethi  %lm(symbol), %temp2
         or     %temp1, %hm(symbol), %temp3
         sllx   %temp3, 32, %temp4
         or     %temp4, %temp2, %temp5
         or     %temp5, %lo(symbol), %reg  */
      if (temp)
        {
          /* It is possible that one of the registers we got for operands[2]
             might coincide with that of operands[0] (which is why we made
             it TImode).  Pick the other one to use as our scratch.  */
          if (rtx_equal_p (temp, op0))
            {
              gcc_assert (ti_temp);
              temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
            }
          temp1 = op0;
          temp2 = temp;  /* op0 is _not_ allowed, see above.  */
          temp3 = op0;
          temp4 = op0;
          temp5 = op0;
        }
      else
        {
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
          temp4 = gen_reg_rtx (DImode);
          temp5 = gen_reg_rtx (DImode);
        }

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
                              gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
                              gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;
    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
         Essentially it is MEDLOW with a fixed 64-bit
         virtual base added to all data segment addresses.
         Text-segment stuff is computed like MEDANY, we can't
         reuse the code above because the relocation knobs
         look different.

         Data segment:  sethi   %hi(symbol), %temp1
                        add     %temp1, EMBMEDANY_BASE_REG, %temp2
                        or      %temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
        {
          if (temp)
            {
              temp1 = temp;  /* op0 is allowed.  */
              temp2 = op0;
            }
          else
            {
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
            }

          emit_insn (gen_embmedany_sethi (temp1, op1));
          emit_insn (gen_embmedany_brsum (temp2, temp1));
          emit_insn (gen_embmedany_losum (op0, temp2, op1));
        }
      else
        {
          /* Text segment:  sethi   %uhi(symbol), %temp1
                            sethi   %hi(symbol), %temp2
                            or      %temp1, %ulo(symbol), %temp3
                            sllx    %temp3, 32, %temp4
                            or      %temp4, %temp2, %temp5
                            or      %temp5, %lo(symbol), %reg  */
          if (temp)
            {
              /* It is possible that one of the registers we got for operands[2]
                 might coincide with that of operands[0] (which is why we made
                 it TImode).  Pick the other one to use as our scratch.  */
              if (rtx_equal_p (temp, op0))
                {
                  gcc_assert (ti_temp);
                  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
                }
              temp1 = op0;
              temp2 = temp;  /* op0 is _not_ allowed, see above.  */
              temp3 = op0;
              temp4 = op0;
              temp5 = op0;
            }
          else
            {
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
              temp3 = gen_reg_rtx (DImode);
              temp4 = gen_reg_rtx (DImode);
              temp5 = gen_reg_rtx (DImode);
            }

          emit_insn (gen_embmedany_textuhi (temp1, op1));
          emit_insn (gen_embmedany_texthi  (temp2, op1));
          emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
          emit_insn (gen_rtx_SET (VOIDmode, temp4,
                                  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
          emit_insn (gen_rtx_SET (VOIDmode, temp5,
                                  gen_rtx_PLUS (DImode, temp4, temp2)));
          emit_insn (gen_embmedany_textlo  (op0, temp5, op1));
        }
      break;

    default:
      gcc_unreachable ();
    }
}
#if HOST_BITS_PER_WIDE_INT == 32
void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH, they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits and match
   a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}
/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
                                           unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
                               unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
                              gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
         instead.  This way the combiner will notice logical operations
         such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_NOT (DImode, temp)));
        }
      else
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_safe_XOR64 (temp,
                                                  (-(HOST_WIDE_INT)0x400
                                                   | (low_bits & 0x3ff)))));
        }
    }
}
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
                               unsigned HOST_WIDE_INT high_bits,
                               unsigned HOST_WIDE_INT low_immediate,
                               int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
        emit_insn (gen_rtx_SET (VOIDmode, op0,
                                gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
        temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
                          gen_rtx_ASHIFT (DImode, temp2,
                                          GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
                            gen_safe_OR64 (op0, low_immediate)));
}
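/* An illustration of quick2: for a constant such as 0xdeadbeef00000000
   (high_bits = 0xdeadbeef, low_immediate = 0, shift_count = 32) it emits
   sethi %hi(0xdeadbeef) plus an or for the low 10 bits, then one sllx
   by 32; a nonzero low_immediate would append a final or.  */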
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
                                unsigned HOST_WIDE_INT high_bits,
                                unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
        emit_insn (gen_rtx_SET (VOIDmode,
                                sub_temp,
                                gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
        sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
                              gen_rtx_ASHIFT (DImode, sub_temp,
                                              GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
        {
          emit_insn (gen_rtx_SET (VOIDmode, temp3,
                                  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_PLUS (DImode, temp4, temp3)));
        }
      else
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_PLUS (DImode, temp4, temp2)));
        }
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
         painful.  However we do still make an attempt to
         avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_ASHIFT (DImode, sub_temp,
                                                  GEN_INT (to_shift))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_IOR (DImode, op0, low1)));
          sub_temp = op0;
          to_shift = 12;
        }
      else
        {
          to_shift += 12;
        }
      if (low2 != const0_rtx)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_ASHIFT (DImode, sub_temp,
                                                  GEN_INT (to_shift))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_IOR (DImode, op0, low2)));
          sub_temp = op0;
          to_shift = 8;
        }
      else
        {
          to_shift += 8;
        }
      emit_insn (gen_rtx_SET (VOIDmode, op0,
                              gen_rtx_ASHIFT (DImode, sub_temp,
                                              GEN_INT (to_shift))));
      if (low3 != const0_rtx)
        emit_insn (gen_rtx_SET (VOIDmode, op0,
                                gen_rtx_IOR (DImode, op0, low3)));
    }
}
/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
                        unsigned HOST_WIDE_INT low_bits,
                        int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
          && ((low_bits >> i) & 1))
        lowest_bit_set = i;
      if ((highest_bit_set == -1)
          && ((high_bits >> (32 - i - 1)) & 1))
        highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
         && ((highest_bit_set == -1)
             || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
        {
          if ((lowest_bit_set == -1)
              && ((high_bits >> i) & 1))
            lowest_bit_set = i + 32;
          if ((highest_bit_set == -1)
              && ((low_bits >> (32 - i - 1)) & 1))
            highest_bit_set = 32 - i - 1;
        }
      while (++i < 32
             && ((highest_bit_set == -1)
                 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
        {
          if ((low_bits & (1 << i)) != 0)
            continue;
        }
      else
        {
          if ((high_bits & (1 << (i - 32))) != 0)
            continue;
        }
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
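/* For example, analyzing 0x00000000ffff0000 (high_bits = 0, low_bits =
   0xffff0000) yields lowest_bit_set = 16, highest_bit_set = 31 and
   all_bits_between_are_set = 1, which steers sparc_emit_set_const64
   below toward a short sethi-plus-shift sequence.  */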
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
                   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
                          &highest_bit_set, &lowest_bit_set,
                          &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
                                                        unsigned HOST_WIDE_INT,
                                                        int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
                          unsigned HOST_WIDE_INT low_bits,
                          int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
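/* Continuing the 0x00000000ffff0000 example: with lowest_bit_set = 16
   and shift = 10 this returns (0xffff0000 >> 16) << 10 == 0x03fffc00.
   Its low 10 bits are clear, so SPARC_SETHI_P holds and the caller can
   load it with one sethi and then shift it back into position.  */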
/* Here we are sure to be arch64 and this is an integer constant
   being loaded into a register.  Emit the most efficient
   insn sequence possible.  Detection of all the 1-insn cases
   has been done already.  */
void
sparc_emit_set_const64 (rtx op0, rtx op1)
{
  unsigned HOST_WIDE_INT high_bits, low_bits;
  int lowest_bit_set, highest_bit_set;
  int all_bits_between_are_set;
  rtx temp = 0;

  /* Sanity check that we know what we are working with.  */
  gcc_assert (TARGET_ARCH64
              && (GET_CODE (op0) == SUBREG
                  || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));

  if (reload_in_progress || reload_completed)
    temp = op0;

  if (GET_CODE (op1) != CONST_INT)
    {
      sparc_emit_set_symbolic_const64 (op0, op1, temp);
      return;
    }

  if (! temp)
    temp = gen_reg_rtx (DImode);

  high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
  low_bits = (INTVAL (op1) & 0xffffffff);

  /* low_bits   bits 0  --> 31
     high_bits  bits 32 --> 63  */

  analyze_64bit_constant (high_bits, low_bits,
                          &highest_bit_set, &lowest_bit_set,
                          &all_bits_between_are_set);

  /* First try for a 2-insn sequence.  */

  /* These situations are preferred because the optimizer can
   * do more things with them:
   * 1) mov     -1, %reg
   *    sllx    %reg, shift, %reg
   * 2) mov     -1, %reg
   *    srlx    %reg, shift, %reg
   * 3) mov     some_small_const, %reg
   *    sllx    %reg, shift, %reg
   */
  if (((highest_bit_set == 63
        || lowest_bit_set == 0)
       && all_bits_between_are_set != 0)
      || ((highest_bit_set - lowest_bit_set) < 12))
    {
      HOST_WIDE_INT the_const = -1;
      int shift = lowest_bit_set;

      if ((highest_bit_set != 63
           && lowest_bit_set != 0)
          || all_bits_between_are_set == 0)
        {
          the_const =
            create_simple_focus_bits (high_bits, low_bits,
                                      lowest_bit_set, 0);
        }
      else if (lowest_bit_set == 0)
        shift = -(63 - highest_bit_set);

      gcc_assert (SPARC_SIMM13_P (the_const));
      gcc_assert (shift != 0);

      emit_insn (gen_safe_SET64 (temp, the_const));
      if (shift > 0)
        emit_insn (gen_rtx_SET (VOIDmode,
                                op0,
                                gen_rtx_ASHIFT (DImode,
                                                temp,
                                                GEN_INT (shift))));
      else if (shift < 0)
        emit_insn (gen_rtx_SET (VOIDmode,
                                op0,
                                gen_rtx_LSHIFTRT (DImode,
                                                  temp,
                                                  GEN_INT (-shift))));
      return;
    }

  /* Now a range of 22 or less bits set somewhere.
   * 1) sethi   %hi(focus_bits), %reg
   *    sllx    %reg, shift, %reg
   * 2) sethi   %hi(focus_bits), %reg
   *    srlx    %reg, shift, %reg
   */
  if ((highest_bit_set - lowest_bit_set) < 21)
    {
      unsigned HOST_WIDE_INT focus_bits =
        create_simple_focus_bits (high_bits, low_bits,
                                  lowest_bit_set, 10);

      gcc_assert (SPARC_SETHI_P (focus_bits));
      gcc_assert (lowest_bit_set != 10);

      emit_insn (gen_safe_HIGH64 (temp, focus_bits));

      /* If lowest_bit_set == 10 then a sethi alone could have done it.  */
      if (lowest_bit_set < 10)
        emit_insn (gen_rtx_SET (VOIDmode,
                                op0,
                                gen_rtx_LSHIFTRT (DImode, temp,
                                                  GEN_INT (10 - lowest_bit_set))));
      else if (lowest_bit_set > 10)
        emit_insn (gen_rtx_SET (VOIDmode,
                                op0,
                                gen_rtx_ASHIFT (DImode, temp,
                                                GEN_INT (lowest_bit_set - 10))));
      return;
    }

  /* 1) sethi   %hi(low_bits), %reg
   *    or      %reg, %lo(low_bits), %reg
   * 2) sethi   %hi(~low_bits), %reg
   *    xor     %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
   */
  if (high_bits == 0
      || high_bits == 0xffffffff)
    {
      sparc_emit_set_const64_quick1 (op0, temp, low_bits,
                                     (high_bits == 0xffffffff));
      return;
    }
  /* Now, try 3-insn sequences.  */

  /* 1) sethi   %hi(high_bits), %reg
   *    or      %reg, %lo(high_bits), %reg
   *    sllx    %reg, 32, %reg
   */
  if (low_bits == 0)
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
      return;
    }
  /* We may be able to do something quick
     when the constant is negated, so try that.  */
  if (const64_is_2insns ((~high_bits) & 0xffffffff,
                         (~low_bits) & 0xfffffc00))
    {
      /* NOTE: The trailing bits get XOR'd so we need the
         non-negated bits, not the negated ones.  */
      unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;

      if ((((~high_bits) & 0xffffffff) == 0
           && ((~low_bits) & 0x80000000) == 0)
          || (((~high_bits) & 0xffffffff) == 0xffffffff
              && ((~low_bits) & 0x80000000) != 0))
        {
          unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);

          if ((SPARC_SETHI_P (fast_int)
               && (~high_bits & 0xffffffff) == 0)
              || SPARC_SIMM13_P (fast_int))
            emit_insn (gen_safe_SET64 (temp, fast_int));
          else
            sparc_emit_set_const64 (temp, GEN_INT (fast_int));
        }
      else
        {
          rtx negated_const;
          negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
                                   (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
          sparc_emit_set_const64 (temp, negated_const);
        }

      /* If we are XOR'ing with -1, then we should emit a one's complement
         instead.  This way the combiner will notice logical operations
         such as ANDN later on and substitute.  */
      if (trailing_bits == 0x3ff)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_NOT (DImode, temp)));
        }
      else
        {
          emit_insn (gen_rtx_SET (VOIDmode,
                                  op0,
                                  gen_safe_XOR64 (temp,
                                                  (-0x400 | trailing_bits))));
        }
      return;
    }
  /* 1) sethi   %hi(xxx), %reg
   *    or      %reg, %lo(xxx), %reg
   *    sllx    %reg, yyy, %reg
   *
   * ??? This is just a generalized version of the low_bits==0
   * thing above, FIXME...
   */
  if ((highest_bit_set - lowest_bit_set) < 32)
    {
      unsigned HOST_WIDE_INT focus_bits =
        create_simple_focus_bits (high_bits, low_bits,
                                  lowest_bit_set, 0);

      /* We can't get here in this state.  */
      gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);

      /* So what we know is that the set bits straddle the
         middle of the 64-bit word.  */
      sparc_emit_set_const64_quick2 (op0, temp,
                                     focus_bits, 0,
                                     lowest_bit_set);
      return;
    }
  /* 1) sethi   %hi(high_bits), %reg
   *    or      %reg, %lo(high_bits), %reg
   *    sllx    %reg, 32, %reg
   *    or      %reg, low_bits, %reg
   */
  if (SPARC_SIMM13_P (low_bits)
      && ((int)low_bits > 0))
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
      return;
    }

  /* The easiest way when all else fails, is full decomposition.  */
#if 0
  printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
          high_bits, low_bits, ~high_bits, ~low_bits);
#endif
  sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
}
#endif /* HOST_BITS_PER_WIDE_INT == 32 */
/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
   return the mode to be used for the comparison.  For floating-point,
   CCFP[E]mode is used.  CC_NOOVmode should be used when the first operand
   is a PLUS, MINUS, NEG, or ASHIFT.  CCmode should be used when no special
   processing is needed.  */

enum machine_mode
select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      switch (op)
        {
        case EQ:
        case NE:
        case UNORDERED:
        case ORDERED:
        case UNLT:
        case UNLE:
        case UNGT:
        case UNGE:
        case UNEQ:
        case LTGT:
          return CCFPmode;

        case LT:
        case LE:
        case GT:
        case GE:
          return CCFPEmode;

        default:
          gcc_unreachable ();
        }
    }
  else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
           || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
    {
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
        return CCX_NOOVmode;
      else
        return CC_NOOVmode;
    }
  else
    {
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
        return CCXmode;
      else
        return CCmode;
    }
}
2008 /* Emit the compare insn and return the CC reg for a CODE comparison
2009 with operands X and Y. */
2012 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2014 enum machine_mode mode;
2017 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2020 mode = SELECT_CC_MODE (code, x, y);
2022 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2023 fcc regs (cse can't tell they're really call clobbered regs and will
2024 remove a duplicate comparison even if there is an intervening function
2025 call - it will then try to reload the cc reg via an int reg which is why
2026 we need the movcc patterns). It is possible to provide the movcc
2027 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2028 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2029 to tell cse that CCFPE mode registers (even pseudos) are call
2032 /* ??? This is an experiment. Rather than making changes to cse which may
2033 or may not be easy/clean, we do our own cse. This is possible because
2034 we will generate hard registers. Cse knows they're call clobbered (it
2035 doesn't know the same thing about pseudos). If we guess wrong, no big
2036 deal, but if we win, great! */
2038 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2039 #if 1 /* experiment */
2042 /* We cycle through the registers to ensure they're all exercised. */
2043 static int next_fcc_reg = 0;
2044 /* Previous x,y for each fcc reg. */
2045 static rtx prev_args[4][2];
2047 /* Scan prev_args for x,y. */
2048 for (reg = 0; reg < 4; reg++)
2049 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2054 prev_args[reg][0] = x;
2055 prev_args[reg][1] = y;
2056 next_fcc_reg = (next_fcc_reg + 1) & 3;
2058 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2061 cc_reg = gen_reg_rtx (mode);
2062 #endif /* ! experiment */
2063 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2064 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2066 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2068 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
2069 will only result in an unrecognizable insn so no point in asserting. */
2070 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2076 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2079 gen_compare_reg (rtx cmp)
2081 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2084 /* This function is used for v9 only.
2085 DEST is the target of the Scc insn.
2086 CODE is the code for an Scc's comparison.
2087 X and Y are the values we compare.
2089 This function is needed to turn
2092 (gt (reg:CCX 100 %icc)
2096 (gt:DI (reg:CCX 100 %icc)
2099 I.e., the instruction recognizer needs to see the mode of the comparison to
2100 find the right instruction. We could use "gt:DI" right in the
2101 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2104 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2107 && (GET_MODE (x) == DImode
2108 || GET_MODE (dest) == DImode))
2111 /* Try to use the movrCC insns. */
2113 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2115 && v9_regcmp_p (compare_code))
2120 /* Special case for op0 != 0. This can be done with one instruction if dest == op0.  */
2123 if (compare_code == NE
2124 && GET_MODE (dest) == DImode
2125 && rtx_equal_p (op0, dest))
2127 emit_insn (gen_rtx_SET (VOIDmode, dest,
2128 gen_rtx_IF_THEN_ELSE (DImode,
2129 gen_rtx_fmt_ee (compare_code, DImode,
2136 if (reg_overlap_mentioned_p (dest, op0))
2138 /* Handle the case where dest == x.
2139 We "early clobber" the result. */
2140 op0 = gen_reg_rtx (GET_MODE (x));
2141 emit_move_insn (op0, x);
2144 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2145 if (GET_MODE (op0) != DImode)
2147 temp = gen_reg_rtx (DImode);
2148 convert_move (temp, op0, 0);
2152 emit_insn (gen_rtx_SET (VOIDmode, dest,
2153 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2154 gen_rtx_fmt_ee (compare_code, DImode,
2162 x = gen_compare_reg_1 (compare_code, x, y);
2165 gcc_assert (GET_MODE (x) != CC_NOOVmode
2166 && GET_MODE (x) != CCX_NOOVmode);
2168 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2169 emit_insn (gen_rtx_SET (VOIDmode, dest,
2170 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2171 gen_rtx_fmt_ee (compare_code,
2172 GET_MODE (x), x, y),
2173 const1_rtx, dest)));
2179 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2180 without jumps using the addx/subx instructions. */
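/* A sketch of the trick for "sltu %o0, %o1" (not the exact expansion):

	subcc	%o0, %o1, %g0	! set the carry bit iff %o0 <u %o1
	addx	%g0, 0, %o2	! %o2 = 0 + 0 + C

   and "sgeu" uses "subx %g0, -1, %o2", i.e. 1 - C, instead.  */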
2183 emit_scc_insn (rtx operands[])
2190 /* The quad-word fp compare library routines all return nonzero to indicate
2191 true, which is different from the equivalent libgcc routines, so we must
2192 handle them specially here. */
2193 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2195 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2196 GET_CODE (operands[1]));
2197 operands[2] = XEXP (operands[1], 0);
2198 operands[3] = XEXP (operands[1], 1);
2201 code = GET_CODE (operands[1]);
2205 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2206 more applications). The exception to this is "reg != 0" which can
2207 be done in one instruction on v9 (so we do it). */
2210 if (GET_MODE (x) == SImode)
2212 rtx pat = gen_seqsi_special (operands[0], x, y);
2216 else if (GET_MODE (x) == DImode)
2218 rtx pat = gen_seqdi_special (operands[0], x, y);
2226 if (GET_MODE (x) == SImode)
2228 rtx pat = gen_snesi_special (operands[0], x, y);
2232 else if (GET_MODE (x) == DImode)
2234 rtx pat = gen_snedi_special (operands[0], x, y);
2240 /* For the rest, on v9 we can use conditional moves. */
2244 if (gen_v9_scc (operands[0], code, x, y))
2248 /* We can do LTU and GEU using the addx/subx instructions too. And
2249 for GTU/LEU, if both operands are registers, swap them and fall
2250 back to the easy case. */
2251 if (code == GTU || code == LEU)
2253 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2254 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2259 code = swap_condition (code);
2263 if (code == LTU || code == GEU)
2265 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2266 gen_rtx_fmt_ee (code, SImode,
2267 gen_compare_reg_1 (code, x, y),
2272 /* Nope, do branches. */
2276 /* Emit a conditional jump insn for the v9 architecture using comparison code
2277 CODE and jump target LABEL.
2278 This function exists to take advantage of the v9 brxx insns. */
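/* E.g. for "x != 0" with x in a 64-bit register, this allows

	brnz	%o0, .Llabel
	 nop

   instead of a compare followed by a conditional branch (the label
   name is just a placeholder).  */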
2281 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2283 emit_jump_insn (gen_rtx_SET (VOIDmode,
2285 gen_rtx_IF_THEN_ELSE (VOIDmode,
2286 gen_rtx_fmt_ee (code, GET_MODE (op0),
2288 gen_rtx_LABEL_REF (VOIDmode, label),
2293 emit_conditional_branch_insn (rtx operands[])
2295 /* The quad-word fp compare library routines all return nonzero to indicate
2296 true, which is different from the equivalent libgcc routines, so we must
2297 handle them specially here. */
2298 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2300 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2301 GET_CODE (operands[0]));
2302 operands[1] = XEXP (operands[0], 0);
2303 operands[2] = XEXP (operands[0], 1);
2306 if (TARGET_ARCH64 && operands[2] == const0_rtx
2307 && GET_CODE (operands[1]) == REG
2308 && GET_MODE (operands[1]) == DImode)
2310 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2314 operands[1] = gen_compare_reg (operands[0]);
2315 operands[2] = const0_rtx;
2316 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2317 operands[1], operands[2]);
2318 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2323 /* Generate a DFmode part of a hard TFmode register.
2324 REG is the TFmode hard register, LOW is 1 for the
2325 low 64 bits of the register and 0 otherwise.  */
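/* E.g. on 32-bit, a TFmode value in %f4 (register 36) has its high
   DFmode half in %f4 and its low half in %f6; 64-bit integer
   registers step by 1 instead of 2, since each holds 64 bits.  */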
2328 gen_df_reg (rtx reg, int low)
2330 int regno = REGNO (reg);
2332 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2333 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2334 return gen_rtx_REG (DFmode, regno);
2337 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2338 Unlike normal calls, TFmode operands are passed by reference. It is
2339 assumed that no more than 3 operands are required. */
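/* A sketch of a binary-op call as the callers below make it, with
   TFmode rtxes in ops[] and an example routine name:

     rtx ops[3] = { dest, src1, src2 };
     emit_soft_tfmode_libcall ("_Qp_add", 3, ops);

   i.e. the libcall receives the operands' addresses, as in
   _Qp_add (&dest, &src1, &src2).  */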
2342 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2344 rtx ret_slot = NULL, arg[3], func_sym;
2347 /* We only expect to be called for conversions, unary, and binary ops. */
2348 gcc_assert (nargs == 2 || nargs == 3);
2350 for (i = 0; i < nargs; ++i)
2352 rtx this_arg = operands[i];
2355 /* TFmode arguments and return values are passed by reference. */
2356 if (GET_MODE (this_arg) == TFmode)
2358 int force_stack_temp;
2360 force_stack_temp = 0;
2361 if (TARGET_BUGGY_QP_LIB && i == 0)
2362 force_stack_temp = 1;
2364 if (GET_CODE (this_arg) == MEM
2365 && ! force_stack_temp)
2366 this_arg = XEXP (this_arg, 0);
2367 else if (CONSTANT_P (this_arg)
2368 && ! force_stack_temp)
2370 this_slot = force_const_mem (TFmode, this_arg);
2371 this_arg = XEXP (this_slot, 0);
2375 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2377 /* Operand 0 is the return value. We'll copy it out later. */
2379 emit_move_insn (this_slot, this_arg);
2381 ret_slot = this_slot;
2383 this_arg = XEXP (this_slot, 0);
2390 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2392 if (GET_MODE (operands[0]) == TFmode)
2395 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2396 arg[0], GET_MODE (arg[0]),
2397 arg[1], GET_MODE (arg[1]));
2399 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2400 arg[0], GET_MODE (arg[0]),
2401 arg[1], GET_MODE (arg[1]),
2402 arg[2], GET_MODE (arg[2]));
2405 emit_move_insn (operands[0], ret_slot);
2411 gcc_assert (nargs == 2);
2413 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2414 GET_MODE (operands[0]), 1,
2415 arg[1], GET_MODE (arg[1]));
2417 if (ret != operands[0])
2418 emit_move_insn (operands[0], ret);
2422 /* Expand soft-float TFmode calls to the SPARC ABI routines. */
2425 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2447 emit_soft_tfmode_libcall (func, 3, operands);
2451 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2455 gcc_assert (code == SQRT);
2458 emit_soft_tfmode_libcall (func, 2, operands);
2462 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2469 switch (GET_MODE (operands[1]))
2482 case FLOAT_TRUNCATE:
2483 switch (GET_MODE (operands[0]))
2497 switch (GET_MODE (operands[1]))
2502 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2512 case UNSIGNED_FLOAT:
2513 switch (GET_MODE (operands[1]))
2518 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2529 switch (GET_MODE (operands[0]))
2543 switch (GET_MODE (operands[0]))
2560 emit_soft_tfmode_libcall (func, 2, operands);
2563 /* Expand a hard-float tfmode operation. All arguments must be in registers.  */
2567 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2571 if (GET_RTX_CLASS (code) == RTX_UNARY)
2573 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2574 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2578 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2579 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2580 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2581 operands[1], operands[2]);
2584 if (register_operand (operands[0], VOIDmode))
2587 dest = gen_reg_rtx (GET_MODE (operands[0]));
2589 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2591 if (dest != operands[0])
2592 emit_move_insn (operands[0], dest);
2596 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2598 if (TARGET_HARD_QUAD)
2599 emit_hard_tfmode_operation (code, operands);
2601 emit_soft_tfmode_binop (code, operands);
2605 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2607 if (TARGET_HARD_QUAD)
2608 emit_hard_tfmode_operation (code, operands);
2610 emit_soft_tfmode_unop (code, operands);
2614 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2616 if (TARGET_HARD_QUAD)
2617 emit_hard_tfmode_operation (code, operands);
2619 emit_soft_tfmode_cvt (code, operands);
2622 /* Return nonzero if a branch/jump/call instruction will emit a
2623 nop into its delay slot. */
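/* After delayed-branch scheduling, an insn with a filled slot is
   wrapped together with its occupant in a SEQUENCE pattern, e.g.

     (sequence [(call_insn ...) (insn ...)])

   so finding such a SEQUENCE below means the slot is filled.  */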
2626 empty_delay_slot (rtx insn)
2630 /* If no previous instruction (should not happen), return true. */
2631 if (PREV_INSN (insn) == NULL)
2634 seq = NEXT_INSN (PREV_INSN (insn));
2635 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2641 /* Return nonzero if TRIAL can go into the call delay slot. */
2644 tls_call_delay (rtx trial)
2649 call __tls_get_addr, %tgd_call (foo)
2650 add %l7, %o0, %o0, %tgd_add (foo)
2651 while Sun as/ld does not. */
2652 if (TARGET_GNU_TLS || !TARGET_TLS)
2655 pat = PATTERN (trial);
2657 /* We must reject tgd_add{32|64}, i.e.
2658 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2659 and tldm_add{32|64}, i.e.
2660 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2662 if (GET_CODE (pat) == SET
2663 && GET_CODE (SET_SRC (pat)) == PLUS)
2665 rtx unspec = XEXP (SET_SRC (pat), 1);
2667 if (GET_CODE (unspec) == UNSPEC
2668 && (XINT (unspec, 1) == UNSPEC_TLSGD
2669 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2676 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2677 instruction. RETURN_P is true if the v9 variant 'return' is to be
2678 considered in the test too.
2680 TRIAL must be a SET whose destination is a REG appropriate for the
2681 'restore' instruction or, if RETURN_P is true, for the 'return' instruction.  */
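/* For example, a pending delay-slot insn that adds two registers into
   the result register can be folded into the single instruction

	restore	%o0, %g0, %o0

   where the destination is named in the caller's register window.  */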
2685 eligible_for_restore_insn (rtx trial, bool return_p)
2687 rtx pat = PATTERN (trial);
2688 rtx src = SET_SRC (pat);
2690 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2691 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2692 && arith_operand (src, GET_MODE (src)))
2695 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2697 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2700 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2701 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2702 && arith_double_operand (src, GET_MODE (src)))
2703 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2705 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2706 else if (! TARGET_FPU && register_operand (src, SFmode))
2709 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2710 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2713 /* If we have the 'return' instruction, anything that does not use
2714 local or output registers and can go into a delay slot wins. */
2715 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2716 && (get_attr_in_uncond_branch_delay (trial)
2717 == IN_UNCOND_BRANCH_DELAY_TRUE))
2720 /* The 'restore src1,src2,dest' pattern for SImode. */
2721 else if (GET_CODE (src) == PLUS
2722 && register_operand (XEXP (src, 0), SImode)
2723 && arith_operand (XEXP (src, 1), SImode))
2726 /* The 'restore src1,src2,dest' pattern for DImode. */
2727 else if (GET_CODE (src) == PLUS
2728 && register_operand (XEXP (src, 0), DImode)
2729 && arith_double_operand (XEXP (src, 1), DImode))
2732 /* The 'restore src1,%lo(src2),dest' pattern. */
2733 else if (GET_CODE (src) == LO_SUM
2734 && ! TARGET_CM_MEDMID
2735 && ((register_operand (XEXP (src, 0), SImode)
2736 && immediate_operand (XEXP (src, 1), SImode))
2738 && register_operand (XEXP (src, 0), DImode)
2739 && immediate_operand (XEXP (src, 1), DImode))))
2742 /* The 'restore src,src,dest' pattern. */
2743 else if (GET_CODE (src) == ASHIFT
2744 && (register_operand (XEXP (src, 0), SImode)
2745 || register_operand (XEXP (src, 0), DImode))
2746 && XEXP (src, 1) == const1_rtx)
2752 /* Return nonzero if TRIAL can go into the function return's delay slot.  */
2756 eligible_for_return_delay (rtx trial)
2760 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2763 if (get_attr_length (trial) != 1)
2766 /* If there are any call-saved registers, we should scan TRIAL to check
2767 that it does not reference them. For now just make it easy. */
2771 /* If the function uses __builtin_eh_return, the eh_return machinery
2772 occupies the delay slot. */
2773 if (crtl->calls_eh_return)
2776 /* In the case of a true leaf function, anything can go into the slot. */
2777 if (sparc_leaf_function_p)
2778 return get_attr_in_uncond_branch_delay (trial)
2779 == IN_UNCOND_BRANCH_DELAY_TRUE;
2781 pat = PATTERN (trial);
2783 /* Otherwise, only operations which can be done in tandem with
2784 a `restore' or `return' insn can go into the delay slot. */
2785 if (GET_CODE (SET_DEST (pat)) != REG
2786 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2789 /* If this instruction sets up a floating point register and we have a return
2790 instruction, it can probably go in. But restore will not work with FP_REGS. */
2792 if (REGNO (SET_DEST (pat)) >= 32)
2794 && ! epilogue_renumber (&pat, 1)
2795 && (get_attr_in_uncond_branch_delay (trial)
2796 == IN_UNCOND_BRANCH_DELAY_TRUE));
2798 return eligible_for_restore_insn (trial, true);
2801 /* Return nonzero if TRIAL can go into the sibling call's delay slot.  */
2805 eligible_for_sibcall_delay (rtx trial)
2809 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2812 if (get_attr_length (trial) != 1)
2815 pat = PATTERN (trial);
2817 if (sparc_leaf_function_p)
2819 /* If the tail call is done using the call instruction,
2820 we have to restore %o7 in the delay slot. */
2821 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2824 /* %g1 is used to build the function address */
2825 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2831 /* Otherwise, only operations which can be done in tandem with
2832 a `restore' insn can go into the delay slot. */
2833 if (GET_CODE (SET_DEST (pat)) != REG
2834 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2835 || REGNO (SET_DEST (pat)) >= 32)
2838 /* If it mentions %o7, it can't go in, because sibcall will clobber it before it is restored. */
2840 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2843 return eligible_for_restore_insn (trial, false);
2847 short_branch (int uid1, int uid2)
2849 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2851 /* Leave a few words of "slop". */
2852 if (delta >= -1023 && delta <= 1022)
2858 /* Return nonzero if REG is not used after INSN.
2859 We assume REG is a reload reg, and therefore does
2860 not live past labels or calls or jumps. */
2862 reg_unused_after (rtx reg, rtx insn)
2864 enum rtx_code code, prev_code = UNKNOWN;
2866 while ((insn = NEXT_INSN (insn)))
2868 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2871 code = GET_CODE (insn);
2872 if (GET_CODE (insn) == CODE_LABEL)
2877 rtx set = single_set (insn);
2878 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2881 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2883 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2891 /* Determine if it's legal to put X into the constant pool. This
2892 is not possible if X contains the address of a symbol that is
2893 not constant (TLS) or not known at final link time (PIC). */
2896 sparc_cannot_force_const_mem (rtx x)
2898 switch (GET_CODE (x))
2903 /* Accept all non-symbolic constants. */
2907 /* Labels are OK iff we are non-PIC. */
2908 return flag_pic != 0;
2911 /* 'Naked' TLS symbol references are never OK,
2912 non-TLS symbols are OK iff we are non-PIC. */
2913 if (SYMBOL_REF_TLS_MODEL (x))
2916 return flag_pic != 0;
2919 return sparc_cannot_force_const_mem (XEXP (x, 0));
2922 return sparc_cannot_force_const_mem (XEXP (x, 0))
2923 || sparc_cannot_force_const_mem (XEXP (x, 1));
2932 static GTY(()) char pic_helper_symbol_name[256];
2933 static GTY(()) rtx pic_helper_symbol;
2934 static GTY(()) bool pic_helper_emitted_p = false;
2935 static GTY(()) rtx global_offset_table;
2937 /* Ensure that we are not using patterns that are not OK with PIC. */
2945 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2946 && (GET_CODE (recog_data.operand[i]) != CONST
2947 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2948 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2949 == global_offset_table)
2950 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2958 /* Return true if X is an address which needs a temporary register when
2959 reloaded while generating PIC code. */
2962 pic_address_needs_scratch (rtx x)
2964 /* An address which is a symbolic operand plus a non-SMALL_INT offset needs a temp reg. */
2965 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2966 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2967 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2968 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2974 /* Determine if a given RTX is a valid constant. We already know this
2975 satisfies CONSTANT_P. */
2978 legitimate_constant_p (rtx x)
2982 switch (GET_CODE (x))
2985 /* TLS symbols are not constant. */
2986 if (SYMBOL_REF_TLS_MODEL (x))
2991 inner = XEXP (x, 0);
2993 /* Offsets of TLS symbols are never valid.
2994 Discourage CSE from creating them. */
2995 if (GET_CODE (inner) == PLUS
2996 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
3001 if (GET_MODE (x) == VOIDmode)
3004 /* Floating point constants are generally not ok.
3005 The only exception is 0.0 in VIS. */
3007 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
3008 && const_zero_operand (x, GET_MODE (x)))
3014 /* Vector constants are generally not ok.
3015 The only exception is 0 in VIS. */
3017 && const_zero_operand (x, GET_MODE (x)))
3029 /* Determine if a given RTX is a valid constant address. */
3032 constant_address_p (rtx x)
3034 switch (GET_CODE (x))
3042 if (flag_pic && pic_address_needs_scratch (x))
3044 return legitimate_constant_p (x);
3047 return !flag_pic && legitimate_constant_p (x);
3054 /* Nonzero if the constant value X is a legitimate general operand
3055 when generating PIC code. It is given that flag_pic is on and
3056 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3059 legitimate_pic_operand_p (rtx x)
3061 if (pic_address_needs_scratch (x))
3063 if (SPARC_SYMBOL_REF_TLS_P (x)
3064 || (GET_CODE (x) == CONST
3065 && GET_CODE (XEXP (x, 0)) == PLUS
3066 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
3071 /* Return nonzero if ADDR is a valid memory address.
3072 STRICT specifies whether strict register checking applies. */
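/* The forms accepted below correspond to the SPARC addressing modes:

	ld	[%o0 + %o1], %o2	! REG + REG
	ld	[%o0 + 42], %o2		! REG + 13-bit signed immediate
	ld	[%o0 + %lo(sym)], %o2	! LO_SUM, after a sethi %hi(sym)

   plus a bare REG and, for small absolute addresses, a CONST_INT.  */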
3075 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3077 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3079 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3081 else if (GET_CODE (addr) == PLUS)
3083 rs1 = XEXP (addr, 0);
3084 rs2 = XEXP (addr, 1);
3086 /* Canonicalize. REG comes first; if there are no regs,
3087 LO_SUM comes first. */
3089 && GET_CODE (rs1) != SUBREG
3091 || GET_CODE (rs2) == SUBREG
3092 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3094 rs1 = XEXP (addr, 1);
3095 rs2 = XEXP (addr, 0);
3099 && rs1 == pic_offset_table_rtx
3101 && GET_CODE (rs2) != SUBREG
3102 && GET_CODE (rs2) != LO_SUM
3103 && GET_CODE (rs2) != MEM
3104 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
3105 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3106 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3108 || GET_CODE (rs1) == SUBREG)
3109 && RTX_OK_FOR_OFFSET_P (rs2)))
3114 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3115 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3117 /* We prohibit REG + REG for TFmode when there are no quad move insns
3118 and we consequently need to split. We do this because REG+REG
3119 is not an offsettable address. If we get the situation in reload
3120 where source and destination of a movtf pattern are both MEMs with
3121 REG+REG address, then only one of them gets converted to an
3122 offsettable address. */
3124 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3127 /* We prohibit REG + REG on ARCH32 if not optimizing for
3128 DFmode/DImode because then mem_min_alignment is likely to be zero
3129 after reload and the forced split would lack a matching splitter pattern.  */
3131 if (TARGET_ARCH32 && !optimize
3132 && (mode == DFmode || mode == DImode))
3135 else if (USE_AS_OFFSETABLE_LO10
3136 && GET_CODE (rs1) == LO_SUM
3138 && ! TARGET_CM_MEDMID
3139 && RTX_OK_FOR_OLO10_P (rs2))
3142 imm1 = XEXP (rs1, 1);
3143 rs1 = XEXP (rs1, 0);
3144 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3148 else if (GET_CODE (addr) == LO_SUM)
3150 rs1 = XEXP (addr, 0);
3151 imm1 = XEXP (addr, 1);
3153 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3156 /* We can't allow TFmode in 32-bit mode, because an offset greater
3157 than the alignment (8) may cause the LO_SUM to overflow. */
3158 if (mode == TFmode && TARGET_ARCH32)
3161 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3166 if (GET_CODE (rs1) == SUBREG)
3167 rs1 = SUBREG_REG (rs1);
3173 if (GET_CODE (rs2) == SUBREG)
3174 rs2 = SUBREG_REG (rs2);
3181 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3182 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3187 if ((REGNO (rs1) >= 32
3188 && REGNO (rs1) != FRAME_POINTER_REGNUM
3189 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3191 && (REGNO (rs2) >= 32
3192 && REGNO (rs2) != FRAME_POINTER_REGNUM
3193 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3199 /* Construct the SYMBOL_REF for the __tls_get_addr function. */
3201 static GTY(()) rtx sparc_tls_symbol;
3204 sparc_tls_get_addr (void)
3206 if (!sparc_tls_symbol)
3207 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3209 return sparc_tls_symbol;
3213 sparc_tls_got (void)
3218 crtl->uses_pic_offset_table = 1;
3219 return pic_offset_table_rtx;
3222 if (!global_offset_table)
3223 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3224 temp = gen_reg_rtx (Pmode);
3225 emit_move_insn (temp, global_offset_table);
3229 /* Return 1 if *X is a thread-local symbol. */
3232 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3234 return SPARC_SYMBOL_REF_TLS_P (*x);
3237 /* Return 1 if X contains a thread-local symbol. */
3240 sparc_tls_referenced_p (rtx x)
3242 if (!TARGET_HAVE_TLS)
3245 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3248 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3249 this (thread-local) address. */
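/* For reference, the 32-bit global-dynamic sequence built below is
   roughly, with %l7 holding the GOT pointer:

	sethi	%tgd_hi22(sym), %o1
	add	%o1, %tgd_lo10(sym), %o1
	add	%l7, %o1, %o0, %tgd_add(sym)
	call	__tls_get_addr, %tgd_call(sym)
	 nop

   The other models replace the call with a GOT load (initial-exec)
   or plain %g7-relative arithmetic (local-exec).  */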
3252 legitimize_tls_address (rtx addr)
3254 rtx temp1, temp2, temp3, ret, o0, got, insn;
3256 gcc_assert (can_create_pseudo_p ());
3258 if (GET_CODE (addr) == SYMBOL_REF)
3259 switch (SYMBOL_REF_TLS_MODEL (addr))
3261 case TLS_MODEL_GLOBAL_DYNAMIC:
3263 temp1 = gen_reg_rtx (SImode);
3264 temp2 = gen_reg_rtx (SImode);
3265 ret = gen_reg_rtx (Pmode);
3266 o0 = gen_rtx_REG (Pmode, 8);
3267 got = sparc_tls_got ();
3268 emit_insn (gen_tgd_hi22 (temp1, addr));
3269 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3272 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3273 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3278 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3279 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3282 CALL_INSN_FUNCTION_USAGE (insn)
3283 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3284 CALL_INSN_FUNCTION_USAGE (insn));
3285 insn = get_insns ();
3287 emit_libcall_block (insn, ret, o0, addr);
3290 case TLS_MODEL_LOCAL_DYNAMIC:
3292 temp1 = gen_reg_rtx (SImode);
3293 temp2 = gen_reg_rtx (SImode);
3294 temp3 = gen_reg_rtx (Pmode);
3295 ret = gen_reg_rtx (Pmode);
3296 o0 = gen_rtx_REG (Pmode, 8);
3297 got = sparc_tls_got ();
3298 emit_insn (gen_tldm_hi22 (temp1));
3299 emit_insn (gen_tldm_lo10 (temp2, temp1));
3302 emit_insn (gen_tldm_add32 (o0, got, temp2));
3303 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3308 emit_insn (gen_tldm_add64 (o0, got, temp2));
3309 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3312 CALL_INSN_FUNCTION_USAGE (insn)
3313 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3314 CALL_INSN_FUNCTION_USAGE (insn));
3315 insn = get_insns ();
3317 emit_libcall_block (insn, temp3, o0,
3318 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3319 UNSPEC_TLSLD_BASE));
3320 temp1 = gen_reg_rtx (SImode);
3321 temp2 = gen_reg_rtx (SImode);
3322 emit_insn (gen_tldo_hix22 (temp1, addr));
3323 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3325 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3327 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3330 case TLS_MODEL_INITIAL_EXEC:
3331 temp1 = gen_reg_rtx (SImode);
3332 temp2 = gen_reg_rtx (SImode);
3333 temp3 = gen_reg_rtx (Pmode);
3334 got = sparc_tls_got ();
3335 emit_insn (gen_tie_hi22 (temp1, addr));
3336 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3338 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3340 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3343 ret = gen_reg_rtx (Pmode);
3345 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3348 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3352 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3355 case TLS_MODEL_LOCAL_EXEC:
3356 temp1 = gen_reg_rtx (Pmode);
3357 temp2 = gen_reg_rtx (Pmode);
3360 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3361 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3365 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3366 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3368 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3376 gcc_unreachable (); /* for now ... */
3382 /* Legitimize PIC addresses. If the address is already position-independent,
3383 we return ORIG. Newly generated position-independent addresses go into a
3384 reg. This is REG if nonzero, otherwise we allocate register(s) as necessary.  */
3388 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3391 if (GET_CODE (orig) == SYMBOL_REF
3392 /* See the comment in sparc_expand_move. */
3393 || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
3395 rtx pic_ref, address;
3400 gcc_assert (! reload_in_progress && ! reload_completed);
3401 reg = gen_reg_rtx (Pmode);
3406 /* If not during reload, allocate another temp reg here for loading
3407 in the address, so that these instructions can be optimized
3409 rtx temp_reg = ((reload_in_progress || reload_completed)
3410 ? reg : gen_reg_rtx (Pmode));
3412 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3413 won't get confused into thinking that these two instructions
3414 are loading in the true address of the symbol. If in the
3415 future a PIC rtx exists, that should be used instead. */
3418 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3419 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3423 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3424 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3431 pic_ref = gen_const_mem (Pmode,
3432 gen_rtx_PLUS (Pmode,
3433 pic_offset_table_rtx, address));
3434 crtl->uses_pic_offset_table = 1;
3435 insn = emit_move_insn (reg, pic_ref);
3436 /* Put a REG_EQUAL note on this insn, so that it can be optimized by loop.  */
3438 set_unique_reg_note (insn, REG_EQUAL, orig);
3441 else if (GET_CODE (orig) == CONST)
3445 if (GET_CODE (XEXP (orig, 0)) == PLUS
3446 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3451 gcc_assert (! reload_in_progress && ! reload_completed);
3452 reg = gen_reg_rtx (Pmode);
3455 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3456 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3457 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3458 base == reg ? 0 : reg);
3460 if (GET_CODE (offset) == CONST_INT)
3462 if (SMALL_INT (offset))
3463 return plus_constant (base, INTVAL (offset));
3464 else if (! reload_in_progress && ! reload_completed)
3465 offset = force_reg (Pmode, offset);
3467 /* If we reach here, then something is seriously wrong. */
3470 return gen_rtx_PLUS (Pmode, base, offset);
3472 else if (GET_CODE (orig) == LABEL_REF)
3473 /* ??? Why do we do this? */
3474 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3475 the register is live instead, in case it is eliminated. */
3476 crtl->uses_pic_offset_table = 1;
3481 /* Try machine-dependent ways of modifying an illegitimate address X
3482 to be legitimate. If we find one, return the new, valid address.
3484 OLDX is the address as it was before break_out_memory_refs was called.
3485 In some cases it is useful to look at this to decide what needs to be done.
3487 MODE is the mode of the operand pointed to by X.
3489 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
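/* E.g. (plus (mult (reg X) (const_int 4)) (reg Y)) is rewritten as
   (plus (reg Y) (reg Z)) with Z loaded by force_operand, yielding a
   valid REG + REG address.  */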
3492 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3493 enum machine_mode mode)
3497 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3498 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3499 force_operand (XEXP (x, 0), NULL_RTX));
3500 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3501 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3502 force_operand (XEXP (x, 1), NULL_RTX));
3503 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3504 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3506 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3507 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3508 force_operand (XEXP (x, 1), NULL_RTX));
3510 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3513 if (SPARC_SYMBOL_REF_TLS_P (x))
3514 x = legitimize_tls_address (x);
3516 x = legitimize_pic_address (x, mode, 0);
3517 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3518 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3519 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3520 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3521 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3522 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3523 else if (GET_CODE (x) == SYMBOL_REF
3524 || GET_CODE (x) == CONST
3525 || GET_CODE (x) == LABEL_REF)
3526 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3530 /* Emit the special PIC helper function. */
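/* With -fdelayed-branch and %l7 as the PIC register, the helper is

	jmp	%o7+8
	 add	%o7, %l7, %l7

   i.e. it adds the address of its call site to the offset which the
   caller preloaded into %l7.  */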
3533 emit_pic_helper (void)
3535 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3538 switch_to_section (text_section);
3540 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3542 ASM_OUTPUT_ALIGN (asm_out_file, align);
3543 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3544 if (flag_delayed_branch)
3545 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3546 pic_name, pic_name);
3548 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3549 pic_name, pic_name);
3551 pic_helper_emitted_p = true;
3554 /* Emit code to load the PIC register. */
3557 load_pic_register (bool delay_pic_helper)
3559 int orig_flag_pic = flag_pic;
3561 if (TARGET_VXWORKS_RTP)
3563 emit_insn (gen_vxworks_load_got ());
3564 emit_use (pic_offset_table_rtx);
3568 /* If we haven't initialized the special PIC symbols, do so now. */
3569 if (!pic_helper_symbol_name[0])
3571 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3572 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3573 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3576 /* If we haven't emitted the special PIC helper function, do so now unless
3577 we are requested to delay it. */
3578 if (!delay_pic_helper && !pic_helper_emitted_p)
3583 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3584 pic_helper_symbol));
3586 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3587 pic_helper_symbol));
3588 flag_pic = orig_flag_pic;
3590 /* Need to emit this whether or not we obey regdecls,
3591 since setjmp/longjmp can cause life info to screw up.
3592 ??? In the case where we don't obey regdecls, this is not sufficient
3593 since we may not fall out the bottom. */
3594 emit_use (pic_offset_table_rtx);
3597 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3598 address of the call target. */
3601 sparc_emit_call_insn (rtx pat, rtx addr)
3605 insn = emit_call_insn (pat);
3607 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3608 if (TARGET_VXWORKS_RTP
3610 && GET_CODE (addr) == SYMBOL_REF
3611 && (SYMBOL_REF_DECL (addr)
3612 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3613 : !SYMBOL_REF_LOCAL_P (addr)))
3615 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3616 crtl->uses_pic_offset_table = 1;
3620 /* Return 1 if RTX is a MEM which is known to be aligned to at
3621 least a DESIRED byte boundary. */
3624 mem_min_alignment (rtx mem, int desired)
3626 rtx addr, base, offset;
3628 /* If it's not a MEM we can't accept it. */
3629 if (GET_CODE (mem) != MEM)
3633 if (!TARGET_UNALIGNED_DOUBLES
3634 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3637 /* ??? The rest of the function predates MEM_ALIGN so
3638 there is probably a bit of redundancy. */
3639 addr = XEXP (mem, 0);
3640 base = offset = NULL_RTX;
3641 if (GET_CODE (addr) == PLUS)
3643 if (GET_CODE (XEXP (addr, 0)) == REG)
3645 base = XEXP (addr, 0);
3647 /* What we are saying here is that if the base
3648 REG is aligned properly, the compiler will make
3649 sure any REG based index upon it will be so as well.  */
3651 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3652 offset = XEXP (addr, 1);
3654 offset = const0_rtx;
3657 else if (GET_CODE (addr) == REG)
3660 offset = const0_rtx;
3663 if (base != NULL_RTX)
3665 int regno = REGNO (base);
3667 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3669 /* Check if the compiler has recorded some information
3670 about the alignment of the base REG. If reload has
3671 completed, we already matched with proper alignments.
3672 If not running global_alloc, reload might give us
3673 unaligned pointer to local stack though. */
3675 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3676 || (optimize && reload_completed))
3677 && (INTVAL (offset) & (desired - 1)) == 0)
3682 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3686 else if (! TARGET_UNALIGNED_DOUBLES
3687 || CONSTANT_P (addr)
3688 || GET_CODE (addr) == LO_SUM)
3690 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3691 is true, in which case we can only assume that an access is aligned if
3692 it is to a constant address, or the address involves a LO_SUM. */
3696 /* An obviously unaligned address. */
3701 /* Vectors to keep interesting information about registers where it can easily
3702 be got. We used to use the actual mode value as the bit number, but there
3703 are more than 32 modes now. Instead we use two tables: one indexed by
3704 hard register number, and one indexed by mode. */
3706 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3707 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3708 mapped into one sparc_mode_class mode. */
3710 enum sparc_mode_class {
3711 S_MODE, D_MODE, T_MODE, O_MODE,
3712 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3716 /* Modes for single-word and smaller quantities. */
3717 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3719 /* Modes for double-word and smaller quantities. */
3720 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3722 /* Modes for quad-word and smaller quantities. */
3723 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3725 /* Modes for 8-word and smaller quantities. */
3726 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3728 /* Modes for single-float quantities. We must allow any single word or
3729 smaller quantity. This is because the fix/float conversion instructions
3730 take integer inputs/outputs from the float registers. */
3731 #define SF_MODES (S_MODES)
3733 /* Modes for double-float and smaller quantities. */
3734 #define DF_MODES (S_MODES | D_MODES)
3736 /* Modes for double-float only quantities. */
3737 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3739 /* Modes for quad-float only quantities. */
3740 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3742 /* Modes for quad-float and smaller quantities. */
3743 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3745 /* Modes for quad-float and double-float quantities. */
3746 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3748 /* Modes for quad-float pair only quantities. */
3749 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3751 /* Modes for quad-float pairs and smaller quantities. */
3752 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3754 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3756 /* Modes for condition codes. */
3757 #define CC_MODES (1 << (int) CC_MODE)
3758 #define CCFP_MODES (1 << (int) CCFP_MODE)
3760 /* Value is 1 if register/mode pair is acceptable on sparc.
3761 The funny mixture of D and T modes is because integer operations
3762 do not specially operate on tetra quantities, so non-quad-aligned
3763 registers can hold quadword quantities (except %o4 and %i4 because
3764 they cross fixed registers). */
3766 /* This points to either the 32 bit or the 64 bit version. */
3767 const int *hard_regno_mode_classes;
3769 static const int hard_32bit_mode_classes[] = {
3770 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3771 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3772 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3773 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3775 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3776 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3777 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3778 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3780 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3781 and none can hold SFmode/SImode values. */
3782 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3783 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3784 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3785 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3788 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3794 static const int hard_64bit_mode_classes[] = {
3795 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3796 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3797 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3798 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3800 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3801 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3802 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3803 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3805 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3806 and none can hold SFmode/SImode values. */
3807 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3808 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3809 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3810 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3813 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3819 int sparc_mode_class [NUM_MACHINE_MODES];
3821 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3824 sparc_init_modes (void)
3828 for (i = 0; i < NUM_MACHINE_MODES; i++)
3830 switch (GET_MODE_CLASS (i))
3833 case MODE_PARTIAL_INT:
3834 case MODE_COMPLEX_INT:
3835 if (GET_MODE_SIZE (i) <= 4)
3836 sparc_mode_class[i] = 1 << (int) S_MODE;
3837 else if (GET_MODE_SIZE (i) == 8)
3838 sparc_mode_class[i] = 1 << (int) D_MODE;
3839 else if (GET_MODE_SIZE (i) == 16)
3840 sparc_mode_class[i] = 1 << (int) T_MODE;
3841 else if (GET_MODE_SIZE (i) == 32)
3842 sparc_mode_class[i] = 1 << (int) O_MODE;
3844 sparc_mode_class[i] = 0;
3846 case MODE_VECTOR_INT:
3847 if (GET_MODE_SIZE (i) <= 4)
3848 sparc_mode_class[i] = 1 << (int)SF_MODE;
3849 else if (GET_MODE_SIZE (i) == 8)
3850 sparc_mode_class[i] = 1 << (int)DF_MODE;
3853 case MODE_COMPLEX_FLOAT:
3854 if (GET_MODE_SIZE (i) <= 4)
3855 sparc_mode_class[i] = 1 << (int) SF_MODE;
3856 else if (GET_MODE_SIZE (i) == 8)
3857 sparc_mode_class[i] = 1 << (int) DF_MODE;
3858 else if (GET_MODE_SIZE (i) == 16)
3859 sparc_mode_class[i] = 1 << (int) TF_MODE;
3860 else if (GET_MODE_SIZE (i) == 32)
3861 sparc_mode_class[i] = 1 << (int) OF_MODE;
3863 sparc_mode_class[i] = 0;
3866 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3867 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3869 sparc_mode_class[i] = 1 << (int) CC_MODE;
3872 sparc_mode_class[i] = 0;
3878 hard_regno_mode_classes = hard_64bit_mode_classes;
3880 hard_regno_mode_classes = hard_32bit_mode_classes;
3882 /* Initialize the array used by REGNO_REG_CLASS. */
3883 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3885 if (i < 16 && TARGET_V8PLUS)
3886 sparc_regno_reg_class[i] = I64_REGS;
3887 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3888 sparc_regno_reg_class[i] = GENERAL_REGS;
3890 sparc_regno_reg_class[i] = FP_REGS;
3892 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3894 sparc_regno_reg_class[i] = FPCC_REGS;
3896 sparc_regno_reg_class[i] = NO_REGS;
3900 /* Compute the frame size required by the function. This function is called
3901 during the reload pass and also by sparc_expand_prologue. */
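/* Illustrative arithmetic, ignoring STARTING_FRAME_OFFSET: 40 bytes
   of locals plus 3 call-saved 4-byte registers give

     apparent_fsize = round_up8 (40) + 3 * 4 = 52
     actual_fsize   = 52 + round_up8 (outgoing_args_size)

   plus, for a non-leaf function, the register window save area, the
   whole rounded via SPARC_STACK_ALIGN.  */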
3904 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3906 int outgoing_args_size = (crtl->outgoing_args_size
3907 + REG_PARM_STACK_SPACE (current_function_decl));
3908 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3913 for (i = 0; i < 8; i++)
3914 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3919 for (i = 0; i < 8; i += 2)
3920 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3921 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3925 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3926 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3927 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3930 /* Set up values for use in prologue and epilogue. */
3931 num_gfregs = n_regs;
3936 && crtl->outgoing_args_size == 0)
3937 actual_fsize = apparent_fsize = 0;
3940 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
3941 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3942 apparent_fsize += n_regs * 4;
3943 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3946 /* Make sure nothing can clobber our register windows.
3947 If a SAVE must be done, or there is a stack-local variable,
3948 the register window area must be allocated. */
3949 if (! leaf_function_p || size > 0)
3950 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3952 return SPARC_STACK_ALIGN (actual_fsize);
3955 /* Output any necessary .register pseudo-ops. */
3958 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3960 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3966 /* Check if %g[2367] were used without
3967 .register being printed for them already. */
3968 for (i = 2; i < 8; i++)
3970 if (df_regs_ever_live_p (i)
3971 && ! sparc_hard_reg_printed [i])
3973 sparc_hard_reg_printed [i] = 1;
3974 /* %g7 is used as TLS base register, use #ignore
3975 for it instead of #scratch. */
3976 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3977 i == 7 ? "ignore" : "scratch");
3984 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3985 as needed. LOW should be double-word aligned for 32-bit registers.
3986 Return the new OFFSET. */
3989 #define SORR_RESTORE 1
3992 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3997 if (TARGET_ARCH64 && high <= 32)
3999 for (i = low; i < high; i++)
4001 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4003 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
4004 set_mem_alias_set (mem, sparc_sr_alias_set);
4005 if (action == SORR_SAVE)
4007 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4008 RTX_FRAME_RELATED_P (insn) = 1;
4010 else /* action == SORR_RESTORE */
4011 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4018 for (i = low; i < high; i += 2)
4020 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
4021 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
4022 enum machine_mode mode;
4027 mode = i < 32 ? DImode : DFmode;
4032 mode = i < 32 ? SImode : SFmode;
4037 mode = i < 32 ? SImode : SFmode;
4044 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
4045 set_mem_alias_set (mem, sparc_sr_alias_set);
4046 if (action == SORR_SAVE)
4048 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4049 RTX_FRAME_RELATED_P (insn) = 1;
4051 else /* action == SORR_RESTORE */
4052 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4054 /* Always preserve double-word alignment. */
4055 offset = (offset + 7) & -8;
4062 /* Emit code to save call-saved registers. */
4065 emit_save_or_restore_regs (int action)
4067 HOST_WIDE_INT offset;
4070 offset = frame_base_offset - apparent_fsize;
4072 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
4074 /* ??? This might be optimized a little as %g1 might already have a
4075 value close enough that a single add insn will do. */
4076 /* ??? Although, all of this is probably only a temporary fix
4077 because if %g1 can hold a function result, then
4078 sparc_expand_epilogue will lose (the result will be clobbered).  */
4080 base = gen_rtx_REG (Pmode, 1);
4081 emit_move_insn (base, GEN_INT (offset));
4082 emit_insn (gen_rtx_SET (VOIDmode,
4084 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4088 base = frame_base_reg;
4090 offset = save_or_restore_regs (0, 8, base, offset, action);
4091 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
4094 /* Generate a save_register_window insn. */
4097 gen_save_register_window (rtx increment)
4100 return gen_save_register_windowdi (increment);
4102 return gen_save_register_windowsi (increment);
4105 /* Generate an increment for the stack pointer. */
4108 gen_stack_pointer_inc (rtx increment)
4110 return gen_rtx_SET (VOIDmode,
4112 gen_rtx_PLUS (Pmode,
4117 /* Generate a decrement for the stack pointer. */
4120 gen_stack_pointer_dec (rtx decrement)
4122 return gen_rtx_SET (VOIDmode,
4124 gen_rtx_MINUS (Pmode,
4129 /* Expand the function prologue. The prologue is responsible for reserving
4130 storage for the frame, saving the call-saved registers and loading the
4131 PIC register if needed. */
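/* On 32-bit this typically boils down to, for a non-leaf function,

	save	%sp, -104, %sp

   or, for a leaf function that still needs a frame,

	add	%sp, -96, %sp

   with the two-step and scratch-register variants below handling
   frames larger than 4096 bytes (the sizes are just examples).  */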
4134 sparc_expand_prologue (void)
4139 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4140 on the final value of the flag means deferring the prologue/epilogue
4141 expansion until just before the second scheduling pass, which is too
4142 late to emit multiple epilogues or return insns.
4144 Of course we are making the assumption that the value of the flag
4145 will not change between now and its final value. Of the three parts
4146 of the formula, only the last one can reasonably vary. Let's take a
4147 closer look, after assuming that the first two are set to true
4148 (otherwise the last value is effectively silenced).
4150 If only_leaf_regs_used returns false, the global predicate will also
4151 be false so the actual frame size calculated below will be positive.
4152 As a consequence, the save_register_window insn will be emitted in
4153 the instruction stream; now this insn explicitly references %fp
4154 which is not a leaf register so only_leaf_regs_used will always
4155 return false subsequently.
4157 If only_leaf_regs_used returns true, we hope that the subsequent
4158 optimization passes won't cause non-leaf registers to pop up. For
4159 example, the regrename pass has special provisions to not rename to
4160 non-leaf registers in a leaf function. */
4161 sparc_leaf_function_p
4162 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4164 /* Need to use actual_fsize, since we are also allocating
4165 space for our callee (and our own register save area). */
4167 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4169 /* Advertise that the data calculated just above are now valid. */
4170 sparc_prologue_data_valid_p = true;
4172 if (sparc_leaf_function_p)
4174 frame_base_reg = stack_pointer_rtx;
4175 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4179 frame_base_reg = hard_frame_pointer_rtx;
4180 frame_base_offset = SPARC_STACK_BIAS;
4183 if (actual_fsize == 0)
4185 else if (sparc_leaf_function_p)
4187 if (actual_fsize <= 4096)
4188 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4189 else if (actual_fsize <= 8192)
4191 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4192 /* %sp is still the CFA register. */
4193 RTX_FRAME_RELATED_P (insn) = 1;
4195 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4199 rtx reg = gen_rtx_REG (Pmode, 1);
4200 emit_move_insn (reg, GEN_INT (-actual_fsize));
4201 insn = emit_insn (gen_stack_pointer_inc (reg));
4202 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4203 gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4206 RTX_FRAME_RELATED_P (insn) = 1;
4210 if (actual_fsize <= 4096)
4211 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4212 else if (actual_fsize <= 8192)
4214 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4215 /* %sp is not the CFA register anymore. */
4216 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4220 rtx reg = gen_rtx_REG (Pmode, 1);
4221 emit_move_insn (reg, GEN_INT (-actual_fsize));
4222 insn = emit_insn (gen_save_register_window (reg));
4225 RTX_FRAME_RELATED_P (insn) = 1;
4226 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
4227 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4231 emit_save_or_restore_regs (SORR_SAVE);
4233 /* Load the PIC register if needed. */
4234 if (flag_pic && crtl->uses_pic_offset_table)
4235 load_pic_register (false);
4238 /* This function generates the assembly code for function entry, which boils
4239 down to emitting the necessary .register directives. */
4242 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4244 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4245 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4247 sparc_output_scratch_registers (file);
4250 /* Expand the function epilogue, either normal or part of a sibcall.
4251 We emit all the instructions except the return or the call. */
4254 sparc_expand_epilogue (void)
4257 emit_save_or_restore_regs (SORR_RESTORE);
4259 if (actual_fsize == 0)
4261 else if (sparc_leaf_function_p)
4263 if (actual_fsize <= 4096)
4264 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4265 else if (actual_fsize <= 8192)
4267 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4268 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4272 rtx reg = gen_rtx_REG (Pmode, 1);
4273 emit_move_insn (reg, GEN_INT (-actual_fsize));
4274 emit_insn (gen_stack_pointer_dec (reg));
4279 /* Return true if it is appropriate to emit `return' instructions in the
4280 body of a function. */
4283 sparc_can_use_return_insn_p (void)
4285 return sparc_prologue_data_valid_p
4286 && (actual_fsize == 0 || !sparc_leaf_function_p);
4289 /* This function generates the assembly code for function exit. */
4292 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4294 /* If code does not drop into the epilogue, we still have to output
4295 a dummy nop for the sake of sane backtraces. Otherwise, if the
4296 last two instructions of a function were "call foo; dslot;" this
4297 can make the return PC of foo (i.e. address of call instruction
4298 plus 8) point to the first instruction in the next function. */
4300 rtx insn, last_real_insn;
4302 insn = get_last_insn ();
4304 last_real_insn = prev_real_insn (insn);
4306 && GET_CODE (last_real_insn) == INSN
4307 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4308 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4310 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4311 fputs("\tnop\n", file);
4313 sparc_output_deferred_case_vectors ();
4316 /* Output a 'restore' instruction. */
4319 output_restore (rtx pat)
4325 fputs ("\t restore\n", asm_out_file);
4329 gcc_assert (GET_CODE (pat) == SET);
4331 operands[0] = SET_DEST (pat);
4332 pat = SET_SRC (pat);
4334 switch (GET_CODE (pat))
4337 operands[1] = XEXP (pat, 0);
4338 operands[2] = XEXP (pat, 1);
4339 output_asm_insn (" restore %r1, %2, %Y0", operands);
4342 operands[1] = XEXP (pat, 0);
4343 operands[2] = XEXP (pat, 1);
4344 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4347 operands[1] = XEXP (pat, 0);
4348 gcc_assert (XEXP (pat, 1) == const1_rtx);
4349 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4353 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4358 /* Output a return. */
4361 output_return (rtx insn)
4363 if (sparc_leaf_function_p)
4365 /* This is a leaf function so we don't have to bother restoring the
4366 register window, which frees us from dealing with the convoluted
4367 semantics of restore/return. We simply output the jump to the
4368 return address and the insn in the delay slot (if any). */
4370 gcc_assert (! crtl->calls_eh_return);
4372 return "jmp\t%%o7+%)%#";
4376 /* This is a regular function so we have to restore the register window.
4377 We may have a pending insn for the delay slot, which will be either
4378 combined with the 'restore' instruction or put in the delay slot of
4379 the 'return' instruction. */
4381 if (crtl->calls_eh_return)
4383 /* If the function uses __builtin_eh_return, the eh_return
4384 machinery occupies the delay slot. */
4385 gcc_assert (! final_sequence);
4387 if (! flag_delayed_branch)
4388 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4391 fputs ("\treturn\t%i7+8\n", asm_out_file);
4393 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4395 if (flag_delayed_branch)
4396 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4398 fputs ("\t nop\n", asm_out_file);
4400 else if (final_sequence)
4404 delay = NEXT_INSN (insn);
4407 pat = PATTERN (delay);
4409 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4411 epilogue_renumber (&pat, 0);
4412 return "return\t%%i7+%)%#";
4416 output_asm_insn ("jmp\t%%i7+%)", NULL);
4417 output_restore (pat);
4418 PATTERN (delay) = gen_blockage ();
4419 INSN_CODE (delay) = -1;
4424 /* The delay slot is empty. */
4426 return "return\t%%i7+%)\n\t nop";
4427 else if (flag_delayed_branch)
4428 return "jmp\t%%i7+%)\n\t restore";
4430 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4437 /* Output a sibling call. */
4440 output_sibcall (rtx insn, rtx call_operand)
4444 gcc_assert (flag_delayed_branch);
4446 operands[0] = call_operand;
4448 if (sparc_leaf_function_p)
4450 /* This is a leaf function so we don't have to bother restoring the
4451 register window. We simply output the jump to the function and
4452 the insn in the delay slot (if any). */
4454 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4456 if (final_sequence)
4457 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4458 operands);
4459 else
4460 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4461 it into branch if possible. */
4462 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4463 operands);
4464 }
4465 else
4466 {
4467 /* This is a regular function so we have to restore the register window.
4468 We may have a pending insn for the delay slot, which will be combined
4469 with the 'restore' instruction. */
4471 output_asm_insn ("call\t%a0, 0", operands);
4473 if (final_sequence)
4474 {
4475 rtx delay = NEXT_INSN (insn);
4476 gcc_assert (delay);
4478 output_restore (PATTERN (delay));
4480 PATTERN (delay) = gen_blockage ();
4481 INSN_CODE (delay) = -1;
4483 else
4484 output_restore (NULL_RTX);
4485 }
4487 return "";
4488 }
4490 /* Functions for handling argument passing.
4492 For 32-bit, the first 6 args are normally in registers and the rest are
4493 pushed. Any arg that starts within the first 6 words is at least
4494 partially passed in a register unless its data type forbids.
4496 For 64-bit, the argument registers are laid out as an array of 16 elements
4497 and arguments are added sequentially. The first 6 int args and up to the
4498 first 16 fp args (depending on size) are passed in regs.
4500 Slot Stack Integral Float Float in structure Double Long Double
4501 ---- ----- -------- ----- ------------------ ------ -----------
4502 15 [SP+248] %f31 %f30,%f31 %d30
4503 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4504 13 [SP+232] %f27 %f26,%f27 %d26
4505 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4506 11 [SP+216] %f23 %f22,%f23 %d22
4507 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4508 9 [SP+200] %f19 %f18,%f19 %d18
4509 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4510 7 [SP+184] %f15 %f14,%f15 %d14
4511 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4512 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4513 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4514 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4515 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4516 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4517 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4519 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4521 Integral arguments are always passed as 64-bit quantities appropriately
4522 extended.
4524 Passing of floating point values is handled as follows.
4525 If a prototype is in scope:
4526 If the value is in a named argument (i.e. not a stdarg function or a
4527 value not part of the `...') then the value is passed in the appropriate
4528 fp reg.
4529 If the value is part of the `...' and is passed in one of the first 6
4530 slots then the value is passed in the appropriate int reg.
4531 If the value is part of the `...' and is not passed in one of the first 6
4532 slots then the value is passed in memory.
4533 If a prototype is not in scope:
4534 If the value is one of the first 6 arguments the value is passed in the
4535 appropriate integer reg and the appropriate fp reg.
4536 If the value is not one of the first 6 arguments the value is passed in
4537 the appropriate fp reg and in memory.
4540 Summary of the calling conventions implemented by GCC on SPARC:
4542 32-bit ABI:
4543 size argument return value
4545 small integer <4 int. reg. int. reg.
4546 word 4 int. reg. int. reg.
4547 double word 8 int. reg. int. reg.
4549 _Complex small integer <8 int. reg. int. reg.
4550 _Complex word 8 int. reg. int. reg.
4551 _Complex double word 16 memory int. reg.
4553 vector integer <=8 int. reg. FP reg.
4554 vector integer >8 memory memory
4556 float 4 int. reg. FP reg.
4557 double 8 int. reg. FP reg.
4558 long double 16 memory memory
4560 _Complex float 8 memory FP reg.
4561 _Complex double 16 memory FP reg.
4562 _Complex long double 32 memory FP reg.
4564 vector float any memory memory
4566 aggregate any memory memory
4570 64-bit ABI:
4571 size argument return value
4573 small integer <8 int. reg. int. reg.
4574 word 8 int. reg. int. reg.
4575 double word 16 int. reg. int. reg.
4577 _Complex small integer <16 int. reg. int. reg.
4578 _Complex word 16 int. reg. int. reg.
4579 _Complex double word 32 memory int. reg.
4581 vector integer <=16 FP reg. FP reg.
4582 vector integer 16<s<=32 memory FP reg.
4583 vector integer >32 memory memory
4585 float 4 FP reg. FP reg.
4586 double 8 FP reg. FP reg.
4587 long double 16 FP reg. FP reg.
4589 _Complex float 8 FP reg. FP reg.
4590 _Complex double 16 FP reg. FP reg.
4591 _Complex long double 32 memory FP reg.
4593 vector float <=16 FP reg. FP reg.
4594 vector float 16<s<=32 memory FP reg.
4595 vector float >32 memory memory
4597 aggregate <=16 reg. reg.
4598 aggregate 16<s<=32 memory reg.
4599 aggregate >32 memory memory
4603 Note #1: complex floating-point types follow the extended SPARC ABIs as
4604 implemented by the Sun compiler.
4606 Note #2: integral vector types follow the scalar floating-point types
4607 conventions to match what is implemented by the Sun VIS SDK.
4609 Note #3: floating-point vector types follow the aggregate types
4610 conventions. */
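/* Editorial sketch, not from the original sources: applying the 64-bit
   table above to a fully prototyped

     void f (int a, double b, float c);

   puts A in %o0 (slot 0), B in %d2 (slot 1) and C in %f5 (slot 2; a
   lone float is right-justified in its double-wide slot), while the
   stack words at [SP+128..SP+151] remain reserved for the three
   arguments.  */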
4613 /* Maximum number of int regs for args. */
4614 #define SPARC_INT_ARG_MAX 6
4615 /* Maximum number of fp regs for args. */
4616 #define SPARC_FP_ARG_MAX 16
4618 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
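/* Editorial sketch: ROUND_ADVANCE rounds a size in bytes up to a number
   of argument words.  With UNITS_PER_WORD == 8 (TARGET_ARCH64):
     ROUND_ADVANCE (1)  == 1   (a char still consumes a full slot)
     ROUND_ADVANCE (8)  == 1
     ROUND_ADVANCE (9)  == 2
     ROUND_ADVANCE (16) == 2  */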
4620 /* Handle the INIT_CUMULATIVE_ARGS macro.
4621 Initialize a variable CUM of type CUMULATIVE_ARGS
4622 for a call to a function whose data type is FNTYPE.
4623 For a library call, FNTYPE is 0. */
4626 init_cumulative_args (struct sparc_args *cum, tree fntype,
4627 rtx libname ATTRIBUTE_UNUSED,
4628 tree fndecl ATTRIBUTE_UNUSED)
4629 {
4630 cum->words = 0;
4631 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4632 cum->libcall_p = fntype == 0;
4635 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4636 When a prototype says `char' or `short', really pass an `int'. */
4639 sparc_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
4641 return TARGET_ARCH32 ? true : false;
4644 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4647 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4649 return TARGET_ARCH64 ? true : false;
4652 /* Scan the record type TYPE and return the following predicates:
4653 - INTREGS_P: the record contains at least one field or sub-field
4654 that is eligible for promotion in integer registers.
4655 - FP_REGS_P: the record contains at least one field or sub-field
4656 that is eligible for promotion in floating-point registers.
4657 - PACKED_P: the record contains at least one field that is packed.
4659 Sub-fields are not taken into account for the PACKED_P predicate. */
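/* Editorial examples (assuming TARGET_FPU; the caller must have zeroed
   the three flags, since this function only ever sets them):

     struct { int i; double d; }                -> *intregs_p = 1, *fpregs_p = 1
     struct { float f; }                        -> *fpregs_p = 1
     struct { int i; } __attribute__((packed))  -> *intregs_p = 1, *packed_p = 1  */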
4662 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4666 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4668 if (TREE_CODE (field) == FIELD_DECL)
4670 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4671 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4672 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4673 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4674 && TARGET_FPU)
4675 *fpregs_p = 1;
4676 else
4677 *intregs_p = 1;
4679 if (packed_p && DECL_PACKED (field))
4685 /* Compute the slot number to pass an argument in.
4686 Return the slot number or -1 if passing on the stack.
4688 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4689 the preceding args and about the function being called.
4690 MODE is the argument's machine mode.
4691 TYPE is the data type of the argument (as a tree).
4692 This is null for libcalls where that information may
4694 NAMED is nonzero if this argument is a named parameter
4695 (otherwise it is an extra parameter matching an ellipsis).
4696 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4697 *PREGNO records the register number to use if scalar type.
4698 *PPADDING records the amount of padding needed in words. */
4701 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4702 tree type, int named, int incoming_p,
4703 int *pregno, int *ppadding)
4705 int regbase = (incoming_p
4706 ? SPARC_INCOMING_INT_ARG_FIRST
4707 : SPARC_OUTGOING_INT_ARG_FIRST);
4708 int slotno = cum->words;
4709 enum mode_class mclass;
4710 int regno;
4712 *ppadding = 0;
4714 if (type && TREE_ADDRESSABLE (type))
4715 return -1;
4717 if (TARGET_ARCH32
4718 && mode == BLKmode
4719 && type
4720 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4721 return -1;
4723 /* For SPARC64, objects requiring 16-byte alignment get it. */
4724 if (TARGET_ARCH64
4725 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4726 && (slotno & 1) != 0)
4727 slotno++, *ppadding = 1;
4729 mclass = GET_MODE_CLASS (mode);
4730 if (type && TREE_CODE (type) == VECTOR_TYPE)
4732 /* Vector types deserve special treatment because they are
4733 polymorphic wrt their mode, depending upon whether VIS
4734 instructions are enabled. */
4735 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4737 /* The SPARC port defines no floating-point vector modes. */
4738 gcc_assert (mode == BLKmode);
4739 }
4740 else
4741 {
4742 /* Integral vector types should either have a vector
4743 mode or an integral mode, because we are guaranteed
4744 by pass_by_reference that their size is not greater
4745 than 16 bytes and TImode is 16-byte wide. */
4746 gcc_assert (mode != BLKmode);
4748 /* Vector integers are handled like floats according to
4749 the Sun VIS SDK. */
4750 mclass = MODE_FLOAT;
4751 }
4752 }
4754 switch (mclass)
4755 {
4756 case MODE_FLOAT:
4757 case MODE_COMPLEX_FLOAT:
4758 case MODE_VECTOR_INT:
4759 if (TARGET_ARCH64 && TARGET_FPU && named)
4760 {
4761 if (slotno >= SPARC_FP_ARG_MAX)
4762 return -1;
4763 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4764 /* Arguments filling only one single FP register are
4765 right-justified in the outer double FP register. */
4766 if (GET_MODE_SIZE (mode) <= 4)
4767 regno++;
4768 break;
4769 }
4770 /* fallthrough */
4772 case MODE_INT:
4773 case MODE_COMPLEX_INT:
4774 if (slotno >= SPARC_INT_ARG_MAX)
4775 return -1;
4776 regno = regbase + slotno;
4777 break;
4779 case MODE_RANDOM:
4780 if (mode == VOIDmode)
4781 /* MODE is VOIDmode when generating the actual call. */
4782 return -1;
4784 gcc_assert (mode == BLKmode);
4786 if (TARGET_ARCH32
4787 || !type
4788 || (TREE_CODE (type) != VECTOR_TYPE
4789 && TREE_CODE (type) != RECORD_TYPE))
4790 {
4791 if (slotno >= SPARC_INT_ARG_MAX)
4792 return -1;
4793 regno = regbase + slotno;
4795 else /* TARGET_ARCH64 && type */
4797 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4799 /* First see what kinds of registers we would need. */
4800 if (TREE_CODE (type) == VECTOR_TYPE)
4801 fpregs_p = 1;
4802 else
4803 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4805 /* The ABI obviously doesn't specify how packed structures
4806 are passed. These are defined to be passed in int regs
4807 if possible, otherwise memory. */
4808 if (packed_p || !named)
4809 fpregs_p = 0, intregs_p = 1;
4811 /* If all arg slots are filled, then must pass on stack. */
4812 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4813 return -1;
4815 /* If there are only int args and all int arg slots are filled,
4816 then must pass on stack. */
4817 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4818 return -1;
4820 /* Note that even if all int arg slots are filled, fp members may
4821 still be passed in regs if such regs are available.
4822 *PREGNO isn't set because there may be more than one, it's up
4823 to the caller to compute them. */
4824 }
4825 break;
4827 default:
4828 gcc_unreachable ();
4829 }
4831 *pregno = regno;
4832 return slotno;
4833 }
4836 /* Handle recursive register counting for structure field layout. */
4838 struct function_arg_record_value_parms
4840 rtx ret; /* return expression being built. */
4841 int slotno; /* slot number of the argument. */
4842 int named; /* whether the argument is named. */
4843 int regbase; /* regno of the base register. */
4844 int stack; /* 1 if part of the argument is on the stack. */
4845 int intoffset; /* offset of the first pending integer field. */
4846 unsigned int nregs; /* number of words passed in registers. */
4849 static void function_arg_record_value_3
4850 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4851 static void function_arg_record_value_2
4852 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4853 static void function_arg_record_value_1
4854 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4855 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
4856 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4858 /* A subroutine of function_arg_record_value. Traverse the structure
4859 recursively and determine how many registers will be required. */
4862 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
4863 struct function_arg_record_value_parms *parms,
4868 /* We need to compute how many registers are needed so we can
4869 allocate the PARALLEL but before we can do that we need to know
4870 whether there are any packed fields. The ABI obviously doesn't
4871 specify how structures are passed in this case, so they are
4872 defined to be passed in int regs if possible, otherwise memory,
4873 regardless of whether there are fp values present. */
4876 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4878 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4885 /* Compute how many registers we need. */
4886 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4888 if (TREE_CODE (field) == FIELD_DECL)
4890 HOST_WIDE_INT bitpos = startbitpos;
4892 if (DECL_SIZE (field) != 0)
4893 {
4894 if (integer_zerop (DECL_SIZE (field)))
4895 continue;
4897 if (host_integerp (bit_position (field), 1))
4898 bitpos += int_bit_position (field);
4901 /* ??? FIXME: else assume zero offset. */
4903 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4904 function_arg_record_value_1 (TREE_TYPE (field),
4905 bitpos,
4906 parms,
4907 packed_p);
4908 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4909 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4910 && TARGET_FPU
4911 && parms->named
4912 && ! packed_p)
4913 {
4914 if (parms->intoffset != -1)
4916 unsigned int startbit, endbit;
4917 int intslots, this_slotno;
4919 startbit = parms->intoffset & -BITS_PER_WORD;
4920 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4922 intslots = (endbit - startbit) / BITS_PER_WORD;
4923 this_slotno = parms->slotno + parms->intoffset
4924 / BITS_PER_WORD;
4926 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4928 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4929 /* We need to pass this field on the stack. */
4930 parms->stack = 1;
4931 }
4933 parms->nregs += intslots;
4934 parms->intoffset = -1;
4937 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4938 If it wasn't true we wouldn't be here. */
4939 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4940 && DECL_MODE (field) == BLKmode)
4941 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4942 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4943 parms->nregs += 2;
4944 else
4945 parms->nregs++;
4946 }
4947 else
4948 {
4949 if (parms->intoffset == -1)
4950 parms->intoffset = bitpos;
4956 /* A subroutine of function_arg_record_value. Assign the bits of the
4957 structure between parms->intoffset and bitpos to integer registers. */
4960 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4961 struct function_arg_record_value_parms *parms)
4963 enum machine_mode mode;
4965 unsigned int startbit, endbit;
4966 int this_slotno, intslots, intoffset;
4969 if (parms->intoffset == -1)
4970 return;
4972 intoffset = parms->intoffset;
4973 parms->intoffset = -1;
4975 startbit = intoffset & -BITS_PER_WORD;
4976 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4977 intslots = (endbit - startbit) / BITS_PER_WORD;
4978 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4980 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4981 if (intslots <= 0)
4982 return;
4984 /* If this is the trailing part of a word, only load that much into
4985 the register. Otherwise load the whole register. Note that in
4986 the latter case we may pick up unwanted bits. It's not a problem
4987 at the moment but may wish to revisit. */
4989 if (intoffset % BITS_PER_WORD != 0)
4990 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
4991 MODE_INT);
4992 else
4993 mode = word_mode;
4995 intoffset /= BITS_PER_UNIT;
4996 do
4997 {
4998 regno = parms->regbase + this_slotno;
4999 reg = gen_rtx_REG (mode, regno);
5000 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5001 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5003 this_slotno += 1;
5004 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5005 mode = word_mode;
5006 parms->nregs += 1;
5007 intslots -= 1;
5008 }
5009 while (intslots > 0);
5012 /* A subroutine of function_arg_record_value. Traverse the structure
5013 recursively and assign bits to floating point registers. Track which
5014 bits in between need integer registers; invoke function_arg_record_value_3
5015 to make that happen. */
5018 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5019 struct function_arg_record_value_parms *parms,
5025 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5027 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5034 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5036 if (TREE_CODE (field) == FIELD_DECL)
5038 HOST_WIDE_INT bitpos = startbitpos;
5040 if (DECL_SIZE (field) != 0)
5041 {
5042 if (integer_zerop (DECL_SIZE (field)))
5043 continue;
5045 if (host_integerp (bit_position (field), 1))
5046 bitpos += int_bit_position (field);
5049 /* ??? FIXME: else assume zero offset. */
5051 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5052 function_arg_record_value_2 (TREE_TYPE (field),
5053 bitpos,
5054 parms,
5055 packed_p);
5056 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5057 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5058 && TARGET_FPU
5059 && parms->named
5060 && ! packed_p)
5061 {
5062 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5063 int regno, nregs, pos;
5064 enum machine_mode mode = DECL_MODE (field);
5067 function_arg_record_value_3 (bitpos, parms);
5069 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5070 && mode == BLKmode)
5071 {
5072 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5073 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5074 }
5075 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5076 {
5077 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5078 nregs = 2;
5079 }
5080 else
5081 nregs = 1;
5083 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5084 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5085 regno++;
5086 reg = gen_rtx_REG (mode, regno);
5087 pos = bitpos / BITS_PER_UNIT;
5088 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5089 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5090 parms->nregs += 1;
5091 while (--nregs > 0)
5092 {
5093 regno += GET_MODE_SIZE (mode) / 4;
5094 reg = gen_rtx_REG (mode, regno);
5095 pos += GET_MODE_SIZE (mode);
5096 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5097 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5098 parms->nregs += 1;
5099 }
5100 }
5103 if (parms->intoffset == -1)
5104 parms->intoffset = bitpos;
5110 /* Used by function_arg and function_value to implement the complex
5111 conventions of the 64-bit ABI for passing and returning structures.
5112 Return an expression valid as a return value for the two macros
5113 FUNCTION_ARG and FUNCTION_VALUE.
5115 TYPE is the data type of the argument (as a tree).
5116 This is null for libcalls where that information may
5118 MODE is the argument's machine mode.
5119 SLOTNO is the index number of the argument's slot in the parameter array.
5120 NAMED is nonzero if this argument is a named parameter
5121 (otherwise it is an extra parameter matching an ellipsis).
5122 REGBASE is the regno of the base register for the parameter array. */
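/* Editorial illustration: for a named argument

     struct { double d; int i; }

   in slot 0 with the outgoing register base, the PARALLEL built here is

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   i.e. the double rides in the first FP slot and the trailing int is
   widened to a full word in the second integer slot.  */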
5124 static rtx
5125 function_arg_record_value (const_tree type, enum machine_mode mode,
5126 int slotno, int named, int regbase)
5127 {
5128 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5129 struct function_arg_record_value_parms parms;
5132 parms.ret = NULL_RTX;
5133 parms.slotno = slotno;
5134 parms.named = named;
5135 parms.regbase = regbase;
5136 parms.stack = 0;
5137 parms.nregs = 0;
5138 /* Compute how many registers we need. */
5140 parms.intoffset = 0;
5141 function_arg_record_value_1 (type, 0, &parms, false);
5143 /* Take into account pending integer fields. */
5144 if (parms.intoffset != -1)
5146 unsigned int startbit, endbit;
5147 int intslots, this_slotno;
5149 startbit = parms.intoffset & -BITS_PER_WORD;
5150 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5151 intslots = (endbit - startbit) / BITS_PER_WORD;
5152 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5154 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5156 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5157 /* We need to pass this field on the stack. */
5158 parms.stack = 1;
5159 }
5161 parms.nregs += intslots;
5163 nregs = parms.nregs;
5165 /* Allocate the vector and handle some annoying special cases. */
5166 if (nregs == 0)
5167 {
5168 /* ??? Empty structure has no value? Duh? */
5169 if (typesize <= 0)
5170 {
5171 /* Though there's nothing really to store, return a word register
5172 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5173 leads to breakage due to the fact that there are zero bytes to
5174 store. */
5175 return gen_rtx_REG (mode, regbase);
5176 }
5177 else
5178 {
5179 /* ??? C++ has structures with no fields, and yet a size. Give up
5180 for now and pass everything back in integer registers. */
5181 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5183 if (nregs + slotno > SPARC_INT_ARG_MAX)
5184 nregs = SPARC_INT_ARG_MAX - slotno;
5186 gcc_assert (nregs != 0);
5188 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5190 /* If at least one field must be passed on the stack, generate
5191 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5192 also be passed on the stack. We can't do much better because the
5193 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5194 of structures for which the fields passed exclusively in registers
5195 are not at the beginning of the structure. */
5196 if (parms.stack)
5197 XVECEXP (parms.ret, 0, 0)
5198 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5200 /* Fill in the entries. */
5202 parms.intoffset = 0;
5203 function_arg_record_value_2 (type, 0, &parms, false);
5204 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5206 gcc_assert (parms.nregs == nregs);
5208 return parms.ret;
5209 }
5211 /* Used by function_arg and function_value to implement the conventions
5212 of the 64-bit ABI for passing and returning unions.
5213 Return an expression valid as a return value for the two macros
5214 FUNCTION_ARG and FUNCTION_VALUE.
5216 SIZE is the size in bytes of the union.
5217 MODE is the argument's machine mode.
5218 REGNO is the hard register the union will be passed in. */
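/* Editorial illustration: a 16-byte union passed in slot 2 becomes

     (parallel [(expr_list (reg:DI %o2) (const_int 0))
                (expr_list (reg:DI %o3) (const_int 8))])

   with word_mode == DImode; unlike records, unions never use FP regs.  */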
5220 static rtx
5221 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5222 int regno)
5223 {
5224 int nwords = ROUND_ADVANCE (size), i;
5225 rtx regs;
5227 /* See comment in previous function for empty structures. */
5228 if (size == 0)
5229 return gen_rtx_REG (mode, regno);
5231 if (slotno == SPARC_INT_ARG_MAX - 1)
5232 nwords = 1;
5234 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5236 for (i = 0; i < nwords; i++)
5238 /* Unions are passed left-justified. */
5239 XVECEXP (regs, 0, i)
5240 = gen_rtx_EXPR_LIST (VOIDmode,
5241 gen_rtx_REG (word_mode, regno),
5242 GEN_INT (UNITS_PER_WORD * i));
5243 regno++;
5244 }
5246 return regs;
5247 }
5249 /* Used by function_arg and function_value to implement the conventions
5250 for passing and returning large (BLKmode) vectors.
5251 Return an expression valid as a return value for the two macros
5252 FUNCTION_ARG and FUNCTION_VALUE.
5254 SIZE is the size in bytes of the vector (at least 8 bytes).
5255 REGNO is the FP hard register the vector will be passed in. */
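/* Editorial illustration: called with SIZE == 16 and REGNO naming %f8,
   the result is

     (parallel [(expr_list (reg:DI %f8) (const_int 0))
                (expr_list (reg:DI %f10) (const_int 8))])

   one DImode chunk per even-aligned FP register pair.  */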
5257 static rtx
5258 function_arg_vector_value (int size, int regno)
5259 {
5260 int i, nregs = size / 8;
5261 rtx regs;
5263 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5265 for (i = 0; i < nregs; i++)
5267 XVECEXP (regs, 0, i)
5268 = gen_rtx_EXPR_LIST (VOIDmode,
5269 gen_rtx_REG (DImode, regno + 2*i),
5270 GEN_INT (i*8));
5271 }
5273 return regs;
5274 }
5276 /* Handle the FUNCTION_ARG macro.
5277 Determine where to put an argument to a function.
5278 Value is zero to push the argument on the stack,
5279 or a hard register in which to store the argument.
5281 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5282 the preceding args and about the function being called.
5283 MODE is the argument's machine mode.
5284 TYPE is the data type of the argument (as a tree).
5285 This is null for libcalls where that information may
5287 NAMED is nonzero if this argument is a named parameter
5288 (otherwise it is an extra parameter matching an ellipsis).
5289 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5291 rtx
5292 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5293 tree type, int named, int incoming_p)
5295 int regbase = (incoming_p
5296 ? SPARC_INCOMING_INT_ARG_FIRST
5297 : SPARC_OUTGOING_INT_ARG_FIRST);
5298 int slotno, regno, padding;
5299 enum mode_class mclass = GET_MODE_CLASS (mode);
5301 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5302 &regno, &padding);
5304 if (slotno == -1)
5305 return 0;
5306 /* Vector types deserve special treatment because they are polymorphic wrt
5307 their mode, depending upon whether VIS instructions are enabled. */
5308 if (type && TREE_CODE (type) == VECTOR_TYPE)
5310 HOST_WIDE_INT size = int_size_in_bytes (type);
5311 gcc_assert ((TARGET_ARCH32 && size <= 8)
5312 || (TARGET_ARCH64 && size <= 16));
5314 if (mode == BLKmode)
5315 return function_arg_vector_value (size,
5316 SPARC_FP_ARG_FIRST + 2*slotno);
5317 else
5318 mclass = MODE_FLOAT;
5319 }
5321 if (TARGET_ARCH32)
5322 return gen_rtx_REG (mode, regno);
5324 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5325 and are promoted to registers if possible. */
5326 if (type && TREE_CODE (type) == RECORD_TYPE)
5328 HOST_WIDE_INT size = int_size_in_bytes (type);
5329 gcc_assert (size <= 16);
5331 return function_arg_record_value (type, mode, slotno, named, regbase);
5334 /* Unions up to 16 bytes in size are passed in integer registers. */
5335 else if (type && TREE_CODE (type) == UNION_TYPE)
5337 HOST_WIDE_INT size = int_size_in_bytes (type);
5338 gcc_assert (size <= 16);
5340 return function_arg_union_value (size, mode, slotno, regno);
5343 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5344 but also have the slot allocated for them.
5345 If no prototype is in scope fp values in register slots get passed
5346 in two places, either fp regs and int regs or fp regs and memory. */
5347 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5348 && SPARC_FP_REG_P (regno))
5350 rtx reg = gen_rtx_REG (mode, regno);
5351 if (cum->prototype_p || cum->libcall_p)
5353 /* "* 2" because fp reg numbers are recorded in 4 byte
5354 quantities. */
5355 #if 0
5356 /* ??? This will cause the value to be passed in the fp reg and
5357 in the stack. When a prototype exists we want to pass the
5358 value in the reg but reserve space on the stack. That's an
5359 optimization, and is deferred [for a bit]. */
5360 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5361 return gen_rtx_PARALLEL (mode,
5363 gen_rtx_EXPR_LIST (VOIDmode,
5364 NULL_RTX, const0_rtx),
5365 gen_rtx_EXPR_LIST (VOIDmode,
5366 reg, const0_rtx)));
5367 else
5368 #endif
5369 /* ??? It seems that passing back a register even when past
5370 the area declared by REG_PARM_STACK_SPACE will allocate
5371 space appropriately, and will not copy the data onto the
5372 stack, exactly as we desire.
5374 This is due to locate_and_pad_parm being called in
5375 expand_call whenever reg_parm_stack_space > 0, which
5376 while beneficial to our example here, would seem to be
5377 in error from what had been intended. Ho hum... -- r~ */
5378 return reg;
5379 }
5381 else
5382 {
5383 rtx v0, v1;
5385 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5389 /* On incoming, we don't need to know that the value
5390 is passed in %f0 and %i0, and it confuses other parts
5391 causing needless spillage even on the simplest cases. */
5392 if (incoming_p)
5393 return reg;
5395 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5396 + (regno - SPARC_FP_ARG_FIRST) / 2);
5398 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5399 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5400 const0_rtx);
5401 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5402 }
5403 else
5404 {
5405 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5406 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5407 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5412 /* All other aggregate types are passed in an integer register in a mode
5413 corresponding to the size of the type. */
5414 else if (type && AGGREGATE_TYPE_P (type))
5416 HOST_WIDE_INT size = int_size_in_bytes (type);
5417 gcc_assert (size <= 16);
5419 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5420 }
5422 return gen_rtx_REG (mode, regno);
5423 }
5425 /* For an arg passed partly in registers and partly in memory,
5426 this is the number of bytes of registers used.
5427 For args passed entirely in registers or entirely in memory, zero.
5429 Any arg that starts in the first 6 regs but won't entirely fit in them
5430 needs partial registers on v8. On v9, structures with integer
5431 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5432 values that begin in the last fp reg [where "last fp reg" varies with the
5433 mode] will be split between that reg and memory. */
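/* Editorial example for the 32-bit ABI: a double starting in slot 5
   gets 4 bytes in %o5 and 4 on the stack, so the hook returns
   (SPARC_INT_ARG_MAX - 5) * UNITS_PER_WORD == 4; the same double
   starting in slot 4 fits entirely in %o4/%o5 and yields 0.  */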
5435 static int
5436 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5437 tree type, bool named)
5438 {
5439 int slotno, regno, padding;
5441 /* We pass 0 for incoming_p here; it doesn't matter. */
5442 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5444 if (slotno == -1)
5445 return 0;
5447 if (TARGET_ARCH32)
5448 {
5449 if ((slotno + (mode == BLKmode
5450 ? ROUND_ADVANCE (int_size_in_bytes (type))
5451 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5452 > SPARC_INT_ARG_MAX)
5453 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5454 }
5455 else
5456 {
5457 /* We are guaranteed by pass_by_reference that the size of the
5458 argument is not greater than 16 bytes, so we only need to return
5459 one word if the argument is partially passed in registers. */
5461 if (type && AGGREGATE_TYPE_P (type))
5463 int size = int_size_in_bytes (type);
5465 if (size > UNITS_PER_WORD
5466 && slotno == SPARC_INT_ARG_MAX - 1)
5467 return UNITS_PER_WORD;
5469 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5470 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5471 && ! (TARGET_FPU && named)))
5473 /* The complex types are passed as packed types. */
5474 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5475 && slotno == SPARC_INT_ARG_MAX - 1)
5476 return UNITS_PER_WORD;
5478 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5480 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5481 > SPARC_INT_ARG_MAX)
5482 return UNITS_PER_WORD;
5483 }
5484 }
5486 return 0;
5487 }
5489 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5490 Specify whether to pass the argument by reference. */
5492 static bool
5493 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5494 enum machine_mode mode, const_tree type,
5495 bool named ATTRIBUTE_UNUSED)
5496 {
5497 if (TARGET_ARCH32)
5498 /* Original SPARC 32-bit ABI says that structures and unions,
5499 and quad-precision floats are passed by reference. For Pascal,
5500 also pass arrays by reference. All other base types are passed
5503 Extended ABI (as implemented by the Sun compiler) says that all
5504 complex floats are passed by reference. Pass complex integers
5505 in registers up to 8 bytes. More generally, enforce the 2-word
5506 cap for passing arguments in registers.
5508 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5509 integers are passed like floats of the same size, that is in
5510 registers up to 8 bytes. Pass all vector floats by reference
5511 like structure and unions. */
5512 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5514 /* Catch CDImode, TFmode, DCmode and TCmode. */
5515 || GET_MODE_SIZE (mode) > 8
5516 || (type
5517 && TREE_CODE (type) == VECTOR_TYPE
5518 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5519 else
5520 /* Original SPARC 64-bit ABI says that structures and unions
5521 smaller than 16 bytes are passed in registers, as well as
5522 all other base types.
5524 Extended ABI (as implemented by the Sun compiler) says that
5525 complex floats are passed in registers up to 16 bytes. Pass
5526 all complex integers in registers up to 16 bytes. More generally,
5527 enforce the 2-word cap for passing arguments in registers.
5529 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5530 integers are passed like floats of the same size, that is in
5531 registers (up to 16 bytes). Pass all vector floats like structure
5532 and unions. */
5533 return ((type
5534 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5535 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5536 /* Catch CTImode and TCmode. */
5537 || GET_MODE_SIZE (mode) > 16);
5538 }
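/* A few editorial data points for the predicate above:
     32-bit: struct { int i; }  -> by reference (aggregate)
             long double        -> by reference (TFmode, 16 bytes)
             _Complex int       -> by value     (CSImode, 8 bytes)
     64-bit: struct { int i; }  -> by value     (4 bytes <= 16)
             long double        -> by value     (TFmode, 16 bytes)  */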
5540 /* Handle the FUNCTION_ARG_ADVANCE macro.
5541 Update the data in CUM to advance over an argument
5542 of mode MODE and data type TYPE.
5543 TYPE is null for libcalls where that information may not be available. */
5545 void
5546 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5547 tree type, int named)
5548 {
5549 int slotno, regno, padding;
5551 /* We pass 0 for incoming_p here; it doesn't matter. */
5552 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5554 /* If register required leading padding, add it. */
5555 if (slotno != -1)
5556 cum->words += padding;
5558 if (TARGET_ARCH32)
5559 {
5560 cum->words += (mode != BLKmode
5561 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5562 : ROUND_ADVANCE (int_size_in_bytes (type)));
5563 }
5564 else
5565 {
5566 if (type && AGGREGATE_TYPE_P (type))
5568 int size = int_size_in_bytes (type);
5570 if (size <= 8)
5571 ++cum->words;
5572 else if (size <= 16)
5573 cum->words += 2;
5574 else /* passed by reference */
5575 ++cum->words;
5576 }
5577 else
5578 {
5579 cum->words += (mode != BLKmode
5580 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5581 : ROUND_ADVANCE (int_size_in_bytes (type)));
5582 }
5583 }
5584 }
5586 /* Handle the FUNCTION_ARG_PADDING macro.
5587 For the 64 bit ABI structs are always stored left shifted in their
5588 argument slot. */
5590 enum direction
5591 function_arg_padding (enum machine_mode mode, const_tree type)
5592 {
5593 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5594 return upward;
5596 /* Fall back to the default. */
5597 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5600 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5601 Specify whether to return the return value in memory. */
5603 static bool
5604 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5605 {
5606 if (TARGET_ARCH32)
5607 /* Original SPARC 32-bit ABI says that structures and unions,
5608 and quad-precision floats are returned in memory. All other
5609 base types are returned in registers.
5611 Extended ABI (as implemented by the Sun compiler) says that
5612 all complex floats are returned in registers (8 FP registers
5613 at most for '_Complex long double'). Return all complex integers
5614 in registers (4 at most for '_Complex long long').
5616 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5617 integers are returned like floats of the same size, that is in
5618 registers up to 8 bytes and in memory otherwise. Return all
5619 vector floats in memory like structure and unions; note that
5620 they always have BLKmode like the latter. */
5621 return (TYPE_MODE (type) == BLKmode
5622 || TYPE_MODE (type) == TFmode
5623 || (TREE_CODE (type) == VECTOR_TYPE
5624 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5625 else
5626 /* Original SPARC 64-bit ABI says that structures and unions
5627 smaller than 32 bytes are returned in registers, as well as
5628 all other base types.
5630 Extended ABI (as implemented by the Sun compiler) says that all
5631 complex floats are returned in registers (8 FP registers at most
5632 for '_Complex long double'). Return all complex integers in
5633 registers (4 at most for '_Complex TItype').
5635 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5636 integers are returned like floats of the same size, that is in
5637 registers. Return all vector floats like structure and unions;
5638 note that they always have BLKmode like the latter. */
5639 return ((TYPE_MODE (type) == BLKmode
5640 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5641 }
5643 /* Handle the TARGET_STRUCT_VALUE target hook.
5644 Return where to find the structure return value address. */
5647 sparc_struct_value_rtx (tree fndecl, int incoming)
5648 {
5649 if (TARGET_ARCH64)
5650 return 0;
5651 else
5652 {
5653 rtx mem;
5655 if (incoming)
5656 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5657 STRUCT_VALUE_OFFSET));
5658 else
5659 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5660 STRUCT_VALUE_OFFSET));
5662 /* Only follow the SPARC ABI for fixed-size structure returns.
5663 Variable size structure returns are handled per the normal
5664 procedures in GCC. This is enabled by -mstd-struct-return. */
5665 if (incoming == 2
5666 && sparc_std_struct_return
5667 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5668 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5669 {
5670 /* We must check and adjust the return address, as it is
5671 optional as to whether the return object is really
5672 provided or not. */
5673 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5674 rtx scratch = gen_reg_rtx (SImode);
5675 rtx endlab = gen_label_rtx ();
5677 /* Calculate the return object size */
5678 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5679 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5680 /* Construct a temporary return value */
5681 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5683 /* Implement SPARC 32-bit psABI callee returns struct checking:
5686 Fetch the instruction where we will return to and see if
5687 it's an unimp instruction (the most significant 10 bits
5688 will be zero). */
5689 emit_move_insn (scratch, gen_rtx_MEM (SImode,
5690 plus_constant (ret_rtx, 8)));
5691 /* Assume the size is valid and pre-adjust */
5692 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5693 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5694 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5695 /* Assign stack temp:
5696 Write the address of the memory pointed to by temp_val into
5697 the memory pointed to by mem */
5698 emit_move_insn (mem, XEXP (temp_val, 0));
5699 emit_label (endlab);
5702 set_mem_alias_set (mem, struct_value_alias_set);
5704 return mem;
5705 }
5706 }
5707 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5708 For v9, function return values are subject to the same rules as arguments,
5709 except that up to 32 bytes may be returned in registers. */
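/* Editorial example: on TARGET_ARCH64

     struct { double a, b; } g (void);

   comes back in %d0/%d2 via the record machinery above, i.e.

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
                (expr_list (reg:DF %f2) (const_int 8))])

   whereas the 32-bit ABI would return the same struct in memory.  */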
5711 rtx
5712 function_value (const_tree type, enum machine_mode mode, int incoming_p)
5713 {
5714 /* Beware that the two values are swapped here wrt function_arg. */
5715 int regbase = (incoming_p
5716 ? SPARC_OUTGOING_INT_ARG_FIRST
5717 : SPARC_INCOMING_INT_ARG_FIRST);
5718 enum mode_class mclass = GET_MODE_CLASS (mode);
5721 /* Vector types deserve special treatment because they are polymorphic wrt
5722 their mode, depending upon whether VIS instructions are enabled. */
5723 if (type && TREE_CODE (type) == VECTOR_TYPE)
5725 HOST_WIDE_INT size = int_size_in_bytes (type);
5726 gcc_assert ((TARGET_ARCH32 && size <= 8)
5727 || (TARGET_ARCH64 && size <= 32));
5729 if (mode == BLKmode)
5730 return function_arg_vector_value (size,
5731 SPARC_FP_ARG_FIRST);
5732 else
5733 mclass = MODE_FLOAT;
5734 }
5736 if (TARGET_ARCH64 && type)
5738 /* Structures up to 32 bytes in size are returned in registers. */
5739 if (TREE_CODE (type) == RECORD_TYPE)
5741 HOST_WIDE_INT size = int_size_in_bytes (type);
5742 gcc_assert (size <= 32);
5744 return function_arg_record_value (type, mode, 0, 1, regbase);
5747 /* Unions up to 32 bytes in size are returned in integer registers. */
5748 else if (TREE_CODE (type) == UNION_TYPE)
5750 HOST_WIDE_INT size = int_size_in_bytes (type);
5751 gcc_assert (size <= 32);
5753 return function_arg_union_value (size, mode, 0, regbase);
5756 /* Objects that require it are returned in FP registers. */
5757 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5758 ;
5760 /* All other aggregate types are returned in an integer register in a
5761 mode corresponding to the size of the type. */
5762 else if (AGGREGATE_TYPE_P (type))
5764 /* All other aggregate types are passed in an integer register
5765 in a mode corresponding to the size of the type. */
5766 HOST_WIDE_INT size = int_size_in_bytes (type);
5767 gcc_assert (size <= 32);
5769 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5771 /* ??? We probably should have made the same ABI change in
5772 3.4.0 as the one we made for unions. The latter was
5773 required by the SCD though, while the former is not
5774 specified, so we favored compatibility and efficiency.
5776 Now we're stuck for aggregates larger than 16 bytes,
5777 because OImode vanished in the meantime. Let's not
5778 try to be unduly clever, and simply follow the ABI
5779 for unions in that case. */
5780 if (mode == BLKmode)
5781 return function_arg_union_value (size, mode, 0, regbase);
5786 /* This must match PROMOTE_FUNCTION_MODE. */
5787 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5788 mode = word_mode;
5791 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
5792 regno = SPARC_FP_ARG_FIRST;
5793 else
5794 regno = regbase;
5796 return gen_rtx_REG (mode, regno);
5797 }
5799 /* Do what is necessary for `va_start'. We look at the current function
5800 to determine if stdarg or varargs is used and return the address of
5801 the first unnamed parameter. */
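/* Editorial sketch: for  int f (const char *fmt, ...)  on TARGET_ARCH64,
   crtl->args.info.words is 1 after the named FMT, so the loop below
   stores %i1..%i5 into their reserved frame slots and the address of
   the %i1 slot -- where the first anonymous argument lives -- is
   returned.  */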
5803 static rtx
5804 sparc_builtin_saveregs (void)
5805 {
5806 int first_reg = crtl->args.info.words;
5807 rtx address;
5808 int regno;
5810 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5811 emit_move_insn (gen_rtx_MEM (word_mode,
5812 gen_rtx_PLUS (Pmode,
5813 frame_pointer_rtx,
5814 GEN_INT (FIRST_PARM_OFFSET (0)
5815 + (UNITS_PER_WORD
5816 * regno)))),
5817 gen_rtx_REG (word_mode,
5818 SPARC_INCOMING_INT_ARG_FIRST + regno));
5820 address = gen_rtx_PLUS (Pmode,
5822 GEN_INT (FIRST_PARM_OFFSET (0)
5823 + UNITS_PER_WORD * first_reg));
5825 return address;
5826 }
5828 /* Implement `va_start' for stdarg. */
5831 sparc_va_start (tree valist, rtx nextarg)
5833 nextarg = expand_builtin_saveregs ();
5834 std_expand_builtin_va_start (valist, nextarg);
5837 /* Implement `va_arg' for stdarg. */
5839 static tree
5840 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
5841 gimple_seq *post_p)
5842 {
5843 HOST_WIDE_INT size, rsize, align;
5846 tree ptrtype = build_pointer_type (type);
5848 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5849 {
5850 indirect = true;
5851 size = rsize = UNITS_PER_WORD;
5852 align = 0;
5853 }
5854 else
5855 {
5856 indirect = false;
5857 size = int_size_in_bytes (type);
5858 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5863 /* For SPARC64, objects requiring 16-byte alignment get it. */
5864 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5865 align = 2 * UNITS_PER_WORD;
5867 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5868 are left-justified in their slots. */
5869 if (AGGREGATE_TYPE_P (type))
5872 size = rsize = UNITS_PER_WORD;
5882 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5883 size_int (align - 1));
5884 incr = fold_convert (sizetype, incr);
5885 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
5886 size_int (-align));
5887 incr = fold_convert (ptr_type_node, incr);
5888 }
5890 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5891 addr = incr;
5893 if (BYTES_BIG_ENDIAN && size < rsize)
5894 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5895 size_int (rsize - size));
5897 if (indirect)
5898 {
5899 addr = fold_convert (build_pointer_type (ptrtype), addr);
5900 addr = build_va_arg_indirect_ref (addr);
5901 }
5903 /* If the address isn't aligned properly for the type, we need a temporary.
5904 FIXME: This is inefficient, usually we can do this in registers. */
5905 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
5907 tree tmp = create_tmp_var (type, "va_arg_tmp");
5908 tree dest_addr = build_fold_addr_expr (tmp);
5909 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
5910 3, dest_addr, addr, size_int (rsize));
5911 TREE_ADDRESSABLE (tmp) = 1;
5912 gimplify_and_add (copy, pre_p);
5913 addr = dest_addr;
5914 }
5916 else
5917 addr = fold_convert (ptrtype, addr);
5919 incr
5920 = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
5921 gimplify_assign (valist, incr, post_p);
5923 return build_va_arg_indirect_ref (addr);
5924 }
5926 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5927 Specify whether the vector mode is supported by the hardware. */
5930 sparc_vector_mode_supported_p (enum machine_mode mode)
5932 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
5935 /* Return the string to output an unconditional branch to LABEL, which is
5936 the operand number of the label.
5938 DEST is the destination insn (i.e. the label), INSN is the source. */
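/* Editorial note: when the target is within the conservatively clipped
   +-260000-instruction window, the predicted V9 "ba%*,pt %%xcc, %l0"
   form is chosen; otherwise we fall back to the plain V8 "b%*\t%l0",
   whose 22-bit displacement reaches +-8MB where V9 BPcc only reaches
   +-1MB.  */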
5940 const char *
5941 output_ubranch (rtx dest, int label, rtx insn)
5942 {
5943 static char string[64];
5944 bool v9_form = false;
5947 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5949 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5950 - INSN_ADDRESSES (INSN_UID (insn)));
5951 /* Leave some instructions for "slop". */
5952 if (delta >= -260000 && delta < 260000)
5953 v9_form = true;
5954 }
5956 if (v9_form)
5957 strcpy (string, "ba%*,pt\t%%xcc, ");
5958 else
5959 strcpy (string, "b%*\t");
5961 p = strchr (string, '\0');
5962 *p++ = '%';
5963 *p++ = 'l';
5964 *p++ = '0' + label;
5965 *p++ = '%';
5966 *p++ = '(';
5967 *p = '\0';
5969 return string;
5970 }
5972 /* Return the string to output a conditional branch to LABEL, which is
5973 the operand number of the label. OP is the conditional expression.
5974 XEXP (OP, 0) is assumed to be a condition code register (integer or
5975 floating point) and its mode specifies what kind of comparison we made.
5977 DEST is the destination insn (i.e. the label), INSN is the source.
5979 REVERSED is nonzero if we should reverse the sense of the comparison.
5981 ANNUL is nonzero if we should generate an annulling branch. */
5983 const char *
5984 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5985 rtx insn)
5986 {
5987 static char string[64];
5988 enum rtx_code code = GET_CODE (op);
5989 rtx cc_reg = XEXP (op, 0);
5990 enum machine_mode mode = GET_MODE (cc_reg);
5991 const char *labelno, *branch;
5992 int spaces = 8, far;
5993 char *p;
5995 /* v9 branches are limited to +-1MB. If it is too far away,
6008 fbne,a,pn %fcc2, .LC29
6016 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6017 if (reversed)
6018 {
6019 /* Reversal of FP compares takes care -- an ordered compare
6020 becomes an unordered compare and vice versa. */
6021 if (mode == CCFPmode || mode == CCFPEmode)
6022 code = reverse_condition_maybe_unordered (code);
6023 else
6024 code = reverse_condition (code);
6025 }
6027 /* Start by writing the branch condition. */
6028 if (mode == CCFPmode || mode == CCFPEmode)
6079 /* ??? !v9: FP branches cannot be preceded by another floating point
6080 insn. Because there is currently no concept of pre-delay slots,
6081 we can fix this only by always emitting a nop before a floating
6082 point branch. */
6086 strcpy (string, "nop\n\t");
6087 strcat (string, branch);
6100 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6112 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6133 strcpy (string, branch);
6135 spaces -= strlen (branch);
6136 p = strchr (string, '\0');
6138 /* Now add the annulling, the label, and a possible noop. */
6139 if (annul && ! far)
6140 {
6141 strcpy (p, ",a");
6142 p += 2;
6143 spaces -= 2;
6144 }
6146 if (TARGET_V9)
6147 {
6148 rtx note;
6149 int v8 = 0;
6151 if (! far && insn && INSN_ADDRESSES_SET_P ())
6153 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6154 - INSN_ADDRESSES (INSN_UID (insn)));
6155 /* Leave some instructions for "slop". */
6156 if (delta < -260000 || delta >= 260000)
6157 v8 = 1;
6158 }
6160 if (mode == CCFPmode || mode == CCFPEmode)
6162 static char v9_fcc_labelno[] = "%%fccX, ";
6163 /* Set the char indicating the number of the fcc reg to use. */
6164 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6165 labelno = v9_fcc_labelno;
6166 if (v8)
6167 {
6168 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6169 labelno = "";
6170 }
6171 }
6172 else if (mode == CCXmode || mode == CCX_NOOVmode)
6174 labelno = "%%xcc, ";
6179 labelno = "%%icc, ";
6184 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6185 {
6186 strcpy (p,
6187 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6188 ? ",pt" : ",pn");
6189 p += 3;
6190 spaces -= 3;
6191 }
6192 }
6193 else
6194 labelno = "";
6196 if (spaces > 0)
6197 *p++ = '\t';
6198 else
6199 *p++ = ' ';
6200 strcpy (p, labelno);
6201 p = strchr (p, '\0');
6204 strcpy (p, ".+12\n\t nop\n\tb\t");
6205 /* Skip the next insn if requested or
6206 if we know that it will be a nop. */
6207 if (annul || ! final_sequence)
6208 p[3] = '6';
6209 p += 14;
6210 }
6212 *p++ = '%';
6213 *p++ = 'l';
6214 *p++ = '0' + label;
6215 *p++ = '%';
6216 *p++ = '#';
6217 *p = '\0';
6219 return string;
6220 }
6221 /* Emit a library call comparison between floating point X and Y.
6222 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
6223 Return the new operator to be used in the comparison sequence.
6225 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6226 values as arguments instead of the TFmode registers themselves,
6227 that's why we cannot call emit_float_lib_cmp. */
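/* Editorial note: the switch at the bottom of this function assumes
   the documented _Qp_cmp/_Q_cmp result encoding:
     0  equal          2  greater
     1  less           3  unordered
   e.g. UNLT tests (result & 1) != 0, which catches both 1 and 3.  */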
6229 rtx
6230 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6231 {
6232 const char *qpfunc;
6233 rtx slot0, slot1, result, tem, tem2;
6234 enum machine_mode mode;
6235 enum rtx_code new_comparison;
6237 switch (comparison)
6238 {
6239 case EQ:
6240 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
6241 break;
6243 case NE:
6244 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
6245 break;
6247 case GT:
6248 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
6249 break;
6251 case GE:
6252 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
6253 break;
6255 case LT:
6256 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
6257 break;
6259 case LE:
6260 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
6261 break;
6271 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
6284 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6285 emit_move_insn (slot0, x);
6292 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6293 emit_move_insn (slot1, y);
6296 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6298 XEXP (slot0, 0), Pmode,
6299 XEXP (slot1, 0), Pmode);
6304 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6306 x, TFmode, y, TFmode);
6311 /* Immediately move the result of the libcall into a pseudo
6312 register so reload doesn't clobber the value if it needs
6313 the return register for a spill reg. */
6314 result = gen_reg_rtx (mode);
6315 emit_move_insn (result, hard_libcall_value (mode));
6317 switch (comparison)
6318 {
6319 default:
6320 return gen_rtx_NE (VOIDmode, result, const0_rtx);
6321 case ORDERED:
6322 case UNORDERED:
6323 new_comparison = (comparison == UNORDERED ? EQ : NE);
6324 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
6325 case UNGT:
6326 case UNGE:
6327 new_comparison = (comparison == UNGT ? GT : NE);
6328 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
6329 case UNLE:
6330 return gen_rtx_NE (VOIDmode, result, const2_rtx);
6331 case UNLT:
6332 tem = gen_reg_rtx (mode);
6333 if (TARGET_ARCH32)
6334 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6335 else
6336 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6337 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
6338 case UNEQ:
6339 case LTGT:
6340 tem = gen_reg_rtx (mode);
6341 if (TARGET_ARCH32)
6342 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6343 else
6344 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6345 tem2 = gen_reg_rtx (mode);
6346 if (TARGET_ARCH32)
6347 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6348 else
6349 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6350 new_comparison = (comparison == UNEQ ? EQ : NE);
6351 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
6357 /* Generate an unsigned DImode to FP conversion. This is the same code
6358 optabs would emit if we didn't have TFmode patterns. */
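/* Editorial sketch of the trick in C terms (hypothetical helper, not
   part of the compiler):

     double u64_to_double (unsigned long long x)
     {
       if ((long long) x >= 0)
         return (double) (long long) x;     [fits as a signed value]
       [halve, keeping the low bit so rounding stays correct,
        then double the result]
       double f = (double) (long long) ((x >> 1) | (x & 1));
       return f + f;
     }
*/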
6360 void
6361 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6362 {
6363 rtx neglab, donelab, i0, i1, f0, in, out;
6365 out = operands[0];
6366 in = force_reg (DImode, operands[1]);
6367 neglab = gen_label_rtx ();
6368 donelab = gen_label_rtx ();
6369 i0 = gen_reg_rtx (DImode);
6370 i1 = gen_reg_rtx (DImode);
6371 f0 = gen_reg_rtx (mode);
6373 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6375 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6376 emit_jump_insn (gen_jump (donelab));
6379 emit_label (neglab);
6381 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6382 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6383 emit_insn (gen_iordi3 (i0, i0, i1));
6384 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6385 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6387 emit_label (donelab);
6388 }
6390 /* Generate an FP to unsigned DImode conversion. This is the same code
6391 optabs would emit if we didn't have TFmode patterns. */
6393 void
6394 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6395 {
6396 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6398 out = operands[0];
6399 in = force_reg (mode, operands[1]);
6400 neglab = gen_label_rtx ();
6401 donelab = gen_label_rtx ();
6402 i0 = gen_reg_rtx (DImode);
6403 i1 = gen_reg_rtx (DImode);
6404 limit = gen_reg_rtx (mode);
6405 f0 = gen_reg_rtx (mode);
6407 emit_move_insn (limit,
6408 CONST_DOUBLE_FROM_REAL_VALUE (
6409 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6410 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6412 emit_insn (gen_rtx_SET (VOIDmode,
6414 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6415 emit_jump_insn (gen_jump (donelab));
6418 emit_label (neglab);
6420 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6421 emit_insn (gen_rtx_SET (VOIDmode,
6423 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6424 emit_insn (gen_movdi (i1, const1_rtx));
6425 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6426 emit_insn (gen_xordi3 (out, i0, i1));
6428 emit_label (donelab);
6429 }
6431 /* Return the string to output a conditional branch to LABEL, testing
6432 register REG. LABEL is the operand number of the label; REG is the
6433 operand number of the reg. OP is the conditional expression. The mode
6434 of REG says what kind of comparison we made.
6436 DEST is the destination insn (i.e. the label), INSN is the source.
6438 REVERSED is nonzero if we should reverse the sense of the comparison.
6440 ANNUL is nonzero if we should generate an annulling branch. */
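/* Editorial example: for (ne (reg:DI %o1) (const_int 0)) the near form
   is "brnz\t%o1, .LC29"; the far form inverts the test and hops over
   an unconditional "ba,pt %xcc, .LC29", as sketched above.  */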
6442 const char *
6443 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6444 int annul, rtx insn)
6445 {
6446 static char string[64];
6447 enum rtx_code code = GET_CODE (op);
6448 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6449 rtx note;
6450 int far;
6451 char *p;
6453 /* branch on register are limited to +-128KB. If it is too far away,
6466 brgez,a,pn %o1, .LC29
6472 ba,pt %xcc, .LC29 */
6474 far = get_attr_length (insn) >= 3;
6476 /* If not floating-point or if EQ or NE, we can just reverse the code. */
6477 if (reversed)
6478 code = reverse_condition (code);
6480 /* Only 64 bit versions of these instructions exist. */
6481 gcc_assert (mode == DImode);
6483 /* Start by writing the branch condition. */
6488 strcpy (string, "brnz");
6492 strcpy (string, "brz");
6496 strcpy (string, "brgez");
6500 strcpy (string, "brlz");
6504 strcpy (string, "brlez");
6508 strcpy (string, "brgz");
6515 p = strchr (string, '\0');
6517 /* Now add the annulling, reg, label, and nop. */
6524 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6527 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6532 *p = p < string + 8 ? '\t' : ' ';
6533 p++;
6534 *p++ = '%';
6535 *p++ = '0' + reg;
6536 *p++ = ',';
6537 *p++ = ' ';
6538 if (far)
6539 {
6540 int veryfar = 1, delta;
6542 if (INSN_ADDRESSES_SET_P ())
6544 delta = (INSN_ADDRESSES (INSN_UID (dest))
6545 - INSN_ADDRESSES (INSN_UID (insn)));
6546 /* Leave some instructions for "slop". */
6547 if (delta >= -260000 && delta < 260000)
6548 veryfar = 0;
6549 }
6551 strcpy (p, ".+12\n\t nop\n\t");
6552 /* Skip the next insn if requested or
6553 if we know that it will be a nop. */
6554 if (annul || ! final_sequence)
6555 p[3] = '6';
6556 p += 12;
6557 if (veryfar)
6558 {
6559 strcpy (p, "b\t");
6560 p += 2;
6561 }
6562 else
6563 {
6564 strcpy (p, "ba,pt\t%%xcc, ");
6578 /* Return 1, if any of the registers of the instruction are %l[0-7] or %o[0-7].
6579 Such instructions cannot be used in the delay slot of return insn on v9.
6580 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts. */
6583 static int
6584 epilogue_renumber (register rtx *where, int test)
6586 register const char *fmt;
6588 register enum rtx_code code;
6593 code = GET_CODE (*where);
6595 switch (code)
6596 {
6597 case REG:
6598 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6599 return 1;
6600 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6601 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
6602 return 0;
6609 /* Do not replace the frame pointer with the stack pointer because
6610 it can cause the delayed instruction to load below the stack.
6611 This occurs when instructions like:
6613 (set (reg/i:SI 24 %i0)
6614 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6615 (const_int -20 [0xffffffec])) 0))
6617 are in the return delayed slot. */
6619 if (GET_CODE (XEXP (*where, 0)) == REG
6620 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6621 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6622 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6623 return 1;
6624 break;
6626 case MEM:
6627 if (SPARC_STACK_BIAS
6628 && GET_CODE (XEXP (*where, 0)) == REG
6629 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6630 return 1;
6631 break;
6637 fmt = GET_RTX_FORMAT (code);
6639 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6644 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6645 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6646 return 1;
6647 }
6648 else if (fmt[i] == 'e'
6649 && epilogue_renumber (&(XEXP (*where, i)), test))
6650 return 1;
6651 }
6652 return 0;
6653 }
6655 /* Leaf functions and non-leaf functions have different needs. */
6657 static const int
6658 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6660 static const int
6661 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6663 static const int *const reg_alloc_orders[] = {
6664 reg_leaf_alloc_order,
6665 reg_nonleaf_alloc_order};
6668 order_regs_for_local_alloc (void)
6670 static int last_order_nonleaf = 1;
6672 if (df_regs_ever_live_p (15) != last_order_nonleaf)
6674 last_order_nonleaf = !last_order_nonleaf;
6675 memcpy ((char *) reg_alloc_order,
6676 (const char *) reg_alloc_orders[last_order_nonleaf],
6677 FIRST_PSEUDO_REGISTER * sizeof (int));
6681 /* Return 1 if REG and MEM are legitimate enough to allow the various
6682 mem<-->reg splits to be run. */
6685 sparc_splitdi_legitimate (rtx reg, rtx mem)
6687 /* Punt if we are here by mistake. */
6688 gcc_assert (reload_completed);
6690 /* We must have an offsettable memory reference. */
6691 if (! offsettable_memref_p (mem))
6692 return 0;
6694 /* If we have legitimate args for ldd/std, we do not want
6695 the split to happen. */
6696 if ((REGNO (reg) % 2) == 0
6697 && mem_min_alignment (mem, 8))
6698 return 0;
6700 return 1;
6701 }
6704 /* Return 1 if x and y are some kind of REG and they refer to
6705 different hard registers. This test is guaranteed to be
6706 run after reload. */
6709 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6711 if (GET_CODE (x) != REG)
6712 return 0;
6713 if (GET_CODE (y) != REG)
6714 return 0;
6715 if (REGNO (x) == REGNO (y))
6716 return 0;
6718 return 1;
6719 }
6720 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6721 This makes them candidates for using ldd and std insns.
6723 Note reg1 and reg2 *must* be hard registers. */
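/* Editorial examples: (%o2, %o3) and (%f4, %f5) qualify on V8;
   (%o1, %o2) never does (odd first register), and on V9 the integer
   pair (%o2, %o3) is also rejected since integer ldd is deprecated.  */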
6726 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6728 /* We might have been passed a SUBREG. */
6729 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6730 return 0;
6732 if (REGNO (reg1) % 2 != 0)
6733 return 0;
6735 /* Integer ldd is deprecated in SPARC V9 */
6736 if (TARGET_V9 && REGNO (reg1) < 32)
6737 return 0;
6739 return (REGNO (reg1) == REGNO (reg2) - 1);
6740 }
6742 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
6745 This can only happen when addr1 and addr2, the addresses in mem1
6746 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6747 addr1 must also be aligned on a 64-bit boundary.
6749 Also iff dependent_reg_rtx is not null it should not be used to
6750 compute the address for mem1, i.e. we cannot optimize a sequence
6762 But, note that the transformation from:
6767 is perfectly fine. Thus, the peephole2 patterns always pass us
6768 the destination register of the first load, never the second one.
6770 For stores we don't have a similar problem, so dependent_reg_rtx is
6771 NULL_RTX. */
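/* Editorial examples, assuming an 8-byte aligned base in %o0:
   [%o0] / [%o0+4] and [%o0+8] / [%o0+12] can merge into one ldd/std;
   [%o0+4] / [%o0+8] cannot (first offset not 0 mod 8), nor can any
   pair whose base register is the dependent_reg_rtx destination of
   the first load.  */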
6774 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6778 HOST_WIDE_INT offset1;
6780 /* The mems cannot be volatile. */
6781 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6784 /* MEM1 should be aligned on a 64-bit boundary. */
6785 if (MEM_ALIGN (mem1) < 64)
6788 addr1 = XEXP (mem1, 0);
6789 addr2 = XEXP (mem2, 0);
6791 /* Extract a register number and offset (if used) from the first addr. */
6792 if (GET_CODE (addr1) == PLUS)
6794 /* If not a REG, return zero. */
6795 if (GET_CODE (XEXP (addr1, 0)) != REG)
6799 reg1 = REGNO (XEXP (addr1, 0));
6800 /* The offset must be constant! */
6801 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6803 offset1 = INTVAL (XEXP (addr1, 1));
6806 else if (GET_CODE (addr1) != REG)
6810 reg1 = REGNO (addr1);
6811 /* This was a simple (mem (reg)) expression. Offset is 0. */
  /* Make sure the second address is a (mem (plus (reg) (const_int))).  */
6816 if (GET_CODE (addr2) != PLUS)
6819 if (GET_CODE (XEXP (addr2, 0)) != REG
6820 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6823 if (reg1 != REGNO (XEXP (addr2, 0)))
6826 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6829 /* The first offset must be evenly divisible by 8 to ensure the
     address is 64-bit aligned.  */
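  /* For instance, the offset pair (8, 12) passes both this check and
     the consecutive-address check below, whereas the pair (4, 8) is
     rejected here because the first address would only be 32-bit
     aligned.  */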
6831 if (offset1 % 8 != 0)
6834 /* The offset for the second addr must be 4 more than the first addr. */
6835 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
  /* All the tests passed.  addr1 and addr2 are valid for ldd and std
     insns.  */
  return 1;
6843 /* Return 1 if reg is a pseudo, or is the first register in
6844 a hard register pair. This makes it suitable for use in
6845 ldd and std insns. */
6848 register_ok_for_ldd (rtx reg)
6850 /* We might have been passed a SUBREG. */
6854 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6855 return (REGNO (reg) % 2 == 0);
6860 /* Return 1 if OP is a memory whose address is known to be
6861 aligned to 8-byte boundary, or a pseudo during reload.
6862 This makes it suitable for use in ldd and std insns. */
6865 memory_ok_for_ldd (rtx op)
6869 /* In 64-bit mode, we assume that the address is word-aligned. */
6870 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
6873 if ((reload_in_progress || reload_completed)
6874 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
6877 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
6879 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
6888 /* Print operand X (an rtx) in assembler syntax to file FILE.
6889 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6890 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6893 print_operand (FILE *file, rtx x, int code)
6898 /* Output an insn in a delay slot. */
      if (final_sequence)
	sparc_indent_opcode = 1;
      else
	fputs ("\n\t nop", file);
6905 /* Output an annul flag if there's nothing for the delay slot and we
6906 are optimizing. This is always used with '(' below.
6907 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6908 this is a dbx bug. So, we only do this when optimizing.
6909 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6910 Always emit a nop in case the next instruction is a branch. */
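      /* For instance, a made-up output template in the style of
	 sparc.md such as "be%* %l0%(" prints as "be,a <label>" when
	 the annul flag is emitted here, and as "be <label>" followed
	 by a nop otherwise.  */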
      if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
	fputs (",a", file);
6915 /* Output a 'nop' if there's nothing for the delay slot and we are
6916 not optimizing. This is always used with '*' above. */
6917 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6918 fputs ("\n\t nop", file);
6919 else if (final_sequence)
6920 sparc_indent_opcode = 1;
6923 /* Output the right displacement from the saved PC on function return.
6924 The caller may have placed an "unimp" insn immediately after the call
6925 so we have to account for it. This insn is used in the 32-bit ABI
6926 when calling a function that returns a non zero-sized structure. The
6927 64-bit ABI doesn't have it. Be careful to have this test be the same
6928 as that used on the call. The exception here is that when
6929 sparc_std_struct_return is enabled, the psABI is followed exactly
6930 and the adjustment is made by the code in sparc_struct_value_rtx.
	 The call emitted is the same when sparc_std_struct_return is
	 used and when it is not.  */
      if (TARGET_ARCH32
	  && cfun->returns_struct
	  && ! sparc_std_struct_return
	  && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
	      == INTEGER_CST)
	  && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6944 /* Output the Embedded Medium/Anywhere code model base register. */
6945 fputs (EMBMEDANY_BASE_REG, file);
6948 /* Print some local dynamic TLS name. */
6949 assemble_name (file, get_some_local_dynamic_name ());
6953 /* Adjust the operand to take into account a RESTORE operation. */
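      /* E.g., under this adjustment %i0 (regno 24) is printed as %o0
	 (regno 8), matching the register window shift performed by
	 the restore instruction.  */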
      if (GET_CODE (x) == CONST_INT)
	break;
6956 else if (GET_CODE (x) != REG)
6957 output_operand_lossage ("invalid %%Y operand");
6958 else if (REGNO (x) < 8)
6959 fputs (reg_names[REGNO (x)], file);
6960 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6961 fputs (reg_names[REGNO (x)-16], file);
6963 output_operand_lossage ("invalid %%Y operand");
6966 /* Print out the low order register name of a register pair. */
6967 if (WORDS_BIG_ENDIAN)
6968 fputs (reg_names[REGNO (x)+1], file);
6970 fputs (reg_names[REGNO (x)], file);
6973 /* Print out the high order register name of a register pair. */
6974 if (WORDS_BIG_ENDIAN)
6975 fputs (reg_names[REGNO (x)], file);
6977 fputs (reg_names[REGNO (x)+1], file);
6980 /* Print out the second register name of a register pair or quad.
6981 I.e., R (%o0) => %o1. */
6982 fputs (reg_names[REGNO (x)+1], file);
6985 /* Print out the third register name of a register quad.
6986 I.e., S (%o0) => %o2. */
6987 fputs (reg_names[REGNO (x)+2], file);
6990 /* Print out the fourth register name of a register quad.
6991 I.e., T (%o0) => %o3. */
6992 fputs (reg_names[REGNO (x)+3], file);
6995 /* Print a condition code register. */
6996 if (REGNO (x) == SPARC_ICC_REG)
	  /* We don't handle CC[X]_NOOVmode because they're not supposed
	     to occur here.  */
7000 if (GET_MODE (x) == CCmode)
7001 fputs ("%icc", file);
7002 else if (GET_MODE (x) == CCXmode)
7003 fputs ("%xcc", file);
7008 /* %fccN register */
7009 fputs (reg_names[REGNO (x)], file);
7012 /* Print the operand's address only. */
7013 output_address (XEXP (x, 0));
7016 /* In this case we need a register. Use %g0 if the
7017 operand is const0_rtx. */
      if (x == const0_rtx
	  || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7021 fputs ("%g0", file);
7028 switch (GET_CODE (x))
7030 case IOR: fputs ("or", file); break;
7031 case AND: fputs ("and", file); break;
7032 case XOR: fputs ("xor", file); break;
7033 default: output_operand_lossage ("invalid %%A operand");
7038 switch (GET_CODE (x))
7040 case IOR: fputs ("orn", file); break;
7041 case AND: fputs ("andn", file); break;
7042 case XOR: fputs ("xnor", file); break;
7043 default: output_operand_lossage ("invalid %%B operand");
7047 /* These are used by the conditional move instructions. */
7051 enum rtx_code rc = GET_CODE (x);
7055 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7056 if (mode == CCFPmode || mode == CCFPEmode)
7057 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7059 rc = reverse_condition (GET_CODE (x));
7063 case NE: fputs ("ne", file); break;
7064 case EQ: fputs ("e", file); break;
7065 case GE: fputs ("ge", file); break;
7066 case GT: fputs ("g", file); break;
7067 case LE: fputs ("le", file); break;
7068 case LT: fputs ("l", file); break;
7069 case GEU: fputs ("geu", file); break;
7070 case GTU: fputs ("gu", file); break;
7071 case LEU: fputs ("leu", file); break;
7072 case LTU: fputs ("lu", file); break;
7073 case LTGT: fputs ("lg", file); break;
7074 case UNORDERED: fputs ("u", file); break;
7075 case ORDERED: fputs ("o", file); break;
7076 case UNLT: fputs ("ul", file); break;
7077 case UNLE: fputs ("ule", file); break;
7078 case UNGT: fputs ("ug", file); break;
7079 case UNGE: fputs ("uge", file); break;
7080 case UNEQ: fputs ("ue", file); break;
7081 default: output_operand_lossage (code == 'c'
7082 ? "invalid %%c operand"
7083 : "invalid %%C operand");
7088 /* These are used by the movr instruction pattern. */
7092 enum rtx_code rc = (code == 'd'
			    ? reverse_condition (GET_CODE (x))
			    : GET_CODE (x));
7097 case NE: fputs ("ne", file); break;
7098 case EQ: fputs ("e", file); break;
7099 case GE: fputs ("gez", file); break;
7100 case LT: fputs ("lz", file); break;
7101 case LE: fputs ("lez", file); break;
7102 case GT: fputs ("gz", file); break;
7103 default: output_operand_lossage (code == 'd'
7104 ? "invalid %%d operand"
7105 : "invalid %%D operand");
7112 /* Print a sign-extended character. */
7113 int i = trunc_int_for_mode (INTVAL (x), QImode);
7114 fprintf (file, "%d", i);
7119 /* Operand must be a MEM; write its address. */
7120 if (GET_CODE (x) != MEM)
7121 output_operand_lossage ("invalid %%f operand");
7122 output_address (XEXP (x, 0));
7127 /* Print a sign-extended 32-bit value. */
	if (GET_CODE (x) == CONST_INT)
	  i = INTVAL (x);
	else if (GET_CODE (x) == CONST_DOUBLE)
	  i = CONST_DOUBLE_LOW (x);
7135 output_operand_lossage ("invalid %%s operand");
7138 i = trunc_int_for_mode (i, SImode);
7139 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7144 /* Do nothing special. */
7148 /* Undocumented flag. */
7149 output_operand_lossage ("invalid operand output code");
7152 if (GET_CODE (x) == REG)
7153 fputs (reg_names[REGNO (x)], file);
7154 else if (GET_CODE (x) == MEM)
7157 /* Poor Sun assembler doesn't understand absolute addressing. */
7158 if (CONSTANT_P (XEXP (x, 0)))
7159 fputs ("%g0+", file);
7160 output_address (XEXP (x, 0));
7163 else if (GET_CODE (x) == HIGH)
7165 fputs ("%hi(", file);
7166 output_addr_const (file, XEXP (x, 0));
7169 else if (GET_CODE (x) == LO_SUM)
7171 print_operand (file, XEXP (x, 0), 0);
7172 if (TARGET_CM_MEDMID)
7173 fputs ("+%l44(", file);
7175 fputs ("+%lo(", file);
7176 output_addr_const (file, XEXP (x, 1));
7179 else if (GET_CODE (x) == CONST_DOUBLE
7180 && (GET_MODE (x) == VOIDmode
7181 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7183 if (CONST_DOUBLE_HIGH (x) == 0)
7184 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7185 else if (CONST_DOUBLE_HIGH (x) == -1
7186 && CONST_DOUBLE_LOW (x) < 0)
7187 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7189 output_operand_lossage ("long long constant not a valid immediate operand");
7191 else if (GET_CODE (x) == CONST_DOUBLE)
7192 output_operand_lossage ("floating point constant not a valid immediate operand");
7193 else { output_addr_const (file, x); }
7196 /* Target hook for assembling integer objects. The sparc version has
7197 special handling for aligned DI-mode objects. */
7200 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7202 /* ??? We only output .xword's for symbols and only then in environments
7203 where the assembler can handle them. */
7204 if (aligned_p && size == 8
7205 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7209 assemble_integer_with_op ("\t.xword\t", x);
7214 assemble_aligned_integer (4, const0_rtx);
7215 assemble_aligned_integer (4, x);
7219 return default_assemble_integer (x, size, aligned_p);
7222 /* Return the value of a code used in the .proc pseudo-op that says
7223 what kind of result this function returns. For non-C types, we pick
7224 the closest C type. */
7226 #ifndef SHORT_TYPE_SIZE
7227 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7230 #ifndef INT_TYPE_SIZE
7231 #define INT_TYPE_SIZE BITS_PER_WORD
7234 #ifndef LONG_TYPE_SIZE
7235 #define LONG_TYPE_SIZE BITS_PER_WORD
7238 #ifndef LONG_LONG_TYPE_SIZE
7239 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7242 #ifndef FLOAT_TYPE_SIZE
7243 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7246 #ifndef DOUBLE_TYPE_SIZE
7247 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7250 #ifndef LONG_DOUBLE_TYPE_SIZE
7251 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7255 sparc_type_code (register tree type)
7257 register unsigned long qualifiers = 0;
7258 register unsigned shift;
7260 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7261 setting more, since some assemblers will give an error for this. Also,
7262 we must be careful to avoid shifts of 32 bits or more to avoid getting
7263 unpredictable results. */
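  /* As a worked example, for the C type "int *" the loop below adds
     the pointer qualifier (1 << 6) on its first iteration, advances
     SHIFT to 8, then reaches the INTEGER_TYPE case and returns
     (1 << 6) | 4, i.e. 0x44 (assuming int has INT_TYPE_SIZE
     precision).  */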
7265 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7267 switch (TREE_CODE (type))
7273 qualifiers |= (3 << shift);
7278 qualifiers |= (2 << shift);
7282 case REFERENCE_TYPE:
7284 qualifiers |= (1 << shift);
7288 return (qualifiers | 8);
7291 case QUAL_UNION_TYPE:
7292 return (qualifiers | 9);
7295 return (qualifiers | 10);
7298 return (qualifiers | 16);
      /* If this is a range type, consider it to be the underlying
	 type.  */
7303 if (TREE_TYPE (type) != 0)
7306 /* Carefully distinguish all the standard types of C,
7307 without messing up if the language is not C. We do this by
7308 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7309 look at both the names and the above fields, but that's redundant.
7310 Any type whose size is between two C types will be considered
7311 to be the wider of the two types. Also, we do not have a
7312 special code to use for "long long", so anything wider than
7313 long is treated the same. Note that we can't distinguish
7314 between "int" and "long" in this code if they are the same
7315 size, but that's fine, since neither can the assembler. */
7317 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7318 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7320 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7321 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7323 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7324 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7327 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
      /* If this is a range type, consider it to be the underlying
	 type.  */
7332 if (TREE_TYPE (type) != 0)
7335 /* Carefully distinguish all the standard types of C,
7336 without messing up if the language is not C. */
7338 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7339 return (qualifiers | 6);
7342 return (qualifiers | 7);
7344 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7345 /* ??? We need to distinguish between double and float complex types,
7346 but I don't know how yet because I can't reach this code from
7347 existing front-ends. */
7348 return (qualifiers | 7); /* Who knows? */
7351 case BOOLEAN_TYPE: /* Boolean truth value type. */
7352 case LANG_TYPE: /* ? */
7356 gcc_unreachable (); /* Not a type! */
7363 /* Nested function support. */
7365 /* Emit RTL insns to initialize the variable parts of a trampoline.
7366 FNADDR is an RTX for the address of the function's pure code.
7367 CXT is an RTX for the static chain value for the function.
7369 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7370 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7371 (to store insns). This is a bit excessive. Perhaps a different
7372 mechanism would be better here.
7374 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7377 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
  /* SPARC 32-bit trampoline:

	sethi	%hi(fn), %g1
	sethi	%hi(static), %g2
	jmp	%g1+%lo(fn)
	or	%g2, %lo(static), %g2

   SETHI i,r  = 00rr rrr1 00ii iiii iiii iiii iiii iiii
   JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
   */
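  /* As an illustration of the assembly performed below, with a
     hypothetical FNADDR of 0x00041230 the "sethi %hi(fn), %g1" word
     is built as 0x03000000 | (0x00041230 >> 10) = 0x03000104 and the
     "jmp %g1+%lo(fn)" word as 0x81c06000 | (0x00041230 & 0x3ff)
     = 0x81c06230, i.e. the address is split into its high 22 and low
     10 bits.  */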
7391 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7392 expand_binop (SImode, ior_optab,
7393 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7394 size_int (10), 0, 1),
7395 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7396 NULL_RTX, 1, OPTAB_DIRECT));
7399 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7400 expand_binop (SImode, ior_optab,
7401 expand_shift (RSHIFT_EXPR, SImode, cxt,
7402 size_int (10), 0, 1),
7403 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7404 NULL_RTX, 1, OPTAB_DIRECT));
7407 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7408 expand_binop (SImode, ior_optab,
7409 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7410 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7411 NULL_RTX, 1, OPTAB_DIRECT));
7414 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7415 expand_binop (SImode, ior_optab,
7416 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7417 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7418 NULL_RTX, 1, OPTAB_DIRECT));
7420 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7421 aligned on a 16 byte boundary so one flush clears it all. */
7422 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7423 if (sparc_cpu != PROCESSOR_ULTRASPARC
7424 && sparc_cpu != PROCESSOR_ULTRASPARC3
7425 && sparc_cpu != PROCESSOR_NIAGARA
7426 && sparc_cpu != PROCESSOR_NIAGARA2)
7427 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7428 plus_constant (tramp, 8)))));
7430 /* Call __enable_execute_stack after writing onto the stack to make sure
7431 the stack address is accessible. */
7432 #ifdef ENABLE_EXECUTE_STACK
7433 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7434 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7439 /* The 64-bit version is simpler because it makes more sense to load the
7440 values as "immediate" data out of the trampoline. It's also easier since
7441 we can read the PC without clobbering a register. */
7444 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
  /* SPARC 64-bit trampoline:

	rd	%pc, %g1
	ldx	[%g1+24], %g5
	jmp	%g5
	ldx	[%g1+16], %g5
	+16 bytes data
   */
7455 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7456 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7457 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7458 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7459 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7460 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7461 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7462 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7463 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7464 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7465 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7467 if (sparc_cpu != PROCESSOR_ULTRASPARC
7468 && sparc_cpu != PROCESSOR_ULTRASPARC3
7469 && sparc_cpu != PROCESSOR_NIAGARA
7470 && sparc_cpu != PROCESSOR_NIAGARA2)
7471 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7473 /* Call __enable_execute_stack after writing onto the stack to make sure
7474 the stack address is accessible. */
7475 #ifdef ENABLE_EXECUTE_STACK
7476 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7477 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7481 /* Adjust the cost of a scheduling dependency. Return the new cost of
7482 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7485 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7487 enum attr_type insn_type;
7489 if (! recog_memoized (insn))
7492 insn_type = get_attr_type (insn);
7494 if (REG_NOTE_KIND (link) == 0)
      /* Data dependency; DEP_INSN writes a register that INSN reads some
	 cycles later.  */

      /* If a load, then the dependence must be on the memory address;
	 add an extra "cycle".  Note that the cost could be two cycles
	 if the reg was written late in an instruction group; we cannot
	 tell here.  */
7503 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7506 /* Get the delay only if the address of the store is the dependence. */
7507 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7509 rtx pat = PATTERN(insn);
7510 rtx dep_pat = PATTERN (dep_insn);
7512 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7513 return cost; /* This should not happen! */
7515 /* The dependency between the two instructions was on the data that
7516 is being stored. Assume that this implies that the address of the
7517 store is not dependent. */
	  if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
	    return cost;

	  return cost + 3;  /* An approximation.  */
7524 /* A shift instruction cannot receive its data from an instruction
7525 in the same cycle; add a one cycle penalty. */
7526 if (insn_type == TYPE_SHIFT)
7527 return cost + 3; /* Split before cascade into shift. */
7531 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7532 INSN writes some cycles later. */
7534 /* These are only significant for the fpu unit; writing a fp reg before
7535 the fpu has finished with it stalls the processor. */
7537 /* Reusing an integer register causes no problems. */
7538 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7546 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7548 enum attr_type insn_type, dep_type;
7549 rtx pat = PATTERN(insn);
7550 rtx dep_pat = PATTERN (dep_insn);
7552 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7555 insn_type = get_attr_type (insn);
7556 dep_type = get_attr_type (dep_insn);
7558 switch (REG_NOTE_KIND (link))
      /* Data dependency; DEP_INSN writes a register that INSN reads some
	 cycles later.  */
7568 /* Get the delay iff the address of the store is the dependence. */
7569 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7572 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7579 /* If a load, then the dependence must be on the memory address. If
	 the addresses aren't equal, then it might be a false dependency.  */
7581 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7583 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7584 || GET_CODE (SET_DEST (dep_pat)) != MEM
7585 || GET_CODE (SET_SRC (pat)) != MEM
7586 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7587 XEXP (SET_SRC (pat), 0)))
7595 /* Compare to branch latency is 0. There is no benefit from
7596 separating compare and branch. */
7597 if (dep_type == TYPE_COMPARE)
7599 /* Floating point compare to branch latency is less than
7600 compare to conditional move. */
7601 if (dep_type == TYPE_FPCMP)
7610 /* Anti-dependencies only penalize the fpu unit. */
7611 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7623 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
7627 case PROCESSOR_SUPERSPARC:
7628 cost = supersparc_adjust_cost (insn, link, dep, cost);
7630 case PROCESSOR_HYPERSPARC:
7631 case PROCESSOR_SPARCLITE86X:
7632 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7641 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7642 int sched_verbose ATTRIBUTE_UNUSED,
7643 int max_ready ATTRIBUTE_UNUSED)
7648 sparc_use_sched_lookahead (void)
7650 if (sparc_cpu == PROCESSOR_NIAGARA
7651 || sparc_cpu == PROCESSOR_NIAGARA2)
7653 if (sparc_cpu == PROCESSOR_ULTRASPARC
7654 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7656 if ((1 << sparc_cpu) &
7657 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7658 (1 << PROCESSOR_SPARCLITE86X)))
7664 sparc_issue_rate (void)
7668 case PROCESSOR_NIAGARA:
7669 case PROCESSOR_NIAGARA2:
7673 /* Assume V9 processors are capable of at least dual-issue. */
7675 case PROCESSOR_SUPERSPARC:
7677 case PROCESSOR_HYPERSPARC:
7678 case PROCESSOR_SPARCLITE86X:
7680 case PROCESSOR_ULTRASPARC:
7681 case PROCESSOR_ULTRASPARC3:
7687 set_extends (rtx insn)
7689 register rtx pat = PATTERN (insn);
7691 switch (GET_CODE (SET_SRC (pat)))
7693 /* Load and some shift instructions zero extend. */
      /* sethi clears the high bits.  */
      /* LO_SUM is used with sethi.  sethi cleared the high
	 bits and the values used with lo_sum are positive.  */
      /* Store flag stores 0 or 1.  */
7711 rtx op0 = XEXP (SET_SRC (pat), 0);
7712 rtx op1 = XEXP (SET_SRC (pat), 1);
7713 if (GET_CODE (op1) == CONST_INT)
7714 return INTVAL (op1) >= 0;
7715 if (GET_CODE (op0) != REG)
7717 if (sparc_check_64 (op0, insn) == 1)
7719 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7724 rtx op0 = XEXP (SET_SRC (pat), 0);
7725 rtx op1 = XEXP (SET_SRC (pat), 1);
7726 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7728 if (GET_CODE (op1) == CONST_INT)
7729 return INTVAL (op1) >= 0;
7730 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7733 return GET_MODE (SET_SRC (pat)) == SImode;
7734 /* Positive integers leave the high bits zero. */
7736 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7738 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7741 return - (GET_MODE (SET_SRC (pat)) == SImode);
7743 return sparc_check_64 (SET_SRC (pat), insn);
7749 /* We _ought_ to have only one kind per function, but... */
7750 static GTY(()) rtx sparc_addr_diff_list;
7751 static GTY(()) rtx sparc_addr_list;
7754 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7756 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7758 sparc_addr_diff_list
7759 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7761 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7765 sparc_output_addr_vec (rtx vec)
7767 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7768 int idx, vlen = XVECLEN (body, 0);
7770 #ifdef ASM_OUTPUT_ADDR_VEC_START
7771 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7774 #ifdef ASM_OUTPUT_CASE_LABEL
7775 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7778 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7781 for (idx = 0; idx < vlen; idx++)
7783 ASM_OUTPUT_ADDR_VEC_ELT
7784 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7787 #ifdef ASM_OUTPUT_ADDR_VEC_END
7788 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7793 sparc_output_addr_diff_vec (rtx vec)
7795 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7796 rtx base = XEXP (XEXP (body, 0), 0);
7797 int idx, vlen = XVECLEN (body, 1);
7799 #ifdef ASM_OUTPUT_ADDR_VEC_START
7800 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7803 #ifdef ASM_OUTPUT_CASE_LABEL
7804 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7807 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7810 for (idx = 0; idx < vlen; idx++)
7812 ASM_OUTPUT_ADDR_DIFF_ELT
7815 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7816 CODE_LABEL_NUMBER (base));
7819 #ifdef ASM_OUTPUT_ADDR_VEC_END
7820 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7825 sparc_output_deferred_case_vectors (void)
7830 if (sparc_addr_list == NULL_RTX
7831 && sparc_addr_diff_list == NULL_RTX)
7834 /* Align to cache line in the function's code section. */
7835 switch_to_section (current_function_section ());
7837 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7839 ASM_OUTPUT_ALIGN (asm_out_file, align);
7841 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7842 sparc_output_addr_vec (XEXP (t, 0));
7843 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7844 sparc_output_addr_diff_vec (XEXP (t, 0));
7846 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7849 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
   unknown.  Return 1 if the high bits are zero, -1 if the register is
   sign extended.  */
7853 sparc_check_64 (rtx x, rtx insn)
7855 /* If a register is set only once it is safe to ignore insns this
7856 code does not know how to handle. The loop will either recognize
     the single set and return the correct value or fail to recognize
     it and return 0.  */
7862 gcc_assert (GET_CODE (x) == REG);
7864 if (GET_MODE (x) == DImode)
7865 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7867 if (flag_expensive_optimizations
7868 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
7874 insn = get_last_insn_anywhere ();
7879 while ((insn = PREV_INSN (insn)))
7881 switch (GET_CODE (insn))
7894 rtx pat = PATTERN (insn);
7895 if (GET_CODE (pat) != SET)
7897 if (rtx_equal_p (x, SET_DEST (pat)))
7898 return set_extends (insn);
7899 if (y && rtx_equal_p (y, SET_DEST (pat)))
7900 return set_extends (insn);
7901 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7909 /* Returns assembly code to perform a DImode shift using
7910 a 64-bit global or out register on SPARC-V8+. */
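/* For the scratch-register alternative, a 64-bit left shift (OPCODE
   "sllx") typically expands to the sequence below; the srl is only
   emitted when the low word is not already known to be zero-extended:

	sllx	%H1, 32, %3	! high word into upper half of %3
	srl	%L1, 0, %L1	! zero-extend the low word
	or	%L1, %3, %3	! combine into one 64-bit register
	sllx	%3, %2, %3	! perform the shift
	srlx	%3, 32, %H0	! split the result back
	mov	%3, %L0		! into the destination pair  */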
7912 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7914 static char asm_code[60];
7916 /* The scratch register is only required when the destination
7917 register is not a 64-bit global or out register. */
7918 if (which_alternative != 2)
7919 operands[3] = operands[0];
7921 /* We can only shift by constants <= 63. */
7922 if (GET_CODE (operands[2]) == CONST_INT)
7923 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7925 if (GET_CODE (operands[1]) == CONST_INT)
7927 output_asm_insn ("mov\t%1, %3", operands);
7931 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7932 if (sparc_check_64 (operands[1], insn) <= 0)
7933 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7934 output_asm_insn ("or\t%L1, %3, %3", operands);
  strcpy (asm_code, opcode);
7939 if (which_alternative != 2)
7940 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7942 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
7945 /* Output rtl to increment the profiler label LABELNO
7946 for profiling a function entry. */
7949 sparc_profile_hook (int labelno)
7954 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7955 if (NO_PROFILE_COUNTERS)
7957 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
7961 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7962 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7963 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7967 #ifdef OBJECT_FORMAT_ELF
7969 sparc_elf_asm_named_section (const char *name, unsigned int flags,
7972 if (flags & SECTION_MERGE)
      /* entsize cannot be expressed in this section attributes
	 encoding style.  */
7976 default_elf_asm_named_section (name, flags, decl);
7980 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7982 if (!(flags & SECTION_DEBUG))
7983 fputs (",#alloc", asm_out_file);
7984 if (flags & SECTION_WRITE)
7985 fputs (",#write", asm_out_file);
7986 if (flags & SECTION_TLS)
7987 fputs (",#tls", asm_out_file);
7988 if (flags & SECTION_CODE)
7989 fputs (",#execinstr", asm_out_file);
7991 /* ??? Handle SECTION_BSS. */
7993 fputc ('\n', asm_out_file);
7995 #endif /* OBJECT_FORMAT_ELF */
7997 /* We do not allow indirect calls to be optimized into sibling calls.
7999 We cannot use sibling calls when delayed branches are disabled
8000 because they will likely require the call delay slot to be filled.
8002 Also, on SPARC 32-bit we cannot emit a sibling call when the
8003 current function returns a structure. This is because the "unimp
8004 after call" convention would cause the callee to return to the
8005 wrong place. The generic code already disallows cases where the
8006 function being called returns a structure.
8008 It may seem strange how this last case could occur. Usually there
8009 is code after the call which jumps to epilogue code which dumps the
8010 return value into the struct return area. That ought to invalidate
8011 the sibling call right? Well, in the C++ case we can end up passing
8012 the pointer to the struct return area to a constructor (which returns
8013 void) and then nothing else happens. Such a sibling call would look
8014 valid without the added check here.
8016 VxWorks PIC PLT entries require the global pointer to be initialized
8017 on entry. We therefore can't emit sibling calls to them. */
8019 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8022 && flag_delayed_branch
8023 && (TARGET_ARCH64 || ! cfun->returns_struct)
8024 && !(TARGET_VXWORKS_RTP
8026 && !targetm.binds_local_p (decl)));
8029 /* libfunc renaming. */
8030 #include "config/gofast.h"
8033 sparc_init_libfuncs (void)
8037 /* Use the subroutines that Sun's library provides for integer
8038 multiply and divide. The `*' prevents an underscore from
	 being prepended by the compiler.  .umul is a little faster
	 than .mul.  */
8041 set_optab_libfunc (smul_optab, SImode, "*.umul");
8042 set_optab_libfunc (sdiv_optab, SImode, "*.div");
8043 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
8044 set_optab_libfunc (smod_optab, SImode, "*.rem");
8045 set_optab_libfunc (umod_optab, SImode, "*.urem");
      /* TFmode arithmetic.  These names are part of the SPARC 32-bit ABI.  */
8048 set_optab_libfunc (add_optab, TFmode, "_Q_add");
8049 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
8050 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
8051 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
8052 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
8054 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
8055 is because with soft-float, the SFmode and DFmode sqrt
8056 instructions will be absent, and the compiler will notice and
8057 try to use the TFmode sqrt instruction for calls to the
8058 builtin function sqrt, but this fails. */
8060 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
8062 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
8063 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
8064 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
8065 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
8066 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
8067 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
8069 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
8070 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
8071 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
8072 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
8074 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
8075 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
8076 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
8077 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
8079 if (DITF_CONVERSION_LIBFUNCS)
8081 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
8082 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
8083 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
8084 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
8087 if (SUN_CONVERSION_LIBFUNCS)
8089 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
8090 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
8091 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
8092 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
      /* In the SPARC 64-bit ABI, SImode multiply and divide functions
8098 do not exist in the library. Make sure the compiler does not
8099 emit calls to them by accident. (It should always use the
8100 hardware instructions.) */
8101 set_optab_libfunc (smul_optab, SImode, 0);
8102 set_optab_libfunc (sdiv_optab, SImode, 0);
8103 set_optab_libfunc (udiv_optab, SImode, 0);
8104 set_optab_libfunc (smod_optab, SImode, 0);
8105 set_optab_libfunc (umod_optab, SImode, 0);
8107 if (SUN_INTEGER_MULTIPLY_64)
8109 set_optab_libfunc (smul_optab, DImode, "__mul64");
8110 set_optab_libfunc (sdiv_optab, DImode, "__div64");
8111 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
8112 set_optab_libfunc (smod_optab, DImode, "__rem64");
8113 set_optab_libfunc (umod_optab, DImode, "__urem64");
8116 if (SUN_CONVERSION_LIBFUNCS)
8118 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
8119 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
8120 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
8121 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
8125 gofast_maybe_init_libfuncs ();
8128 #define def_builtin(NAME, CODE, TYPE) \
8129 add_builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
8132 /* Implement the TARGET_INIT_BUILTINS target hook.
8133 Create builtin functions for special SPARC instructions. */
8136 sparc_init_builtins (void)
8139 sparc_vis_init_builtins ();
8142 /* Create builtin functions for VIS 1.0 instructions. */
8145 sparc_vis_init_builtins (void)
8147 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
8148 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
8149 tree v4hi = build_vector_type (intHI_type_node, 4);
8150 tree v2hi = build_vector_type (intHI_type_node, 2);
8151 tree v2si = build_vector_type (intSI_type_node, 2);
8153 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
8154 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
8155 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
8156 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
8157 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
8158 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
8159 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
8160 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
8161 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
8162 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
8163 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
8164 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
8165 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
8167 intDI_type_node, 0);
8168 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
8170 intDI_type_node, 0);
8171 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
8173 intSI_type_node, 0);
8174 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
8176 intDI_type_node, 0);
8178 /* Packing and expanding vectors. */
8179 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
8180 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
8181 v8qi_ftype_v2si_v8qi);
8182 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
8184 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
8185 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
8186 v8qi_ftype_v4qi_v4qi);
8188 /* Multiplications. */
8189 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
8190 v4hi_ftype_v4qi_v4hi);
8191 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
8192 v4hi_ftype_v4qi_v2hi);
8193 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
8194 v4hi_ftype_v4qi_v2hi);
8195 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
8196 v4hi_ftype_v8qi_v4hi);
8197 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8198 v4hi_ftype_v8qi_v4hi);
8199 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8200 v2si_ftype_v4qi_v2hi);
8201 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8202 v2si_ftype_v4qi_v2hi);
8204 /* Data aligning. */
8205 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8206 v4hi_ftype_v4hi_v4hi);
8207 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8208 v8qi_ftype_v8qi_v8qi);
8209 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8210 v2si_ftype_v2si_v2si);
8211 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8214 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8217 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8220 /* Pixel distance. */
8221 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8222 di_ftype_v8qi_v8qi_di);
8225 /* Handle TARGET_EXPAND_BUILTIN target hook.
8226 Expand builtin functions for sparc intrinsics. */
8229 sparc_expand_builtin (tree exp, rtx target,
8230 rtx subtarget ATTRIBUTE_UNUSED,
8231 enum machine_mode tmode ATTRIBUTE_UNUSED,
8232 int ignore ATTRIBUTE_UNUSED)
8235 call_expr_arg_iterator iter;
8236 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8237 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8239 enum machine_mode mode[4];
8242 mode[0] = insn_data[icode].operand[0].mode;
8244 || GET_MODE (target) != mode[0]
8245 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
8246 op[0] = gen_reg_rtx (mode[0]);
8250 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8253 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8254 op[arg_count] = expand_normal (arg);
8256 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8258 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8264 pat = GEN_FCN (icode) (op[0], op[1]);
8267 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8270 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8285 sparc_vis_mul8x16 (int e8, int e16)
8287 return (e8 * e16 + 128) / 256;
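/* E.g., sparc_vis_mul8x16 (250, 128) yields (250 * 128 + 128) / 256
   = 125, i.e. the 8-bit value 250 scaled by 128/256 with rounding.  */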
/* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
8291 by FNCODE. All of the elements in ELTS0 and ELTS1 lists must be integer
8292 constants. A tree list with the results of the multiplications is returned,
8293 and each element in the list is of INNER_TYPE. */
8296 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8298 tree n_elts = NULL_TREE;
8303 case CODE_FOR_fmul8x16_vis:
8304 for (; elts0 && elts1;
8305 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8308 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8309 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8310 n_elts = tree_cons (NULL_TREE,
8311 build_int_cst (inner_type, val),
8316 case CODE_FOR_fmul8x16au_vis:
8317 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8319 for (; elts0; elts0 = TREE_CHAIN (elts0))
8322 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8324 n_elts = tree_cons (NULL_TREE,
8325 build_int_cst (inner_type, val),
8330 case CODE_FOR_fmul8x16al_vis:
8331 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8333 for (; elts0; elts0 = TREE_CHAIN (elts0))
8336 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8338 n_elts = tree_cons (NULL_TREE,
8339 build_int_cst (inner_type, val),
8348 return nreverse (n_elts);
8351 /* Handle TARGET_FOLD_BUILTIN target hook.
8352 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8353 result of the function call is ignored. NULL_TREE is returned if the
8354 function could not be folded. */
8357 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8359 tree arg0, arg1, arg2;
8360 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8361 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
8364 && icode != CODE_FOR_alignaddrsi_vis
8365 && icode != CODE_FOR_alignaddrdi_vis)
8366 return fold_convert (rtype, integer_zero_node);
8370 case CODE_FOR_fexpand_vis:
8371 arg0 = TREE_VALUE (arglist);
8374 if (TREE_CODE (arg0) == VECTOR_CST)
8376 tree inner_type = TREE_TYPE (rtype);
8377 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8378 tree n_elts = NULL_TREE;
8380 for (; elts; elts = TREE_CHAIN (elts))
8382 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8383 n_elts = tree_cons (NULL_TREE,
8384 build_int_cst (inner_type, val),
8387 return build_vector (rtype, nreverse (n_elts));
8391 case CODE_FOR_fmul8x16_vis:
8392 case CODE_FOR_fmul8x16au_vis:
8393 case CODE_FOR_fmul8x16al_vis:
8394 arg0 = TREE_VALUE (arglist);
8395 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8399 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8401 tree inner_type = TREE_TYPE (rtype);
8402 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8403 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8404 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
8407 return build_vector (rtype, n_elts);
8411 case CODE_FOR_fpmerge_vis:
8412 arg0 = TREE_VALUE (arglist);
8413 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8417 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8419 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8420 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8421 tree n_elts = NULL_TREE;
8423 for (; elts0 && elts1;
8424 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8426 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8427 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8430 return build_vector (rtype, nreverse (n_elts));
8434 case CODE_FOR_pdist_vis:
8435 arg0 = TREE_VALUE (arglist);
8436 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8437 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8442 if (TREE_CODE (arg0) == VECTOR_CST
8443 && TREE_CODE (arg1) == VECTOR_CST
8444 && TREE_CODE (arg2) == INTEGER_CST)
8447 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8448 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8449 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8450 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8452 for (; elts0 && elts1;
8453 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8455 unsigned HOST_WIDE_INT
8456 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8457 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8458 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8459 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8461 unsigned HOST_WIDE_INT l;
8464 overflow |= neg_double (low1, high1, &l, &h);
8465 overflow |= add_double (low0, high0, l, h, &l, &h);
8467 overflow |= neg_double (l, h, &l, &h);
8469 overflow |= add_double (low, high, l, h, &low, &high);
8472 gcc_assert (overflow == 0);
8474 return build_int_cst_wide (rtype, low, high);
8484 /* ??? This duplicates information provided to the compiler by the
8485 ??? scheduler description. Some day, teach genautomata to output
8486 ??? the latencies and then CSE will just use that. */
8489 sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
8490 bool speed ATTRIBUTE_UNUSED)
8492 enum machine_mode mode = GET_MODE (x);
8493 bool float_mode_p = FLOAT_MODE_P (mode);
8498 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8516 if (GET_MODE (x) == VOIDmode
8517 && ((CONST_DOUBLE_HIGH (x) == 0
8518 && CONST_DOUBLE_LOW (x) < 0x1000)
8519 || (CONST_DOUBLE_HIGH (x) == -1
8520 && CONST_DOUBLE_LOW (x) < 0
8521 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8528 /* If outer-code was a sign or zero extension, a cost
8529 of COSTS_N_INSNS (1) was already added in. This is
8530 why we are subtracting it back out. */
8531 if (outer_code == ZERO_EXTEND)
8533 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8535 else if (outer_code == SIGN_EXTEND)
8537 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8539 else if (float_mode_p)
8541 *total = sparc_costs->float_load;
8545 *total = sparc_costs->int_load;
8553 *total = sparc_costs->float_plusminus;
8555 *total = COSTS_N_INSNS (1);
8560 *total = sparc_costs->float_mul;
8561 else if (! TARGET_HARD_MUL)
8562 *total = COSTS_N_INSNS (25);
8568 if (sparc_costs->int_mul_bit_factor)
8572 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8574 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
8575 for (nbits = 0; value != 0; value &= value - 1)
8578 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8579 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8581 rtx x1 = XEXP (x, 1);
8582 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8583 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8585 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8587 for (; value2 != 0; value2 &= value2 - 1)
8595 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8596 bit_cost = COSTS_N_INSNS (bit_cost);
8600 *total = sparc_costs->int_mulX + bit_cost;
8602 *total = sparc_costs->int_mul + bit_cost;
8609 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8619 *total = sparc_costs->float_div_df;
8621 *total = sparc_costs->float_div_sf;
8626 *total = sparc_costs->int_divX;
8628 *total = sparc_costs->int_div;
8635 *total = COSTS_N_INSNS (1);
8642 case UNSIGNED_FLOAT:
8646 case FLOAT_TRUNCATE:
8647 *total = sparc_costs->float_move;
8652 *total = sparc_costs->float_sqrt_df;
8654 *total = sparc_costs->float_sqrt_sf;
8659 *total = sparc_costs->float_cmp;
8661 *total = COSTS_N_INSNS (1);
8666 *total = sparc_costs->float_cmove;
8668 *total = sparc_costs->int_cmove;
8672 /* Handle the NAND vector patterns. */
8673 if (sparc_vector_mode_supported_p (GET_MODE (x))
8674 && GET_CODE (XEXP (x, 0)) == NOT
8675 && GET_CODE (XEXP (x, 1)) == NOT)
8677 *total = COSTS_N_INSNS (1);
8688 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
8689 This is achieved by means of a manual dynamic stack space allocation in
8690 the current frame. We make the assumption that SEQ doesn't contain any
8691 function calls, with the possible exception of calls to the PIC helper. */
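/* The resulting frame layout is (a sketch; stack grows downwards and
   padding added by SPARC_STACK_ALIGN is ignored):

	new %sp + BIAS          -> +----------------------------+
	                           | 16 words: reg save area    |
	new %sp + BIAS + OFFSET -> +----------------------------+
	                           | 2 words: REG / REG2 spill  |
	old %sp + BIAS          -> +----------------------------+  */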
8694 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8696 /* We must preserve the lowest 16 words for the register save area. */
8697 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
8698 /* We really need only 2 words of fresh stack space. */
8699 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
8702 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
8703 SPARC_STACK_BIAS + offset));
8705 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
8706 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8708 emit_insn (gen_rtx_SET (VOIDmode,
8709 adjust_address (slot, word_mode, UNITS_PER_WORD),
8713 emit_insn (gen_rtx_SET (VOIDmode,
8715 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8716 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8717 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
8720 /* Output the assembler code for a thunk function. THUNK_DECL is the
8721 declaration for the thunk function itself, FUNCTION is the decl for
8722 the target function. DELTA is an immediate constant offset to be
8723 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8724 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8727 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8728 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8731 rtx this_rtx, insn, funexp;
8732 unsigned int int_arg_first;
8734 reload_completed = 1;
8735 epilogue_completed = 1;
8737 emit_note (NOTE_INSN_PROLOGUE_END);
8739 if (flag_delayed_branch)
8741 /* We will emit a regular sibcall below, so we need to instruct
8742 output_sibcall that we are in a leaf function. */
8743 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8745 /* This will cause final.c to invoke leaf_renumber_regs so we
8746 must behave as if we were in a not-yet-leafified function. */
8747 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8751 /* We will emit the sibcall manually below, so we will need to
8752 manually spill non-leaf registers. */
8753 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8755 /* We really are in a leaf function. */
8756 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8759 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8760 returns a structure, the structure return pointer is there instead. */
8761 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8762 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
8764 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
8766 /* Add DELTA. When possible use a plain add, otherwise load it into
8767 a register first. */
8770 rtx delta_rtx = GEN_INT (delta);
8772 if (! SPARC_SIMM13_P (delta))
8774 rtx scratch = gen_rtx_REG (Pmode, 1);
8775 emit_move_insn (scratch, delta_rtx);
8776 delta_rtx = scratch;
8779 /* THIS_RTX += DELTA. */
8780 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
8783 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
8786 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8787 rtx scratch = gen_rtx_REG (Pmode, 1);
8789 gcc_assert (vcall_offset < 0);
8791 /* SCRATCH = *THIS_RTX. */
8792 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
8794 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8795 may not have any available scratch register at this point. */
8796 if (SPARC_SIMM13_P (vcall_offset))
8798 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8799 else if (! fixed_regs[5]
8800 /* The below sequence is made up of at least 2 insns,
8801 while the default method may need only one. */
8802 && vcall_offset < -8192)
8804 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8805 emit_move_insn (scratch2, vcall_offset_rtx);
8806 vcall_offset_rtx = scratch2;
8810 rtx increment = GEN_INT (-4096);
8812 /* VCALL_OFFSET is a negative number whose typical range can be
8813 estimated as -32768..0 in 32-bit mode. In almost all cases
8814 it is therefore cheaper to emit multiple add insns than
	     spilling and loading the constant into a register (at least
	     3 insns).  */
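	  /* For instance, a hypothetical VCALL_OFFSET of -10000 takes
	     two additions of -4096 in the loop below, leaving -1808,
	     which satisfies SPARC_SIMM13_P and is then used directly
	     in the memory reference.  */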
8817 while (! SPARC_SIMM13_P (vcall_offset))
8819 emit_insn (gen_add2_insn (scratch, increment));
8820 vcall_offset += 4096;
8822 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
8825 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
8826 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8827 gen_rtx_PLUS (Pmode,
8829 vcall_offset_rtx)));
8831 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
8832 emit_insn (gen_add2_insn (this_rtx, scratch));
8835 /* Generate a tail call to the target function. */
8836 if (! TREE_USED (function))
8838 assemble_external (function);
8839 TREE_USED (function) = 1;
8841 funexp = XEXP (DECL_RTL (function), 0);
8843 if (flag_delayed_branch)
8845 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8846 insn = emit_call_insn (gen_sibcall (funexp));
8847 SIBLING_CALL_P (insn) = 1;
8851 /* The hoops we have to jump through in order to generate a sibcall
8852 without using delay slots... */
8853 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8857 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8858 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8860 /* Delay emitting the PIC helper function because it needs to
8861 change the section and we are emitting assembly code. */
8862 load_pic_register (true); /* clobbers %o7 */
8863 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8866 emit_and_preserve (seq, spill_reg, spill_reg2);
8868 else if (TARGET_ARCH32)
8870 emit_insn (gen_rtx_SET (VOIDmode,
8872 gen_rtx_HIGH (SImode, funexp)));
8873 emit_insn (gen_rtx_SET (VOIDmode,
8875 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8877 else /* TARGET_ARCH64 */
8879 switch (sparc_cmodel)
8883 /* The destination can serve as a temporary. */
8884 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8889 /* The destination cannot serve as a temporary. */
8890 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8892 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8895 emit_and_preserve (seq, spill_reg, 0);
8903 emit_jump_insn (gen_indirect_jump (scratch));
8908 /* Run just enough of rest_of_compilation to get the insns emitted.
8909 There's not really enough bulk here to make other passes such as
8910 instruction scheduling worth while. Note that use_thunk calls
8911 assemble_start_function and assemble_end_function. */
8912 insn = get_insns ();
8913 insn_locators_alloc ();
8914 shorten_branches (insn);
8915 final_start_function (insn, file, 1);
8916 final (insn, file, 1);
8917 final_end_function ();
8918 free_after_compilation (cfun);
8920 reload_completed = 0;
8921 epilogue_completed = 0;
8924 /* Return true if sparc_output_mi_thunk would be able to output the
8925 assembler code for the thunk function specified by the arguments
8926 it is passed, and false otherwise. */
8928 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
8929 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8930 HOST_WIDE_INT vcall_offset,
8931 const_tree function ATTRIBUTE_UNUSED)
8933 /* Bound the loop used in the default method above. */
8934 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8937 /* How to allocate a 'struct machine_function'. */
8939 static struct machine_function *
8940 sparc_init_machine_status (void)
8942 return GGC_CNEW (struct machine_function);
8945 /* Locate some local-dynamic symbol still in use by this function
8946 so that we can print its name in local-dynamic base patterns. */
8949 get_some_local_dynamic_name (void)
8953 if (cfun->machine->some_ld_name)
8954 return cfun->machine->some_ld_name;
8956 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8958 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8959 return cfun->machine->some_ld_name;
8965 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8970 && GET_CODE (x) == SYMBOL_REF
8971 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8973 cfun->machine->some_ld_name = XSTR (x, 0);
8980 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
8981 This is called from dwarf2out.c to emit call frame instructions
8982 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
8984 sparc_dwarf_handle_frame_unspec (const char *label,
8985 rtx pattern ATTRIBUTE_UNUSED,
8986 int index ATTRIBUTE_UNUSED)
8988 gcc_assert (index == UNSPECV_SAVEW);
8989 dwarf2out_window_save (label);
8992 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8993 We need to emit DTP-relative relocations. */
8996 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
9001 fputs ("\t.word\t%r_tls_dtpoff32(", file);
9004 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
9009 output_addr_const (file, x);
9013 /* Do whatever processing is required at the end of a file. */
9016 sparc_file_end (void)
9018 /* If we haven't emitted the special PIC helper function, do so now. */
9019 if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
9022 if (NEED_INDICATE_EXEC_STACK)
9023 file_end_indicate_exec_stack ();
9026 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
9027 /* Implement TARGET_MANGLE_TYPE. */
9030 sparc_mangle_type (const_tree type)
9033 && TYPE_MAIN_VARIANT (type) == long_double_type_node
9034 && TARGET_LONG_DOUBLE_128)
9037 /* For all other types, use normal C++ mangling. */
/* Expand code to perform an 8-bit or 16-bit compare and swap by doing
   a 32-bit compare and swap on the word containing the byte or
   half-word.  */
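/* A C-level sketch of the expansion below (illustrative only; CAS32
   stands for the sync_compare_and_swapsi pattern emitted further
   down, and the computation of OFF is endian-adjusted):

	uint32_t *wp = (uint32_t *) (p & -4);
	uint32_t mask = 0xff << off;         (0xffff for HImode)
	uint32_t bg = *wp & ~mask;           (bits outside the subword)
	uint32_t o = (oldval << off) & mask;
	uint32_t n = (newval << off) & mask;
	uint32_t seen;
	for (;;)
	  {
	    seen = CAS32 (wp, bg | o, bg | n);
	    if (seen == (bg | o))
	      break;                         (swap succeeded)
	    if ((seen & ~mask) == bg)
	      break;                         (subword mismatch: fail)
	    bg = seen & ~mask;               (outside bits changed: retry)
	  }
	result = (seen & mask) >> off;       (the observed old subword)  */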
9046 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
9048 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
9049 rtx addr = gen_reg_rtx (Pmode);
9050 rtx off = gen_reg_rtx (SImode);
9051 rtx oldv = gen_reg_rtx (SImode);
9052 rtx newv = gen_reg_rtx (SImode);
9053 rtx oldvalue = gen_reg_rtx (SImode);
9054 rtx newvalue = gen_reg_rtx (SImode);
9055 rtx res = gen_reg_rtx (SImode);
9056 rtx resv = gen_reg_rtx (SImode);
9057 rtx memsi, val, mask, end_label, loop_label, cc;
9059 emit_insn (gen_rtx_SET (VOIDmode, addr,
9060 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
9062 if (Pmode != SImode)
9063 addr1 = gen_lowpart (SImode, addr1);
9064 emit_insn (gen_rtx_SET (VOIDmode, off,
9065 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
9067 memsi = gen_rtx_MEM (SImode, addr);
9068 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
9069 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
9071 val = force_reg (SImode, memsi);
9073 emit_insn (gen_rtx_SET (VOIDmode, off,
9074 gen_rtx_XOR (SImode, off,
9075 GEN_INT (GET_MODE (mem) == QImode
9078 emit_insn (gen_rtx_SET (VOIDmode, off,
9079 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
9081 if (GET_MODE (mem) == QImode)
9082 mask = force_reg (SImode, GEN_INT (0xff));
9084 mask = force_reg (SImode, GEN_INT (0xffff));
9086 emit_insn (gen_rtx_SET (VOIDmode, mask,
9087 gen_rtx_ASHIFT (SImode, mask, off)));
9089 emit_insn (gen_rtx_SET (VOIDmode, val,
9090 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
9093 oldval = gen_lowpart (SImode, oldval);
9094 emit_insn (gen_rtx_SET (VOIDmode, oldv,
9095 gen_rtx_ASHIFT (SImode, oldval, off)));
9097 newval = gen_lowpart_common (SImode, newval);
9098 emit_insn (gen_rtx_SET (VOIDmode, newv,
9099 gen_rtx_ASHIFT (SImode, newval, off)));
9101 emit_insn (gen_rtx_SET (VOIDmode, oldv,
9102 gen_rtx_AND (SImode, oldv, mask)));
9104 emit_insn (gen_rtx_SET (VOIDmode, newv,
9105 gen_rtx_AND (SImode, newv, mask)));
9107 end_label = gen_label_rtx ();
9108 loop_label = gen_label_rtx ();
9109 emit_label (loop_label);
9111 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
9112 gen_rtx_IOR (SImode, oldv, val)));
9114 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
9115 gen_rtx_IOR (SImode, newv, val)));
9117 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
9119 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
9121 emit_insn (gen_rtx_SET (VOIDmode, resv,
9122 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
9125 cc = gen_compare_reg_1 (NE, resv, val);
9126 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
9128 /* Use cbranchcc4 to separate the compare and branch! */
9129 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
9130 cc, const0_rtx, loop_label));
9132 emit_label (end_label);
9134 emit_insn (gen_rtx_SET (VOIDmode, res,
9135 gen_rtx_AND (SImode, res, mask)));
9137 emit_insn (gen_rtx_SET (VOIDmode, res,
9138 gen_rtx_LSHIFTRT (SImode, res, off)));
9140 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
9143 #include "gt-sparc.h"