1 /* Subroutines for insn-output.c for SPARC.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com)
6 64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
27 #include "coretypes.h"
32 #include "hard-reg-set.h"
34 #include "insn-config.h"
35 #include "insn-codes.h"
36 #include "conditions.h"
38 #include "insn-attr.h"
49 #include "target-def.h"
50 #include "cfglayout.h"
52 #include "langhooks.h"
58 struct processor_costs cypress_costs = {
59 COSTS_N_INSNS (2), /* int load */
60 COSTS_N_INSNS (2), /* int signed load */
61 COSTS_N_INSNS (2), /* int zeroed load */
62 COSTS_N_INSNS (2), /* float load */
63 COSTS_N_INSNS (5), /* fmov, fneg, fabs */
64 COSTS_N_INSNS (5), /* fadd, fsub */
65 COSTS_N_INSNS (1), /* fcmp */
66 COSTS_N_INSNS (1), /* fmov, fmovr */
67 COSTS_N_INSNS (7), /* fmul */
68 COSTS_N_INSNS (37), /* fdivs */
69 COSTS_N_INSNS (37), /* fdivd */
70 COSTS_N_INSNS (63), /* fsqrts */
71 COSTS_N_INSNS (63), /* fsqrtd */
72 COSTS_N_INSNS (1), /* imul */
73 COSTS_N_INSNS (1), /* imulX */
74 0, /* imul bit factor */
75 COSTS_N_INSNS (1), /* idiv */
76 COSTS_N_INSNS (1), /* idivX */
77 COSTS_N_INSNS (1), /* movcc/movr */
78 0, /* shift penalty */
82 struct processor_costs supersparc_costs = {
83 COSTS_N_INSNS (1), /* int load */
84 COSTS_N_INSNS (1), /* int signed load */
85 COSTS_N_INSNS (1), /* int zeroed load */
86 COSTS_N_INSNS (0), /* float load */
87 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
88 COSTS_N_INSNS (3), /* fadd, fsub */
89 COSTS_N_INSNS (3), /* fcmp */
90 COSTS_N_INSNS (1), /* fmov, fmovr */
91 COSTS_N_INSNS (3), /* fmul */
92 COSTS_N_INSNS (6), /* fdivs */
93 COSTS_N_INSNS (9), /* fdivd */
94 COSTS_N_INSNS (12), /* fsqrts */
95 COSTS_N_INSNS (12), /* fsqrtd */
96 COSTS_N_INSNS (4), /* imul */
97 COSTS_N_INSNS (4), /* imulX */
98 0, /* imul bit factor */
99 COSTS_N_INSNS (4), /* idiv */
100 COSTS_N_INSNS (4), /* idivX */
101 COSTS_N_INSNS (1), /* movcc/movr */
102 1, /* shift penalty */
106 struct processor_costs hypersparc_costs = {
107 COSTS_N_INSNS (1), /* int load */
108 COSTS_N_INSNS (1), /* int signed load */
109 COSTS_N_INSNS (1), /* int zeroed load */
110 COSTS_N_INSNS (1), /* float load */
111 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
112 COSTS_N_INSNS (1), /* fadd, fsub */
113 COSTS_N_INSNS (1), /* fcmp */
114 COSTS_N_INSNS (1), /* fmov, fmovr */
115 COSTS_N_INSNS (1), /* fmul */
116 COSTS_N_INSNS (8), /* fdivs */
117 COSTS_N_INSNS (12), /* fdivd */
118 COSTS_N_INSNS (17), /* fsqrts */
119 COSTS_N_INSNS (17), /* fsqrtd */
120 COSTS_N_INSNS (17), /* imul */
121 COSTS_N_INSNS (17), /* imulX */
122 0, /* imul bit factor */
123 COSTS_N_INSNS (17), /* idiv */
124 COSTS_N_INSNS (17), /* idivX */
125 COSTS_N_INSNS (1), /* movcc/movr */
126 0, /* shift penalty */
130 struct processor_costs sparclet_costs = {
131 COSTS_N_INSNS (3), /* int load */
132 COSTS_N_INSNS (3), /* int signed load */
133 COSTS_N_INSNS (1), /* int zeroed load */
134 COSTS_N_INSNS (1), /* float load */
135 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
136 COSTS_N_INSNS (1), /* fadd, fsub */
137 COSTS_N_INSNS (1), /* fcmp */
138 COSTS_N_INSNS (1), /* fmov, fmovr */
139 COSTS_N_INSNS (1), /* fmul */
140 COSTS_N_INSNS (1), /* fdivs */
141 COSTS_N_INSNS (1), /* fdivd */
142 COSTS_N_INSNS (1), /* fsqrts */
143 COSTS_N_INSNS (1), /* fsqrtd */
144 COSTS_N_INSNS (5), /* imul */
145 COSTS_N_INSNS (5), /* imulX */
146 0, /* imul bit factor */
147 COSTS_N_INSNS (5), /* idiv */
148 COSTS_N_INSNS (5), /* idivX */
149 COSTS_N_INSNS (1), /* movcc/movr */
150 0, /* shift penalty */
154 struct processor_costs ultrasparc_costs = {
155 COSTS_N_INSNS (2), /* int load */
156 COSTS_N_INSNS (3), /* int signed load */
157 COSTS_N_INSNS (2), /* int zeroed load */
158 COSTS_N_INSNS (2), /* float load */
159 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
160 COSTS_N_INSNS (4), /* fadd, fsub */
161 COSTS_N_INSNS (1), /* fcmp */
162 COSTS_N_INSNS (2), /* fmov, fmovr */
163 COSTS_N_INSNS (4), /* fmul */
164 COSTS_N_INSNS (13), /* fdivs */
165 COSTS_N_INSNS (23), /* fdivd */
166 COSTS_N_INSNS (13), /* fsqrts */
167 COSTS_N_INSNS (23), /* fsqrtd */
168 COSTS_N_INSNS (4), /* imul */
169 COSTS_N_INSNS (4), /* imulX */
170 2, /* imul bit factor */
171 COSTS_N_INSNS (37), /* idiv */
172 COSTS_N_INSNS (68), /* idivX */
173 COSTS_N_INSNS (2), /* movcc/movr */
174 2, /* shift penalty */
178 struct processor_costs ultrasparc3_costs = {
179 COSTS_N_INSNS (2), /* int load */
180 COSTS_N_INSNS (3), /* int signed load */
181 COSTS_N_INSNS (3), /* int zeroed load */
182 COSTS_N_INSNS (2), /* float load */
183 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
184 COSTS_N_INSNS (4), /* fadd, fsub */
185 COSTS_N_INSNS (5), /* fcmp */
186 COSTS_N_INSNS (3), /* fmov, fmovr */
187 COSTS_N_INSNS (4), /* fmul */
188 COSTS_N_INSNS (17), /* fdivs */
189 COSTS_N_INSNS (20), /* fdivd */
190 COSTS_N_INSNS (20), /* fsqrts */
191 COSTS_N_INSNS (29), /* fsqrtd */
192 COSTS_N_INSNS (6), /* imul */
193 COSTS_N_INSNS (6), /* imulX */
194 0, /* imul bit factor */
195 COSTS_N_INSNS (40), /* idiv */
196 COSTS_N_INSNS (71), /* idivX */
197 COSTS_N_INSNS (2), /* movcc/movr */
198 0, /* shift penalty */
202 struct processor_costs niagara_costs = {
203 COSTS_N_INSNS (3), /* int load */
204 COSTS_N_INSNS (3), /* int signed load */
205 COSTS_N_INSNS (3), /* int zeroed load */
206 COSTS_N_INSNS (9), /* float load */
207 COSTS_N_INSNS (8), /* fmov, fneg, fabs */
208 COSTS_N_INSNS (8), /* fadd, fsub */
209 COSTS_N_INSNS (26), /* fcmp */
210 COSTS_N_INSNS (8), /* fmov, fmovr */
211 COSTS_N_INSNS (29), /* fmul */
212 COSTS_N_INSNS (54), /* fdivs */
213 COSTS_N_INSNS (83), /* fdivd */
214 COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
215 COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
216 COSTS_N_INSNS (11), /* imul */
217 COSTS_N_INSNS (11), /* imulX */
218 0, /* imul bit factor */
219 COSTS_N_INSNS (72), /* idiv */
220 COSTS_N_INSNS (72), /* idivX */
221 COSTS_N_INSNS (1), /* movcc/movr */
222 0, /* shift penalty */
226 struct processor_costs niagara2_costs = {
227 COSTS_N_INSNS (3), /* int load */
228 COSTS_N_INSNS (3), /* int signed load */
229 COSTS_N_INSNS (3), /* int zeroed load */
230 COSTS_N_INSNS (3), /* float load */
231 COSTS_N_INSNS (6), /* fmov, fneg, fabs */
232 COSTS_N_INSNS (6), /* fadd, fsub */
233 COSTS_N_INSNS (6), /* fcmp */
234 COSTS_N_INSNS (6), /* fmov, fmovr */
235 COSTS_N_INSNS (6), /* fmul */
236 COSTS_N_INSNS (19), /* fdivs */
237 COSTS_N_INSNS (33), /* fdivd */
238 COSTS_N_INSNS (19), /* fsqrts */
239 COSTS_N_INSNS (33), /* fsqrtd */
240 COSTS_N_INSNS (5), /* imul */
241 COSTS_N_INSNS (5), /* imulX */
242 0, /* imul bit factor */
243 COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
244 COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
245 COSTS_N_INSNS (1), /* movcc/movr */
246 0, /* shift penalty */
249 const struct processor_costs *sparc_costs = &cypress_costs;
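/* Editorial note (not in the original source): COSTS_N_INSNS, from rtl.h,
   scales a latency given in instructions into the compiler's internal cost
   units, so an entry such as COSTS_N_INSNS (37) for the Cypress fdivs marks
   the divide as roughly 37 times as expensive as a simple single-cycle insn.
   sparc_costs starts out pointing at cypress_costs and is redirected to the
   table matching sparc_cpu by the switch in sparc_override_options below.  */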
251 #ifdef HAVE_AS_RELAX_OPTION
252 /* If 'as' and 'ld' are relaxing tail call insns into branch always, use
253 "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
254 With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
255 somebody branches between the sethi and jmp. */
256 #define LEAF_SIBCALL_SLOT_RESERVED_P 1
258 #define LEAF_SIBCALL_SLOT_RESERVED_P \
259 ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
262 /* Global variables for machine-dependent things. */
264 /* Size of frame. Need to know this to emit return insns from leaf procedures.
265 ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
266 reload pass. This is important as the value is later used for scheduling
267 (to see what can go in a delay slot).
268 APPARENT_FSIZE is the size of the stack less the register save area and less
269 the outgoing argument area. It is used when saving call preserved regs. */
270 static HOST_WIDE_INT apparent_fsize;
271 static HOST_WIDE_INT actual_fsize;
273 /* Number of live general or floating point registers needed to be
274 saved (as 4-byte quantities). */
275 static int num_gfregs;
277 /* The alias set for prologue/epilogue register save/restore. */
278 static GTY(()) alias_set_type sparc_sr_alias_set;
280 /* The alias set for the structure return value. */
281 static GTY(()) alias_set_type struct_value_alias_set;
283 /* Vector to say how input registers are mapped to output registers.
284 HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
285 eliminate it. You must use -fomit-frame-pointer to get that. */
286 char leaf_reg_remap[] =
287 { 0, 1, 2, 3, 4, 5, 6, 7,
288 -1, -1, -1, -1, -1, -1, 14, -1,
289 -1, -1, -1, -1, -1, -1, -1, -1,
290 8, 9, 10, 11, 12, 13, -1, 15,
292 32, 33, 34, 35, 36, 37, 38, 39,
293 40, 41, 42, 43, 44, 45, 46, 47,
294 48, 49, 50, 51, 52, 53, 54, 55,
295 56, 57, 58, 59, 60, 61, 62, 63,
296 64, 65, 66, 67, 68, 69, 70, 71,
297 72, 73, 74, 75, 76, 77, 78, 79,
298 80, 81, 82, 83, 84, 85, 86, 87,
299 88, 89, 90, 91, 92, 93, 94, 95,
300 96, 97, 98, 99, 100};
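/* Editorial example (not in the original source): in a leaf function the
   register window is never shifted, so what the body calls %i0 (hard reg 24)
   is physically still the caller's %o0 (hard reg 8).  Hence entries 24..29
   map to 8..13 and %i7 (31) maps to %o7 (15), while the %o registers, %o7
   and the %l registers themselves (entries 8..13, 15 and 16..23) are -1,
   meaning a function that uses them cannot be given the leaf treatment.  */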
302 /* Vector, indexed by hard register number, which contains 1
303 for a register that is allowable in a candidate for leaf
304 function treatment. */
305 char sparc_leaf_regs[] =
306 { 1, 1, 1, 1, 1, 1, 1, 1,
307 0, 0, 0, 0, 0, 0, 1, 0,
308 0, 0, 0, 0, 0, 0, 0, 0,
309 1, 1, 1, 1, 1, 1, 0, 1,
310 1, 1, 1, 1, 1, 1, 1, 1,
311 1, 1, 1, 1, 1, 1, 1, 1,
312 1, 1, 1, 1, 1, 1, 1, 1,
313 1, 1, 1, 1, 1, 1, 1, 1,
314 1, 1, 1, 1, 1, 1, 1, 1,
315 1, 1, 1, 1, 1, 1, 1, 1,
316 1, 1, 1, 1, 1, 1, 1, 1,
317 1, 1, 1, 1, 1, 1, 1, 1,
320 struct GTY(()) machine_function
322 /* Some local-dynamic TLS symbol name. */
323 const char *some_ld_name;
325 /* True if the current function is leaf and uses only leaf regs,
326 so that the SPARC leaf function optimization can be applied.
327 Private version of current_function_uses_only_leaf_regs, see
328 sparc_expand_prologue for the rationale. */
331 /* True if the data calculated by sparc_expand_prologue are valid. */
332 bool prologue_data_valid_p;
335 #define sparc_leaf_function_p cfun->machine->leaf_function_p
336 #define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
338 /* Register we pretend to think the frame pointer is allocated to.
339 Normally, this is %fp, but if we are in a leaf procedure, this
340 is %sp+"something". We record "something" separately as it may
341 be too big for reg+constant addressing. */
342 static rtx frame_base_reg;
343 static HOST_WIDE_INT frame_base_offset;
345 /* 1 if the next opcode is to be specially indented. */
346 int sparc_indent_opcode = 0;
348 static bool sparc_handle_option (size_t, const char *, int);
349 static void sparc_init_modes (void);
350 static void scan_record_type (tree, int *, int *, int *);
351 static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
352 tree, int, int, int *, int *);
354 static int supersparc_adjust_cost (rtx, rtx, rtx, int);
355 static int hypersparc_adjust_cost (rtx, rtx, rtx, int);
357 static void sparc_output_addr_vec (rtx);
358 static void sparc_output_addr_diff_vec (rtx);
359 static void sparc_output_deferred_case_vectors (void);
360 static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
361 static rtx sparc_builtin_saveregs (void);
362 static int epilogue_renumber (rtx *, int);
363 static bool sparc_assemble_integer (rtx, unsigned int, int);
364 static int set_extends (rtx);
365 static void emit_pic_helper (void);
366 static void load_pic_register (bool);
367 static int save_or_restore_regs (int, int, rtx, int, int);
368 static void emit_save_or_restore_regs (int);
369 static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
370 static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
371 #ifdef OBJECT_FORMAT_ELF
372 static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
375 static int sparc_adjust_cost (rtx, rtx, rtx, int);
376 static int sparc_issue_rate (void);
377 static void sparc_sched_init (FILE *, int, int);
378 static int sparc_use_sched_lookahead (void);
380 static void emit_soft_tfmode_libcall (const char *, int, rtx *);
381 static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
382 static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
383 static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
384 static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
386 static bool sparc_function_ok_for_sibcall (tree, tree);
387 static void sparc_init_libfuncs (void);
388 static void sparc_init_builtins (void);
389 static void sparc_vis_init_builtins (void);
390 static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
391 static tree sparc_fold_builtin (tree, tree, bool);
392 static int sparc_vis_mul8x16 (int, int);
393 static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
394 static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
395 HOST_WIDE_INT, tree);
396 static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
397 HOST_WIDE_INT, const_tree);
398 static struct machine_function * sparc_init_machine_status (void);
399 static bool sparc_cannot_force_const_mem (rtx);
400 static rtx sparc_tls_get_addr (void);
401 static rtx sparc_tls_got (void);
402 static const char *get_some_local_dynamic_name (void);
403 static int get_some_local_dynamic_name_1 (rtx *, void *);
404 static bool sparc_rtx_costs (rtx, int, int, int *, bool);
405 static bool sparc_promote_prototypes (const_tree);
406 static rtx sparc_struct_value_rtx (tree, int);
407 static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
408 int *, const_tree, int);
409 static bool sparc_return_in_memory (const_tree, const_tree);
410 static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
411 static void sparc_va_start (tree, rtx);
412 static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
413 static bool sparc_vector_mode_supported_p (enum machine_mode);
414 static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
415 static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
416 enum machine_mode, const_tree, bool);
417 static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
418 enum machine_mode, tree, bool);
419 static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
420 static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
421 static void sparc_file_end (void);
422 static bool sparc_frame_pointer_required (void);
423 static bool sparc_can_eliminate (const int, const int);
424 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
425 static const char *sparc_mangle_type (const_tree);
427 static void sparc_trampoline_init (rtx, tree, rtx);
429 #ifdef SUBTARGET_ATTRIBUTE_TABLE
430 /* Table of valid machine attributes. */
431 static const struct attribute_spec sparc_attribute_table[] =
433 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
434 SUBTARGET_ATTRIBUTE_TABLE,
435 { NULL, 0, 0, false, false, false, NULL }
439 /* Option handling. */
442 enum cmodel sparc_cmodel;
444 char sparc_hard_reg_printed[8];
446 struct sparc_cpu_select sparc_select[] =
448 /* switch name, tune arch */
449 { (char *)0, "default", 1, 1 },
450 { (char *)0, "-mcpu=", 1, 1 },
451 { (char *)0, "-mtune=", 1, 0 },
455 /* CPU type. This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx. */
456 enum processor_type sparc_cpu;
458 /* Whether an FPU option was specified. */
459 static bool fpu_option_set = false;
461 /* Initialize the GCC target structure. */
463 /* The sparc default is to use .half rather than .short for aligned
464 HI objects. Use .word instead of .long on non-ELF systems. */
465 #undef TARGET_ASM_ALIGNED_HI_OP
466 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
467 #ifndef OBJECT_FORMAT_ELF
468 #undef TARGET_ASM_ALIGNED_SI_OP
469 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
472 #undef TARGET_ASM_UNALIGNED_HI_OP
473 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
474 #undef TARGET_ASM_UNALIGNED_SI_OP
475 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
476 #undef TARGET_ASM_UNALIGNED_DI_OP
477 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
479 /* The target hook has to handle DI-mode values. */
480 #undef TARGET_ASM_INTEGER
481 #define TARGET_ASM_INTEGER sparc_assemble_integer
483 #undef TARGET_ASM_FUNCTION_PROLOGUE
484 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
485 #undef TARGET_ASM_FUNCTION_EPILOGUE
486 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
488 #undef TARGET_SCHED_ADJUST_COST
489 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
490 #undef TARGET_SCHED_ISSUE_RATE
491 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
492 #undef TARGET_SCHED_INIT
493 #define TARGET_SCHED_INIT sparc_sched_init
494 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
495 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
497 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
498 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
500 #undef TARGET_INIT_LIBFUNCS
501 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
502 #undef TARGET_INIT_BUILTINS
503 #define TARGET_INIT_BUILTINS sparc_init_builtins
505 #undef TARGET_LEGITIMIZE_ADDRESS
506 #define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
508 #undef TARGET_EXPAND_BUILTIN
509 #define TARGET_EXPAND_BUILTIN sparc_expand_builtin
510 #undef TARGET_FOLD_BUILTIN
511 #define TARGET_FOLD_BUILTIN sparc_fold_builtin
514 #undef TARGET_HAVE_TLS
515 #define TARGET_HAVE_TLS true
518 #undef TARGET_CANNOT_FORCE_CONST_MEM
519 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
521 #undef TARGET_ASM_OUTPUT_MI_THUNK
522 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
523 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
524 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
526 #undef TARGET_RTX_COSTS
527 #define TARGET_RTX_COSTS sparc_rtx_costs
528 #undef TARGET_ADDRESS_COST
529 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
531 #undef TARGET_PROMOTE_FUNCTION_MODE
532 #define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode
534 #undef TARGET_PROMOTE_PROTOTYPES
535 #define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes
537 #undef TARGET_STRUCT_VALUE_RTX
538 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
539 #undef TARGET_RETURN_IN_MEMORY
540 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
541 #undef TARGET_MUST_PASS_IN_STACK
542 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
543 #undef TARGET_PASS_BY_REFERENCE
544 #define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
545 #undef TARGET_ARG_PARTIAL_BYTES
546 #define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
548 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
549 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
550 #undef TARGET_STRICT_ARGUMENT_NAMING
551 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
553 #undef TARGET_EXPAND_BUILTIN_VA_START
554 #define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
555 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
556 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
558 #undef TARGET_VECTOR_MODE_SUPPORTED_P
559 #define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
561 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
562 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec
564 #ifdef SUBTARGET_INSERT_ATTRIBUTES
565 #undef TARGET_INSERT_ATTRIBUTES
566 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
569 #ifdef SUBTARGET_ATTRIBUTE_TABLE
570 #undef TARGET_ATTRIBUTE_TABLE
571 #define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
574 #undef TARGET_RELAXED_ORDERING
575 #define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING
577 #undef TARGET_DEFAULT_TARGET_FLAGS
578 #define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
579 #undef TARGET_HANDLE_OPTION
580 #define TARGET_HANDLE_OPTION sparc_handle_option
582 #if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
583 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
584 #define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
587 #undef TARGET_ASM_FILE_END
588 #define TARGET_ASM_FILE_END sparc_file_end
590 #undef TARGET_FRAME_POINTER_REQUIRED
591 #define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required
593 #undef TARGET_CAN_ELIMINATE
594 #define TARGET_CAN_ELIMINATE sparc_can_eliminate
596 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
597 #undef TARGET_MANGLE_TYPE
598 #define TARGET_MANGLE_TYPE sparc_mangle_type
601 #undef TARGET_LEGITIMATE_ADDRESS_P
602 #define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p
604 #undef TARGET_TRAMPOLINE_INIT
605 #define TARGET_TRAMPOLINE_INIT sparc_trampoline_init
607 struct gcc_target targetm = TARGET_INITIALIZER;
609 /* Implement TARGET_HANDLE_OPTION. */
612 sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
617 case OPT_mhard_float:
618 case OPT_msoft_float:
619 fpu_option_set = true;
623 sparc_select[1].string = arg;
627 sparc_select[2].string = arg;
634 /* Validate and override various options, and do some machine dependent initialization. */
638 sparc_override_options (void)
640 static struct code_model {
641 const char *const name;
642 const enum cmodel value;
643 } const cmodels[] = {
645 { "medlow", CM_MEDLOW },
646 { "medmid", CM_MEDMID },
647 { "medany", CM_MEDANY },
648 { "embmedany", CM_EMBMEDANY },
649 { NULL, (enum cmodel) 0 }
651 const struct code_model *cmodel;
652 /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=. */
653 static struct cpu_default {
655 const char *const name;
656 } const cpu_default[] = {
657 /* There must be one entry here for each TARGET_CPU value. */
658 { TARGET_CPU_sparc, "cypress" },
659 { TARGET_CPU_sparclet, "tsc701" },
660 { TARGET_CPU_sparclite, "f930" },
661 { TARGET_CPU_v8, "v8" },
662 { TARGET_CPU_hypersparc, "hypersparc" },
663 { TARGET_CPU_sparclite86x, "sparclite86x" },
664 { TARGET_CPU_supersparc, "supersparc" },
665 { TARGET_CPU_v9, "v9" },
666 { TARGET_CPU_ultrasparc, "ultrasparc" },
667 { TARGET_CPU_ultrasparc3, "ultrasparc3" },
668 { TARGET_CPU_niagara, "niagara" },
669 { TARGET_CPU_niagara2, "niagara2" },
672 const struct cpu_default *def;
673 /* Table of values for -m{cpu,tune}=. */
674 static struct cpu_table {
675 const char *const name;
676 const enum processor_type processor;
679 } const cpu_table[] = {
680 { "v7", PROCESSOR_V7, MASK_ISA, 0 },
681 { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
682 { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
683 /* TI TMS390Z55 supersparc */
684 { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
685 { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
686 /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
687 The Fujitsu MB86934 is the recent sparclite chip, with an fpu. */
688 { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
689 { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
690 { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
691 { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
693 { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
695 { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
696 { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
697 /* TI ultrasparc I, II, IIi */
698 { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
699 /* Although insns using %y are deprecated, it is a clear win on current ultrasparcs. */
701 |MASK_DEPRECATED_V8_INSNS},
702 /* TI ultrasparc III */
703 /* ??? Check if %y issue still holds true in ultra3. */
704 { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
706 { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
707 { "niagara2", PROCESSOR_NIAGARA2, MASK_ISA, MASK_V9},
708 { 0, (enum processor_type) 0, 0, 0 }
710 const struct cpu_table *cpu;
711 const struct sparc_cpu_select *sel;
714 #ifndef SPARC_BI_ARCH
715 /* Check for unsupported architecture size. */
716 if (! TARGET_64BIT != DEFAULT_ARCH32_P)
717 error ("%s is not supported by this configuration",
718 DEFAULT_ARCH32_P ? "-m64" : "-m32");
721 /* We force all 64bit archs to use 128 bit long double */
722 if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
724 error ("-mlong-double-64 not allowed with -m64");
725 target_flags |= MASK_LONG_DOUBLE_128;
728 /* Code model selection. */
729 sparc_cmodel = SPARC_DEFAULT_CMODEL;
733 sparc_cmodel = CM_32;
736 if (sparc_cmodel_string != NULL)
740 for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
741 if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
743 if (cmodel->name == NULL)
744 error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
746 sparc_cmodel = cmodel->value;
749 error ("-mcmodel= is not supported on 32 bit systems");
752 fpu = target_flags & MASK_FPU; /* save current -mfpu status */
754 /* Set the default CPU. */
755 for (def = &cpu_default[0]; def->name; ++def)
756 if (def->cpu == TARGET_CPU_DEFAULT)
758 gcc_assert (def->name);
759 sparc_select[0].string = def->name;
761 for (sel = &sparc_select[0]; sel->name; ++sel)
765 for (cpu = &cpu_table[0]; cpu->name; ++cpu)
766 if (! strcmp (sel->string, cpu->name))
769 sparc_cpu = cpu->processor;
773 target_flags &= ~cpu->disable;
774 target_flags |= cpu->enable;
780 error ("bad value (%s) for %s switch", sel->string, sel->name);
784 /* If -mfpu or -mno-fpu was explicitly used, don't override with
785 the processor default. */
787 target_flags = (target_flags & ~MASK_FPU) | fpu;
789 /* Don't allow -mvis if FPU is disabled. */
791 target_flags &= ~MASK_VIS;
793 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions are available.
795 -m64 also implies v9. */
796 if (TARGET_VIS || TARGET_ARCH64)
798 target_flags |= MASK_V9;
799 target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
802 /* Use the deprecated v8 insns for sparc64 in 32 bit mode. */
803 if (TARGET_V9 && TARGET_ARCH32)
804 target_flags |= MASK_DEPRECATED_V8_INSNS;
806 /* V8PLUS requires V9, makes no sense in 64 bit mode. */
807 if (! TARGET_V9 || TARGET_ARCH64)
808 target_flags &= ~MASK_V8PLUS;
810 /* Don't use stack biasing in 32 bit mode. */
812 target_flags &= ~MASK_STACK_BIAS;
814 /* Supply a default value for align_functions. */
815 if (align_functions == 0
816 && (sparc_cpu == PROCESSOR_ULTRASPARC
817 || sparc_cpu == PROCESSOR_ULTRASPARC3
818 || sparc_cpu == PROCESSOR_NIAGARA
819 || sparc_cpu == PROCESSOR_NIAGARA2))
820 align_functions = 32;
822 /* Validate PCC_STRUCT_RETURN. */
823 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
824 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
826 /* Only use .uaxword when compiling for a 64-bit target. */
828 targetm.asm_out.unaligned_op.di = NULL;
830 /* Do various machine dependent initializations. */
833 /* Acquire unique alias sets for our private stuff. */
834 sparc_sr_alias_set = new_alias_set ();
835 struct_value_alias_set = new_alias_set ();
837 /* Set up function hooks. */
838 init_machine_status = sparc_init_machine_status;
843 case PROCESSOR_CYPRESS:
844 sparc_costs = &cypress_costs;
847 case PROCESSOR_SPARCLITE:
848 case PROCESSOR_SUPERSPARC:
849 sparc_costs = &supersparc_costs;
853 case PROCESSOR_HYPERSPARC:
854 case PROCESSOR_SPARCLITE86X:
855 sparc_costs = &hypersparc_costs;
857 case PROCESSOR_SPARCLET:
858 case PROCESSOR_TSC701:
859 sparc_costs = &sparclet_costs;
862 case PROCESSOR_ULTRASPARC:
863 sparc_costs = &ultrasparc_costs;
865 case PROCESSOR_ULTRASPARC3:
866 sparc_costs = &ultrasparc3_costs;
868 case PROCESSOR_NIAGARA:
869 sparc_costs = &niagara_costs;
871 case PROCESSOR_NIAGARA2:
872 sparc_costs = &niagara2_costs;
876 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
877 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
878 target_flags |= MASK_LONG_DOUBLE_128;
881 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
882 set_param_value ("simultaneous-prefetches",
883 ((sparc_cpu == PROCESSOR_ULTRASPARC
884 || sparc_cpu == PROCESSOR_NIAGARA
885 || sparc_cpu == PROCESSOR_NIAGARA2)
887 : (sparc_cpu == PROCESSOR_ULTRASPARC3
889 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
890 set_param_value ("l1-cache-line-size",
891 ((sparc_cpu == PROCESSOR_ULTRASPARC
892 || sparc_cpu == PROCESSOR_ULTRASPARC3
893 || sparc_cpu == PROCESSOR_NIAGARA
894 || sparc_cpu == PROCESSOR_NIAGARA2)
898 /* Miscellaneous utilities. */
900 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
901 or branch on register contents instructions. */
904 v9_regcmp_p (enum rtx_code code)
906 return (code == EQ || code == NE || code == GE || code == LT
907 || code == LE || code == GT);
910 /* Nonzero if OP is a floating point constant which can
911 be loaded into an integer register using a single
912 sethi instruction. */
917 if (GET_CODE (op) == CONST_DOUBLE)
922 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
923 REAL_VALUE_TO_TARGET_SINGLE (r, i);
924 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
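/* Editorial example: 1.0f has the single-precision bit pattern 0x3f800000,
   whose low 10 bits are zero and which does not fit in a signed 13-bit
   immediate, so the predicate above accepts it and the constant can be put
   in an integer register with a single "sethi %hi(0x3f800000), %reg".  */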
930 /* Nonzero if OP is a floating point constant which can
931 be loaded into an integer register using a single mov instruction. */
937 if (GET_CODE (op) == CONST_DOUBLE)
942 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
943 REAL_VALUE_TO_TARGET_SINGLE (r, i);
944 return SPARC_SIMM13_P (i);
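/* Editorial example: a float whose whole bit pattern fits in a signed 13-bit
   immediate, e.g. the denormal with pattern 0x00000123, satisfies
   SPARC_SIMM13_P and can be loaded with a single "mov 0x123, %reg".  */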
950 /* Nonzero if OP is a floating point constant which can
951 be loaded into an integer register using a high/losum
952 instruction sequence. */
955 fp_high_losum_p (rtx op)
957 /* The constraints calling this should only be in
958 SFmode move insns, so any constant which cannot
959 be moved using a single insn will do. */
960 if (GET_CODE (op) == CONST_DOUBLE)
965 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
966 REAL_VALUE_TO_TARGET_SINGLE (r, i);
967 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
973 /* Expand a move instruction. Return true if all work is done. */
976 sparc_expand_move (enum machine_mode mode, rtx *operands)
978 /* Handle sets of MEM first. */
979 if (GET_CODE (operands[0]) == MEM)
981 /* 0 is a register (or a pair of registers) on SPARC. */
982 if (register_or_zero_operand (operands[1], mode))
985 if (!reload_in_progress)
987 operands[0] = validize_mem (operands[0]);
988 operands[1] = force_reg (mode, operands[1]);
992 /* Fixup TLS cases. */
994 && CONSTANT_P (operands[1])
995 && GET_CODE (operands[1]) != HIGH
996 && sparc_tls_referenced_p (operands [1]))
998 rtx sym = operands[1];
1001 if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
1003 addend = XEXP (XEXP (sym, 0), 1);
1004 sym = XEXP (XEXP (sym, 0), 0);
1007 gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));
1009 sym = legitimize_tls_address (sym);
1012 sym = gen_rtx_PLUS (mode, sym, addend);
1013 sym = force_operand (sym, operands[0]);
1018 /* Fixup PIC cases. */
1019 if (flag_pic && CONSTANT_P (operands[1]))
1021 if (pic_address_needs_scratch (operands[1]))
1022 operands[1] = legitimize_pic_address (operands[1], mode, 0);
1024 /* VxWorks does not impose a fixed gap between segments; the run-time
1025 gap can be different from the object-file gap. We therefore can't
1026 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
1027 are absolutely sure that X is in the same segment as the GOT.
1028 Unfortunately, the flexibility of linker scripts means that we
1029 can't be sure of that in general, so assume that _G_O_T_-relative
1030 accesses are never valid on VxWorks. */
1031 if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
1035 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
1041 gcc_assert (TARGET_ARCH64);
1042 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
1047 if (symbolic_operand (operands[1], mode))
1049 operands[1] = legitimize_pic_address (operands[1],
1051 (reload_in_progress ?
1058 /* If we are trying to toss an integer constant into FP registers,
1059 or loading a FP or vector constant, force it into memory. */
1060 if (CONSTANT_P (operands[1])
1061 && REG_P (operands[0])
1062 && (SPARC_FP_REG_P (REGNO (operands[0]))
1063 || SCALAR_FLOAT_MODE_P (mode)
1064 || VECTOR_MODE_P (mode)))
1066 /* emit_group_store will send such bogosity to us when it is
1067 not storing directly into memory. So fix this up to avoid
1068 crashes in output_constant_pool. */
1069 if (operands [1] == const0_rtx)
1070 operands[1] = CONST0_RTX (mode);
1072 /* We can clear FP registers if TARGET_VIS, and always other regs. */
1073 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
1074 && const_zero_operand (operands[1], mode))
1077 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
1078 /* We are able to build any SF constant in integer registers
1079 with at most 2 instructions. */
1081 /* And any DF constant in integer registers. */
1083 && (reload_completed || reload_in_progress))))
1086 operands[1] = force_const_mem (mode, operands[1]);
1087 if (!reload_in_progress)
1088 operands[1] = validize_mem (operands[1]);
1092 /* Accept non-constants and valid constants unmodified. */
1093 if (!CONSTANT_P (operands[1])
1094 || GET_CODE (operands[1]) == HIGH
1095 || input_operand (operands[1], mode))
1101 /* All QImode constants require only one insn, so proceed. */
1106 sparc_emit_set_const32 (operands[0], operands[1]);
1110 /* input_operand should have filtered out 32-bit mode. */
1111 sparc_emit_set_const64 (operands[0], operands[1]);
1121 /* Load OP1, a 32-bit constant, into OP0, a register.
1122 We know it can't be done in one insn when we get
1123 here, the move expander guarantees this. */
1126 sparc_emit_set_const32 (rtx op0, rtx op1)
1128 enum machine_mode mode = GET_MODE (op0);
1131 if (reload_in_progress || reload_completed)
1134 temp = gen_reg_rtx (mode);
1136 if (GET_CODE (op1) == CONST_INT)
1138 gcc_assert (!small_int_operand (op1, mode)
1139 && !const_high_operand (op1, mode));
1141 /* Emit them as real moves instead of a HIGH/LO_SUM,
1142 this way CSE can see everything and reuse intermediate
1143 values if it wants. */
1144 emit_insn (gen_rtx_SET (VOIDmode, temp,
1145 GEN_INT (INTVAL (op1)
1146 & ~(HOST_WIDE_INT)0x3ff)));
1148 emit_insn (gen_rtx_SET (VOIDmode,
1150 gen_rtx_IOR (mode, temp,
1151 GEN_INT (INTVAL (op1) & 0x3ff))));
1155 /* A symbol, emit in the traditional way. */
1156 emit_insn (gen_rtx_SET (VOIDmode, temp,
1157 gen_rtx_HIGH (mode, op1)));
1158 emit_insn (gen_rtx_SET (VOIDmode,
1159 op0, gen_rtx_LO_SUM (mode, temp, op1)));
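/* Editorial example: for op1 = 0x12345678 the CONST_INT path above emits
   (set temp 0x12345400) followed by (set op0 (ior temp 0x278)), which the
   output patterns print as "sethi %hi(0x12345678), %temp" and
   "or %temp, 0x278, %op0"; exposing the masked constants instead of a
   HIGH/LO_SUM pair is what lets CSE reuse the intermediate value.  */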
1163 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
1164 If TEMP is nonzero, we are forbidden to use any other scratch
1165 registers. Otherwise, we are allowed to generate them as needed.
1167 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
1168 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
1171 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
1173 rtx temp1, temp2, temp3, temp4, temp5;
1176 if (temp && GET_MODE (temp) == TImode)
1179 temp = gen_rtx_REG (DImode, REGNO (temp));
1182 /* SPARC-V9 code-model support. */
1183 switch (sparc_cmodel)
1186 /* The range spanned by all instructions in the object is less
1187 than 2^31 bytes (2GB) and the distance from any instruction
1188 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1189 than 2^31 bytes (2GB).
1191 The executable must be in the low 4GB of the virtual address
1194 sethi %hi(symbol), %temp1
1195 or %temp1, %lo(symbol), %reg */
1197 temp1 = temp; /* op0 is allowed. */
1199 temp1 = gen_reg_rtx (DImode);
1201 emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
1202 emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
1206 /* The range spanned by all instructions in the object is less
1207 than 2^31 bytes (2GB) and the distance from any instruction
1208 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1209 than 2^31 bytes (2GB).
1211 The executable must be in the low 16TB of the virtual address
1214 sethi %h44(symbol), %temp1
1215 or %temp1, %m44(symbol), %temp2
1216 sllx %temp2, 12, %temp3
1217 or %temp3, %l44(symbol), %reg */
1222 temp3 = temp; /* op0 is allowed. */
1226 temp1 = gen_reg_rtx (DImode);
1227 temp2 = gen_reg_rtx (DImode);
1228 temp3 = gen_reg_rtx (DImode);
1231 emit_insn (gen_seth44 (temp1, op1));
1232 emit_insn (gen_setm44 (temp2, temp1, op1));
1233 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1234 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
1235 emit_insn (gen_setl44 (op0, temp3, op1));
1239 /* The range spanned by all instructions in the object is less
1240 than 2^31 bytes (2GB) and the distance from any instruction
1241 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1242 than 2^31 bytes (2GB).
1244 The executable can be placed anywhere in the virtual address
1247 sethi %hh(symbol), %temp1
1248 sethi %lm(symbol), %temp2
1249 or %temp1, %hm(symbol), %temp3
1250 sllx %temp3, 32, %temp4
1251 or %temp4, %temp2, %temp5
1252 or %temp5, %lo(symbol), %reg */
1255 /* It is possible that one of the registers we got for operands[2]
1256 might coincide with that of operands[0] (which is why we made
1257 it TImode). Pick the other one to use as our scratch. */
1258 if (rtx_equal_p (temp, op0))
1260 gcc_assert (ti_temp);
1261 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1264 temp2 = temp; /* op0 is _not_ allowed, see above. */
1271 temp1 = gen_reg_rtx (DImode);
1272 temp2 = gen_reg_rtx (DImode);
1273 temp3 = gen_reg_rtx (DImode);
1274 temp4 = gen_reg_rtx (DImode);
1275 temp5 = gen_reg_rtx (DImode);
1278 emit_insn (gen_sethh (temp1, op1));
1279 emit_insn (gen_setlm (temp2, op1));
1280 emit_insn (gen_sethm (temp3, temp1, op1));
1281 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1282 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1283 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1284 gen_rtx_PLUS (DImode, temp4, temp2)));
1285 emit_insn (gen_setlo (op0, temp5, op1));
1289 /* Old old old backwards compatibility kruft here.
1290 Essentially it is MEDLOW with a fixed 64-bit
1291 virtual base added to all data segment addresses.
1292 Text-segment stuff is computed like MEDANY, we can't
1293 reuse the code above because the relocation knobs
1296 Data segment: sethi %hi(symbol), %temp1
1297 add %temp1, EMBMEDANY_BASE_REG, %temp2
1298 or %temp2, %lo(symbol), %reg */
1299 if (data_segment_operand (op1, GET_MODE (op1)))
1303 temp1 = temp; /* op0 is allowed. */
1308 temp1 = gen_reg_rtx (DImode);
1309 temp2 = gen_reg_rtx (DImode);
1312 emit_insn (gen_embmedany_sethi (temp1, op1));
1313 emit_insn (gen_embmedany_brsum (temp2, temp1));
1314 emit_insn (gen_embmedany_losum (op0, temp2, op1));
1317 /* Text segment: sethi %uhi(symbol), %temp1
1318 sethi %hi(symbol), %temp2
1319 or %temp1, %ulo(symbol), %temp3
1320 sllx %temp3, 32, %temp4
1321 or %temp4, %temp2, %temp5
1322 or %temp5, %lo(symbol), %reg */
1327 /* It is possible that one of the registers we got for operands[2]
1328 might coincide with that of operands[0] (which is why we made
1329 it TImode). Pick the other one to use as our scratch. */
1330 if (rtx_equal_p (temp, op0))
1332 gcc_assert (ti_temp);
1333 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1336 temp2 = temp; /* op0 is _not_ allowed, see above. */
1343 temp1 = gen_reg_rtx (DImode);
1344 temp2 = gen_reg_rtx (DImode);
1345 temp3 = gen_reg_rtx (DImode);
1346 temp4 = gen_reg_rtx (DImode);
1347 temp5 = gen_reg_rtx (DImode);
1350 emit_insn (gen_embmedany_textuhi (temp1, op1));
1351 emit_insn (gen_embmedany_texthi (temp2, op1));
1352 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
1353 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1354 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1355 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1356 gen_rtx_PLUS (DImode, temp4, temp2)));
1357 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
1366 #if HOST_BITS_PER_WIDE_INT == 32
1368 sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
1373 /* These avoid problems when cross compiling. If we do not
1374 go through all this hair then the optimizer will see
1375 invalid REG_EQUAL notes or in some cases none at all. */
1376 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
1377 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
1378 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
1379 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
1381 /* The optimizer is not to assume anything about exactly
1382 which bits are set for a HIGH, they are unspecified.
1383 Unfortunately this leads to many missed optimizations
1384 during CSE. We mask out the non-HIGH bits so that the insn
1385 matches a plain movdi, to alleviate this problem. */
1387 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
1389 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
1393 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
1395 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
1399 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
1401 return gen_rtx_IOR (DImode, src, GEN_INT (val));
1405 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
1407 return gen_rtx_XOR (DImode, src, GEN_INT (val));
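/* Editorial illustration: for val = 0x12345678, gen_safe_HIGH64 emits
   (set dest (const_int 0x12345400)) rather than (set dest (high:DI ...)).
   The constant is exactly what a sethi produces, so the insn is still a
   plain move, but the optimizers see a fully specified value instead of an
   opaque HIGH with unspecified low bits.  */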
1410 /* Worker routines for 64-bit constant formation on arch64.
1411 One of the key things to be doing in these emissions is
1412 to create as many temp REGs as possible. This makes it
1413 possible for half-built constants to be used later when
1414 such values are similar to something required later on.
1415 Without doing this, the optimizer cannot see such opportunities. */
1418 static void sparc_emit_set_const64_quick1 (rtx, rtx,
1419 unsigned HOST_WIDE_INT, int);
1422 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
1423 unsigned HOST_WIDE_INT low_bits, int is_neg)
1425 unsigned HOST_WIDE_INT high_bits;
1428 high_bits = (~low_bits) & 0xffffffff;
1430 high_bits = low_bits;
1432 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1435 emit_insn (gen_rtx_SET (VOIDmode, op0,
1436 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1440 /* If we are XOR'ing with -1, then we should emit a one's complement
1441 instead. This way the combiner will notice logical operations
1442 such as ANDN later on and substitute. */
1443 if ((low_bits & 0x3ff) == 0x3ff)
1445 emit_insn (gen_rtx_SET (VOIDmode, op0,
1446 gen_rtx_NOT (DImode, temp)));
1450 emit_insn (gen_rtx_SET (VOIDmode, op0,
1451 gen_safe_XOR64 (temp,
1452 (-(HOST_WIDE_INT)0x400
1453 | (low_bits & 0x3ff)))));
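/* Editorial example of the is_neg path: for 0xffffffff87654321, low_bits is
   0x87654321 and high_bits becomes ~low_bits = 0x789abcde, so we emit
   "sethi %hi(0x789abcde), %temp" (temp = 0x789abc00) and then xor temp with
   the sign-extended immediate -0x400 | 0x321 = -223; the xor flips the upper
   32 bits to all ones and restores the low bits, giving the full constant in
   two insns.  */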
1458 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1459 unsigned HOST_WIDE_INT, int);
1462 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1463 unsigned HOST_WIDE_INT high_bits,
1464 unsigned HOST_WIDE_INT low_immediate,
1469 if ((high_bits & 0xfffffc00) != 0)
1471 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1472 if ((high_bits & ~0xfffffc00) != 0)
1473 emit_insn (gen_rtx_SET (VOIDmode, op0,
1474 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1480 emit_insn (gen_safe_SET64 (temp, high_bits));
1484 /* Now shift it up into place. */
1485 emit_insn (gen_rtx_SET (VOIDmode, op0,
1486 gen_rtx_ASHIFT (DImode, temp2,
1487 GEN_INT (shift_count))));
1489 /* If there is a low immediate part piece, finish up by
1490 putting that in as well. */
1491 if (low_immediate != 0)
1492 emit_insn (gen_rtx_SET (VOIDmode, op0,
1493 gen_safe_OR64 (op0, low_immediate)));
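/* Editorial example: to build 0x1234567800000abc, the caller below passes
   high_bits = 0x12345678, low_immediate = 0xabc and shift_count = 32, and
   the routine produces "sethi %hi(0x12345678), %reg", "or %reg, 0x278, %reg",
   "sllx %reg, 32, %reg" and finally "or %reg, 0xabc, %reg".  */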
1496 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1497 unsigned HOST_WIDE_INT);
1499 /* Full 64-bit constant decomposition. Even though this is the
1500 'worst' case, we still optimize a few things away. */
1502 sparc_emit_set_const64_longway (rtx op0, rtx temp,
1503 unsigned HOST_WIDE_INT high_bits,
1504 unsigned HOST_WIDE_INT low_bits)
1508 if (reload_in_progress || reload_completed)
1511 sub_temp = gen_reg_rtx (DImode);
1513 if ((high_bits & 0xfffffc00) != 0)
1515 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1516 if ((high_bits & ~0xfffffc00) != 0)
1517 emit_insn (gen_rtx_SET (VOIDmode,
1519 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1525 emit_insn (gen_safe_SET64 (temp, high_bits));
1529 if (!reload_in_progress && !reload_completed)
1531 rtx temp2 = gen_reg_rtx (DImode);
1532 rtx temp3 = gen_reg_rtx (DImode);
1533 rtx temp4 = gen_reg_rtx (DImode);
1535 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1536 gen_rtx_ASHIFT (DImode, sub_temp,
1539 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
1540 if ((low_bits & ~0xfffffc00) != 0)
1542 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1543 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
1544 emit_insn (gen_rtx_SET (VOIDmode, op0,
1545 gen_rtx_PLUS (DImode, temp4, temp3)));
1549 emit_insn (gen_rtx_SET (VOIDmode, op0,
1550 gen_rtx_PLUS (DImode, temp4, temp2)));
1555 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
1556 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
1557 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
1560 /* We are in the middle of reload, so this is really
1561 painful. However we do still make an attempt to
1562 avoid emitting truly stupid code. */
1563 if (low1 != const0_rtx)
1565 emit_insn (gen_rtx_SET (VOIDmode, op0,
1566 gen_rtx_ASHIFT (DImode, sub_temp,
1567 GEN_INT (to_shift))));
1568 emit_insn (gen_rtx_SET (VOIDmode, op0,
1569 gen_rtx_IOR (DImode, op0, low1)));
1577 if (low2 != const0_rtx)
1579 emit_insn (gen_rtx_SET (VOIDmode, op0,
1580 gen_rtx_ASHIFT (DImode, sub_temp,
1581 GEN_INT (to_shift))));
1582 emit_insn (gen_rtx_SET (VOIDmode, op0,
1583 gen_rtx_IOR (DImode, op0, low2)));
1591 emit_insn (gen_rtx_SET (VOIDmode, op0,
1592 gen_rtx_ASHIFT (DImode, sub_temp,
1593 GEN_INT (to_shift))));
1594 if (low3 != const0_rtx)
1595 emit_insn (gen_rtx_SET (VOIDmode, op0,
1596 gen_rtx_IOR (DImode, op0, low3)));
1601 /* Analyze a 64-bit constant for certain properties. */
1602 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
1603 unsigned HOST_WIDE_INT,
1604 int *, int *, int *);
1607 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
1608 unsigned HOST_WIDE_INT low_bits,
1609 int *hbsp, int *lbsp, int *abbasp)
1611 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
1614 lowest_bit_set = highest_bit_set = -1;
1618 if ((lowest_bit_set == -1)
1619 && ((low_bits >> i) & 1))
1621 if ((highest_bit_set == -1)
1622 && ((high_bits >> (32 - i - 1)) & 1))
1623 highest_bit_set = (64 - i - 1);
1626 && ((highest_bit_set == -1)
1627 || (lowest_bit_set == -1)));
1633 if ((lowest_bit_set == -1)
1634 && ((high_bits >> i) & 1))
1635 lowest_bit_set = i + 32;
1636 if ((highest_bit_set == -1)
1637 && ((low_bits >> (32 - i - 1)) & 1))
1638 highest_bit_set = 32 - i - 1;
1641 && ((highest_bit_set == -1)
1642 || (lowest_bit_set == -1)));
1644 /* If there are no bits set this should have gone out
1645 as one instruction! */
1646 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
1647 all_bits_between_are_set = 1;
1648 for (i = lowest_bit_set; i <= highest_bit_set; i++)
1652 if ((low_bits & (1 << i)) != 0)
1657 if ((high_bits & (1 << (i - 32))) != 0)
1660 all_bits_between_are_set = 0;
1663 *hbsp = highest_bit_set;
1664 *lbsp = lowest_bit_set;
1665 *abbasp = all_bits_between_are_set;
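/* Editorial example: for the constant 0x00000ff000000000 we get
   high_bits = 0x00000ff0 and low_bits = 0, so the scans above find
   lowest_bit_set = 36, highest_bit_set = 43 and all_bits_between_are_set = 1;
   const64_is_2insns below then approves a two-insn sequence such as
   "mov 0xff, %reg; sllx %reg, 36, %reg".  */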
1668 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
1671 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
1672 unsigned HOST_WIDE_INT low_bits)
1674 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
1677 || high_bits == 0xffffffff)
1680 analyze_64bit_constant (high_bits, low_bits,
1681 &highest_bit_set, &lowest_bit_set,
1682 &all_bits_between_are_set);
1684 if ((highest_bit_set == 63
1685 || lowest_bit_set == 0)
1686 && all_bits_between_are_set != 0)
1689 if ((highest_bit_set - lowest_bit_set) < 21)
1695 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
1696 unsigned HOST_WIDE_INT,
1699 static unsigned HOST_WIDE_INT
1700 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
1701 unsigned HOST_WIDE_INT low_bits,
1702 int lowest_bit_set, int shift)
1704 HOST_WIDE_INT hi, lo;
1706 if (lowest_bit_set < 32)
1708 lo = (low_bits >> lowest_bit_set) << shift;
1709 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
1714 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
1716 gcc_assert (! (hi & lo));
1720 /* Here we are sure to be arch64 and this is an integer constant
1721 being loaded into a register. Emit the most efficient
1722 insn sequence possible. Detection of all the 1-insn cases
1723 has been done already. */
1725 sparc_emit_set_const64 (rtx op0, rtx op1)
1727 unsigned HOST_WIDE_INT high_bits, low_bits;
1728 int lowest_bit_set, highest_bit_set;
1729 int all_bits_between_are_set;
1732 /* Sanity check that we know what we are working with. */
1733 gcc_assert (TARGET_ARCH64
1734 && (GET_CODE (op0) == SUBREG
1735 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1737 if (reload_in_progress || reload_completed)
1740 if (GET_CODE (op1) != CONST_INT)
1742 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1747 temp = gen_reg_rtx (DImode);
1749 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1750 low_bits = (INTVAL (op1) & 0xffffffff);
1752 /* low_bits bits 0 --> 31
1753 high_bits bits 32 --> 63 */
1755 analyze_64bit_constant (high_bits, low_bits,
1756 &highest_bit_set, &lowest_bit_set,
1757 &all_bits_between_are_set);
1759 /* First try for a 2-insn sequence. */
1761 /* These situations are preferred because the optimizer can
1762 * do more things with them:
1764 * sllx %reg, shift, %reg
1766 * srlx %reg, shift, %reg
1767 * 3) mov some_small_const, %reg
1768 * sllx %reg, shift, %reg
1770 if (((highest_bit_set == 63
1771 || lowest_bit_set == 0)
1772 && all_bits_between_are_set != 0)
1773 || ((highest_bit_set - lowest_bit_set) < 12))
1775 HOST_WIDE_INT the_const = -1;
1776 int shift = lowest_bit_set;
1778 if ((highest_bit_set != 63
1779 && lowest_bit_set != 0)
1780 || all_bits_between_are_set == 0)
1783 create_simple_focus_bits (high_bits, low_bits,
1786 else if (lowest_bit_set == 0)
1787 shift = -(63 - highest_bit_set);
1789 gcc_assert (SPARC_SIMM13_P (the_const));
1790 gcc_assert (shift != 0);
1792 emit_insn (gen_safe_SET64 (temp, the_const));
1794 emit_insn (gen_rtx_SET (VOIDmode,
1796 gen_rtx_ASHIFT (DImode,
1800 emit_insn (gen_rtx_SET (VOIDmode,
1802 gen_rtx_LSHIFTRT (DImode,
1804 GEN_INT (-shift))));
1808 /* Now a range of 22 or less bits set somewhere.
1809 * 1) sethi %hi(focus_bits), %reg
1810 * sllx %reg, shift, %reg
1811 * 2) sethi %hi(focus_bits), %reg
1812 * srlx %reg, shift, %reg
1814 if ((highest_bit_set - lowest_bit_set) < 21)
1816 unsigned HOST_WIDE_INT focus_bits =
1817 create_simple_focus_bits (high_bits, low_bits,
1818 lowest_bit_set, 10);
1820 gcc_assert (SPARC_SETHI_P (focus_bits));
1821 gcc_assert (lowest_bit_set != 10);
1823 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1825 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1826 if (lowest_bit_set < 10)
1827 emit_insn (gen_rtx_SET (VOIDmode,
1829 gen_rtx_LSHIFTRT (DImode, temp,
1830 GEN_INT (10 - lowest_bit_set))));
1831 else if (lowest_bit_set > 10)
1832 emit_insn (gen_rtx_SET (VOIDmode,
1834 gen_rtx_ASHIFT (DImode, temp,
1835 GEN_INT (lowest_bit_set - 10))));
1839 /* 1) sethi %hi(low_bits), %reg
1840 * or %reg, %lo(low_bits), %reg
1841 * 2) sethi %hi(~low_bits), %reg
1842 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1845 || high_bits == 0xffffffff)
1847 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1848 (high_bits == 0xffffffff));
1852 /* Now, try 3-insn sequences. */
1854 /* 1) sethi %hi(high_bits), %reg
1855 * or %reg, %lo(high_bits), %reg
1856 * sllx %reg, 32, %reg
1860 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1864 /* We may be able to do something quick
1865 when the constant is negated, so try that. */
1866 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1867 (~low_bits) & 0xfffffc00))
1869 /* NOTE: The trailing bits get XOR'd so we need the
1870 non-negated bits, not the negated ones. */
1871 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1873 if ((((~high_bits) & 0xffffffff) == 0
1874 && ((~low_bits) & 0x80000000) == 0)
1875 || (((~high_bits) & 0xffffffff) == 0xffffffff
1876 && ((~low_bits) & 0x80000000) != 0))
1878 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1880 if ((SPARC_SETHI_P (fast_int)
1881 && (~high_bits & 0xffffffff) == 0)
1882 || SPARC_SIMM13_P (fast_int))
1883 emit_insn (gen_safe_SET64 (temp, fast_int));
1885 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1890 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1891 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
1892 sparc_emit_set_const64 (temp, negated_const);
1895 /* If we are XOR'ing with -1, then we should emit a one's complement
1896 instead. This way the combiner will notice logical operations
1897 such as ANDN later on and substitute. */
1898 if (trailing_bits == 0x3ff)
1900 emit_insn (gen_rtx_SET (VOIDmode, op0,
1901 gen_rtx_NOT (DImode, temp)));
1905 emit_insn (gen_rtx_SET (VOIDmode,
1907 gen_safe_XOR64 (temp,
1908 (-0x400 | trailing_bits))));
1913 /* 1) sethi %hi(xxx), %reg
1914 * or %reg, %lo(xxx), %reg
1915 * sllx %reg, yyy, %reg
1917 * ??? This is just a generalized version of the low_bits==0
1918 * thing above, FIXME...
1920 if ((highest_bit_set - lowest_bit_set) < 32)
1922 unsigned HOST_WIDE_INT focus_bits =
1923 create_simple_focus_bits (high_bits, low_bits,
1926 /* We can't get here in this state. */
1927 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1929 /* So what we know is that the set bits straddle the
1930 middle of the 64-bit word. */
1931 sparc_emit_set_const64_quick2 (op0, temp,
1937 /* 1) sethi %hi(high_bits), %reg
1938 * or %reg, %lo(high_bits), %reg
1939 * sllx %reg, 32, %reg
1940 * or %reg, low_bits, %reg
1942 if (SPARC_SIMM13_P(low_bits)
1943 && ((int)low_bits > 0))
1945 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
1949 /* The easiest way when all else fails is full decomposition. */
1951 printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
1952 high_bits, low_bits, ~high_bits, ~low_bits);
1954 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1956 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
1958 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1959 return the mode to be used for the comparison. For floating-point,
1960 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1961 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1962 processing is needed. */
1965 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1967 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1993 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
1994 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
1996 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1997 return CCX_NOOVmode;
2003 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2010 /* Emit the compare insn and return the CC reg for a CODE comparison
2011 with operands X and Y. */
2014 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2016 enum machine_mode mode;
2019 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2022 mode = SELECT_CC_MODE (code, x, y);
2024 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2025 fcc regs (cse can't tell they're really call clobbered regs and will
2026 remove a duplicate comparison even if there is an intervening function
2027 call - it will then try to reload the cc reg via an int reg which is why
2028 we need the movcc patterns). It is possible to provide the movcc
2029 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2030 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2031 to tell cse that CCFPE mode registers (even pseudos) are call clobbered. */
2034 /* ??? This is an experiment. Rather than making changes to cse which may
2035 or may not be easy/clean, we do our own cse. This is possible because
2036 we will generate hard registers. Cse knows they're call clobbered (it
2037 doesn't know the same thing about pseudos). If we guess wrong, no big
2038 deal, but if we win, great! */
2040 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2041 #if 1 /* experiment */
2044 /* We cycle through the registers to ensure they're all exercised. */
2045 static int next_fcc_reg = 0;
2046 /* Previous x,y for each fcc reg. */
2047 static rtx prev_args[4][2];
2049 /* Scan prev_args for x,y. */
2050 for (reg = 0; reg < 4; reg++)
2051 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2056 prev_args[reg][0] = x;
2057 prev_args[reg][1] = y;
2058 next_fcc_reg = (next_fcc_reg + 1) & 3;
2060 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2063 cc_reg = gen_reg_rtx (mode);
2064 #endif /* ! experiment */
2065 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2066 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2068 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
  /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD.  If we do, this
2071 will only result in an unrecognizable insn so no point in asserting. */
2072 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2078 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2081 gen_compare_reg (rtx cmp)
2083 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2086 /* This function is used for v9 only.
2087 DEST is the target of the Scc insn.
2088 CODE is the code for an Scc's comparison.
2089 X and Y are the values we compare.
   This function is needed to turn

	   (set (reg:SI 110)
		(gt (reg:CCX 100 %icc)
		    (const_int 0)))
   into
	   (set (reg:SI 110)
		(gt:DI (reg:CCX 100 %icc)
		       (const_int 0)))

   That is, the instruction recognizer needs to see the mode of the comparison
   to find the right instruction.  We could use "gt:DI" right in the
   define_expand, but leaving it out allows us to handle DI, SI, etc.  */
2106 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2109 && (GET_MODE (x) == DImode
2110 || GET_MODE (dest) == DImode))
2113 /* Try to use the movrCC insns. */
2115 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2117 && v9_regcmp_p (compare_code))
      /* Special case for op0 != 0.  This can be done with one instruction if
	 dest == op0.  */
2125 if (compare_code == NE
2126 && GET_MODE (dest) == DImode
2127 && rtx_equal_p (op0, dest))
	  emit_insn (gen_rtx_SET (VOIDmode, dest,
		      gen_rtx_IF_THEN_ELSE (DImode,
			       gen_rtx_fmt_ee (compare_code, DImode,
					       op0, const0_rtx),
			       const1_rtx, dest)));
2138 if (reg_overlap_mentioned_p (dest, op0))
2140 /* Handle the case where dest == x.
2141 We "early clobber" the result. */
2142 op0 = gen_reg_rtx (GET_MODE (x));
2143 emit_move_insn (op0, x);
2146 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2147 if (GET_MODE (op0) != DImode)
2149 temp = gen_reg_rtx (DImode);
2150 convert_move (temp, op0, 0);
      emit_insn (gen_rtx_SET (VOIDmode, dest,
		  gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
			   gen_rtx_fmt_ee (compare_code, DImode,
					   temp, const0_rtx),
			   const1_rtx, dest)));
2164 x = gen_compare_reg_1 (compare_code, x, y);
2167 gcc_assert (GET_MODE (x) != CC_NOOVmode
2168 && GET_MODE (x) != CCX_NOOVmode);
2170 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2171 emit_insn (gen_rtx_SET (VOIDmode, dest,
2172 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2173 gen_rtx_fmt_ee (compare_code,
2174 GET_MODE (x), x, y),
2175 const1_rtx, dest)));
2181 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2182 without jumps using the addx/subx instructions. */
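/* For instance (a sketch of the intended code, not lifted verbatim from
   the md patterns), "dest = (a < b)" for unsigned SImode operands can be
   emitted without a branch as

	subcc	%a, %b, %g0	! sets the carry flag iff a < b (unsigned)
	addx	%g0, 0, %dest	! dest = 0 + 0 + carry

   and seq/sne rely on the analogous subx form.  */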
2185 emit_scc_insn (rtx operands[])
2192 /* The quad-word fp compare library routines all return nonzero to indicate
2193 true, which is different from the equivalent libgcc routines, so we must
2194 handle them specially here. */
2195 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2197 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2198 GET_CODE (operands[1]));
2199 operands[2] = XEXP (operands[1], 0);
2200 operands[3] = XEXP (operands[1], 1);
2203 code = GET_CODE (operands[1]);
2207 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2208 more applications). The exception to this is "reg != 0" which can
2209 be done in one instruction on v9 (so we do it). */
2212 if (GET_MODE (x) == SImode)
2214 rtx pat = gen_seqsi_special (operands[0], x, y);
2218 else if (GET_MODE (x) == DImode)
2220 rtx pat = gen_seqdi_special (operands[0], x, y);
2228 if (GET_MODE (x) == SImode)
2230 rtx pat = gen_snesi_special (operands[0], x, y);
2234 else if (GET_MODE (x) == DImode)
2236 rtx pat = gen_snedi_special (operands[0], x, y);
2242 /* For the rest, on v9 we can use conditional moves. */
2246 if (gen_v9_scc (operands[0], code, x, y))
  /* We can do LTU and GEU using the addx/subx instructions too.  And
     for GTU/LEU, if both operands are registers, swap them and fall
     back to the easy case.  */
2253 if (code == GTU || code == LEU)
2255 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2256 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2261 code = swap_condition (code);
2265 if (code == LTU || code == GEU)
2267 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2268 gen_rtx_fmt_ee (code, SImode,
					  gen_compare_reg_1 (code, x, y),
					  const0_rtx)));
2274 /* Nope, do branches. */
2278 /* Emit a conditional jump insn for the v9 architecture using comparison code
2279 CODE and jump target LABEL.
2280 This function exists to take advantage of the v9 brxx insns. */
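/* E.g. for (ne (reg:DI %o0) (const_int 0)) this allows

	brnz,pt	%o0, .Llabel
	 nop

   instead of a cmp/branch-on-%xcc pair (sketch; the delay slot is filled
   later, and the label name is illustrative).  */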
2283 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
  emit_jump_insn (gen_rtx_SET (VOIDmode,
			       pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode,
				    gen_rtx_fmt_ee (code, GET_MODE (op0),
						    op0, const0_rtx),
				    gen_rtx_LABEL_REF (VOIDmode, label),
				    pc_rtx)));
2295 emit_conditional_branch_insn (rtx operands[])
2297 /* The quad-word fp compare library routines all return nonzero to indicate
2298 true, which is different from the equivalent libgcc routines, so we must
2299 handle them specially here. */
2300 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2302 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2303 GET_CODE (operands[0]));
2304 operands[1] = XEXP (operands[0], 0);
2305 operands[2] = XEXP (operands[0], 1);
2308 if (TARGET_ARCH64 && operands[2] == const0_rtx
2309 && GET_CODE (operands[1]) == REG
2310 && GET_MODE (operands[1]) == DImode)
2312 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2316 operands[1] = gen_compare_reg (operands[0]);
2317 operands[2] = const0_rtx;
2318 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2319 operands[1], operands[2]);
  emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
				  operands[3]));
/* Generate a DFmode part of a hard TFmode register.
   REG is the TFmode hard register, LOW is 1 for the
   low 64 bits of the register and 0 otherwise.  */
2330 gen_df_reg (rtx reg, int low)
2332 int regno = REGNO (reg);
2334 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2335 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2336 return gen_rtx_REG (DFmode, regno);
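/* Worked example for gen_df_reg: the TFmode hard register %f4 occupies
   regnos 36-39 here (%f0 being regno 32); the high DFmode half is %f4
   (regnos 36-37) and, SPARC being big-endian, the half returned for
   LOW == 1 is %f6 (regnos 38-39).  */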
2339 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2340 Unlike normal calls, TFmode operands are passed by reference. It is
2341 assumed that no more than 3 operands are required. */
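/* For instance, with the 64-bit ABI's quad emulation a TFmode addition
   becomes a call of the shape

	_Qp_add (&result, &x, &y);

   with all three TFmode values passed by reference (illustrative; the
   actual FUNC_NAME is supplied by the callers below).  */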
2344 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2346 rtx ret_slot = NULL, arg[3], func_sym;
2349 /* We only expect to be called for conversions, unary, and binary ops. */
2350 gcc_assert (nargs == 2 || nargs == 3);
2352 for (i = 0; i < nargs; ++i)
2354 rtx this_arg = operands[i];
2357 /* TFmode arguments and return values are passed by reference. */
2358 if (GET_MODE (this_arg) == TFmode)
2360 int force_stack_temp;
2362 force_stack_temp = 0;
2363 if (TARGET_BUGGY_QP_LIB && i == 0)
2364 force_stack_temp = 1;
2366 if (GET_CODE (this_arg) == MEM
2367 && ! force_stack_temp)
2368 this_arg = XEXP (this_arg, 0);
2369 else if (CONSTANT_P (this_arg)
2370 && ! force_stack_temp)
2372 this_slot = force_const_mem (TFmode, this_arg);
2373 this_arg = XEXP (this_slot, 0);
2377 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2379 /* Operand 0 is the return value. We'll copy it out later. */
2381 emit_move_insn (this_slot, this_arg);
2383 ret_slot = this_slot;
2385 this_arg = XEXP (this_slot, 0);
2392 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2394 if (GET_MODE (operands[0]) == TFmode)
2397 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2398 arg[0], GET_MODE (arg[0]),
2399 arg[1], GET_MODE (arg[1]));
2401 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2402 arg[0], GET_MODE (arg[0]),
2403 arg[1], GET_MODE (arg[1]),
2404 arg[2], GET_MODE (arg[2]));
2407 emit_move_insn (operands[0], ret_slot);
2413 gcc_assert (nargs == 2);
2415 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2416 GET_MODE (operands[0]), 1,
2417 arg[1], GET_MODE (arg[1]));
2419 if (ret != operands[0])
2420 emit_move_insn (operands[0], ret);
/* Expand soft-float TFmode calls to SPARC ABI routines.  */
2427 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2449 emit_soft_tfmode_libcall (func, 3, operands);
2453 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2457 gcc_assert (code == SQRT);
2460 emit_soft_tfmode_libcall (func, 2, operands);
2464 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2471 switch (GET_MODE (operands[1]))
2484 case FLOAT_TRUNCATE:
2485 switch (GET_MODE (operands[0]))
2499 switch (GET_MODE (operands[1]))
2504 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2514 case UNSIGNED_FLOAT:
2515 switch (GET_MODE (operands[1]))
2520 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2531 switch (GET_MODE (operands[0]))
2545 switch (GET_MODE (operands[0]))
2562 emit_soft_tfmode_libcall (func, 2, operands);
/* Expand a hard-float TFmode operation.  All arguments must be in
   registers.  */
2569 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2573 if (GET_RTX_CLASS (code) == RTX_UNARY)
2575 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2576 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2580 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2581 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2582 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2583 operands[1], operands[2]);
  if (register_operand (operands[0], VOIDmode))
    dest = operands[0];
  else
    dest = gen_reg_rtx (GET_MODE (operands[0]));
2591 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2593 if (dest != operands[0])
2594 emit_move_insn (operands[0], dest);
2598 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2600 if (TARGET_HARD_QUAD)
2601 emit_hard_tfmode_operation (code, operands);
2603 emit_soft_tfmode_binop (code, operands);
2607 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2609 if (TARGET_HARD_QUAD)
2610 emit_hard_tfmode_operation (code, operands);
2612 emit_soft_tfmode_unop (code, operands);
2616 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2618 if (TARGET_HARD_QUAD)
2619 emit_hard_tfmode_operation (code, operands);
2621 emit_soft_tfmode_cvt (code, operands);
/* Return nonzero if a branch/jump/call instruction will be emitting
   a nop into its delay slot.  */
2628 empty_delay_slot (rtx insn)
2632 /* If no previous instruction (should not happen), return true. */
2633 if (PREV_INSN (insn) == NULL)
2636 seq = NEXT_INSN (PREV_INSN (insn));
2637 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2643 /* Return nonzero if TRIAL can go into the call delay slot. */
2646 tls_call_delay (rtx trial)
  /* Binutils allows
       call __tls_get_addr, %tgd_call (foo)
2652 add %l7, %o0, %o0, %tgd_add (foo)
2653 while Sun as/ld does not. */
2654 if (TARGET_GNU_TLS || !TARGET_TLS)
2657 pat = PATTERN (trial);
2659 /* We must reject tgd_add{32|64}, i.e.
2660 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2661 and tldm_add{32|64}, i.e.
2662 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2664 if (GET_CODE (pat) == SET
2665 && GET_CODE (SET_SRC (pat)) == PLUS)
2667 rtx unspec = XEXP (SET_SRC (pat), 1);
2669 if (GET_CODE (unspec) == UNSPEC
2670 && (XINT (unspec, 1) == UNSPEC_TLSGD
2671 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2678 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2679 instruction. RETURN_P is true if the v9 variant 'return' is to be
2680 considered in the test too.
2682 TRIAL must be a SET whose destination is a REG appropriate for the
   'restore' instruction or, if RETURN_P is true, for the 'return'
   instruction.  */
2687 eligible_for_restore_insn (rtx trial, bool return_p)
2689 rtx pat = PATTERN (trial);
2690 rtx src = SET_SRC (pat);
2692 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2693 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2694 && arith_operand (src, GET_MODE (src)))
2697 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2699 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2702 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2703 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2704 && arith_double_operand (src, GET_MODE (src)))
2705 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2707 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2708 else if (! TARGET_FPU && register_operand (src, SFmode))
2711 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2712 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2715 /* If we have the 'return' instruction, anything that does not use
2716 local or output registers and can go into a delay slot wins. */
2717 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2718 && (get_attr_in_uncond_branch_delay (trial)
2719 == IN_UNCOND_BRANCH_DELAY_TRUE))
2722 /* The 'restore src1,src2,dest' pattern for SImode. */
2723 else if (GET_CODE (src) == PLUS
2724 && register_operand (XEXP (src, 0), SImode)
2725 && arith_operand (XEXP (src, 1), SImode))
2728 /* The 'restore src1,src2,dest' pattern for DImode. */
2729 else if (GET_CODE (src) == PLUS
2730 && register_operand (XEXP (src, 0), DImode)
2731 && arith_double_operand (XEXP (src, 1), DImode))
2734 /* The 'restore src1,%lo(src2),dest' pattern. */
2735 else if (GET_CODE (src) == LO_SUM
2736 && ! TARGET_CM_MEDMID
2737 && ((register_operand (XEXP (src, 0), SImode)
2738 && immediate_operand (XEXP (src, 1), SImode))
2740 && register_operand (XEXP (src, 0), DImode)
2741 && immediate_operand (XEXP (src, 1), DImode))))
2744 /* The 'restore src,src,dest' pattern. */
2745 else if (GET_CODE (src) == ASHIFT
2746 && (register_operand (XEXP (src, 0), SImode)
2747 || register_operand (XEXP (src, 0), DImode))
2748 && XEXP (src, 1) == const1_rtx)
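/* As a worked example of the ASHIFT case above, (set (reg) (ashift (reg)
   (const_int 1))) can piggyback on the register window restore as
   "restore %o1, %o1, %i0", i.e. dest = src + src (a sketch; see
   output_restore below for the actual emission).  */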
/* Return nonzero if TRIAL can go into the function return's
   delay slot.  */
2758 eligible_for_return_delay (rtx trial)
2762 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2765 if (get_attr_length (trial) != 1)
  /* If there are any call-saved registers, we should scan TRIAL to check
     that it does not reference any of them.  For now just make it easy.  */
2773 /* If the function uses __builtin_eh_return, the eh_return machinery
2774 occupies the delay slot. */
2775 if (crtl->calls_eh_return)
2778 /* In the case of a true leaf function, anything can go into the slot. */
2779 if (sparc_leaf_function_p)
2780 return get_attr_in_uncond_branch_delay (trial)
2781 == IN_UNCOND_BRANCH_DELAY_TRUE;
2783 pat = PATTERN (trial);
2785 /* Otherwise, only operations which can be done in tandem with
2786 a `restore' or `return' insn can go into the delay slot. */
2787 if (GET_CODE (SET_DEST (pat)) != REG
2788 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
  /* If this instruction sets up a floating-point register and we have a
     return instruction, it can probably go in.  But restore will not work
     with FP regs.  */
2794 if (REGNO (SET_DEST (pat)) >= 32)
2796 && ! epilogue_renumber (&pat, 1)
2797 && (get_attr_in_uncond_branch_delay (trial)
2798 == IN_UNCOND_BRANCH_DELAY_TRUE));
2800 return eligible_for_restore_insn (trial, true);
/* Return nonzero if TRIAL can go into the sibling call's
   delay slot.  */
2807 eligible_for_sibcall_delay (rtx trial)
2811 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2814 if (get_attr_length (trial) != 1)
2817 pat = PATTERN (trial);
2819 if (sparc_leaf_function_p)
2821 /* If the tail call is done using the call instruction,
2822 we have to restore %o7 in the delay slot. */
2823 if (LEAF_SIBCALL_SLOT_RESERVED_P)
      /* %g1 is used to build the function address.  */
2827 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2833 /* Otherwise, only operations which can be done in tandem with
2834 a `restore' insn can go into the delay slot. */
2835 if (GET_CODE (SET_DEST (pat)) != REG
2836 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2837 || REGNO (SET_DEST (pat)) >= 32)
  /* If it mentions %o7, it can't go in, because sibcall will clobber it
     before it is restored.  */
2842 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2845 return eligible_for_restore_insn (trial, false);
2849 short_branch (int uid1, int uid2)
2851 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2853 /* Leave a few words of "slop". */
2854 if (delta >= -1023 && delta <= 1022)
2860 /* Return nonzero if REG is not used after INSN.
2861 We assume REG is a reload reg, and therefore does
2862 not live past labels or calls or jumps. */
2864 reg_unused_after (rtx reg, rtx insn)
2866 enum rtx_code code, prev_code = UNKNOWN;
2868 while ((insn = NEXT_INSN (insn)))
2870 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2873 code = GET_CODE (insn);
2874 if (GET_CODE (insn) == CODE_LABEL)
2879 rtx set = single_set (insn);
2880 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2883 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2885 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2893 /* Determine if it's legal to put X into the constant pool. This
2894 is not possible if X contains the address of a symbol that is
2895 not constant (TLS) or not known at final link time (PIC). */
2898 sparc_cannot_force_const_mem (rtx x)
2900 switch (GET_CODE (x))
2905 /* Accept all non-symbolic constants. */
2909 /* Labels are OK iff we are non-PIC. */
2910 return flag_pic != 0;
2913 /* 'Naked' TLS symbol references are never OK,
2914 non-TLS symbols are OK iff we are non-PIC. */
2915 if (SYMBOL_REF_TLS_MODEL (x))
2918 return flag_pic != 0;
2921 return sparc_cannot_force_const_mem (XEXP (x, 0));
2924 return sparc_cannot_force_const_mem (XEXP (x, 0))
2925 || sparc_cannot_force_const_mem (XEXP (x, 1));
2934 static GTY(()) char pic_helper_symbol_name[256];
2935 static GTY(()) rtx pic_helper_symbol;
2936 static GTY(()) bool pic_helper_emitted_p = false;
2937 static GTY(()) rtx global_offset_table;
2939 /* Ensure that we are not using patterns that are not OK with PIC. */
2947 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2948 && (GET_CODE (recog_data.operand[i]) != CONST
2949 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2950 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2951 == global_offset_table)
2952 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2960 /* Return true if X is an address which needs a temporary register when
2961 reloaded while generating PIC code. */
2964 pic_address_needs_scratch (rtx x)
2966 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
2967 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2968 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2969 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2970 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
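/* E.g. (const (plus (symbol_ref "x") (const_int 8192))) needs a scratch
   register under PIC: 8192 does not fit in the signed 13-bit immediate
   field, so the offset has to be built separately (illustrative value).  */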
2976 /* Determine if a given RTX is a valid constant. We already know this
2977 satisfies CONSTANT_P. */
2980 legitimate_constant_p (rtx x)
2984 switch (GET_CODE (x))
2987 /* TLS symbols are not constant. */
2988 if (SYMBOL_REF_TLS_MODEL (x))
2993 inner = XEXP (x, 0);
2995 /* Offsets of TLS symbols are never valid.
2996 Discourage CSE from creating them. */
2997 if (GET_CODE (inner) == PLUS
2998 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
3003 if (GET_MODE (x) == VOIDmode)
      /* Floating point constants are generally not OK.
	 The only exception is 0.0 in VIS.  */
3009 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
3010 && const_zero_operand (x, GET_MODE (x)))
      /* Vector constants are generally not OK.
	 The only exception is 0 in VIS.  */
3019 && const_zero_operand (x, GET_MODE (x)))
3031 /* Determine if a given RTX is a valid constant address. */
3034 constant_address_p (rtx x)
3036 switch (GET_CODE (x))
3044 if (flag_pic && pic_address_needs_scratch (x))
3046 return legitimate_constant_p (x);
3049 return !flag_pic && legitimate_constant_p (x);
3056 /* Nonzero if the constant value X is a legitimate general operand
3057 when generating PIC code. It is given that flag_pic is on and
3058 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3061 legitimate_pic_operand_p (rtx x)
3063 if (pic_address_needs_scratch (x))
3065 if (SPARC_SYMBOL_REF_TLS_P (x)
3066 || (GET_CODE (x) == CONST
3067 && GET_CODE (XEXP (x, 0)) == PLUS
3068 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
3073 /* Return nonzero if ADDR is a valid memory address.
3074 STRICT specifies whether strict register checking applies. */
3077 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3079 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3081 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3083 else if (GET_CODE (addr) == PLUS)
3085 rs1 = XEXP (addr, 0);
3086 rs2 = XEXP (addr, 1);
      /* Canonicalize.  REG comes first; if there are no regs,
	 LO_SUM comes first.  */
3091 && GET_CODE (rs1) != SUBREG
3093 || GET_CODE (rs2) == SUBREG
3094 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3096 rs1 = XEXP (addr, 1);
3097 rs2 = XEXP (addr, 0);
3101 && rs1 == pic_offset_table_rtx
3103 && GET_CODE (rs2) != SUBREG
3104 && GET_CODE (rs2) != LO_SUM
3105 && GET_CODE (rs2) != MEM
3106 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
3107 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3108 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3110 || GET_CODE (rs1) == SUBREG)
3111 && RTX_OK_FOR_OFFSET_P (rs2)))
3116 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3117 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3119 /* We prohibit REG + REG for TFmode when there are no quad move insns
3120 and we consequently need to split. We do this because REG+REG
3121 is not an offsettable address. If we get the situation in reload
3122 where source and destination of a movtf pattern are both MEMs with
3123 REG+REG address, then only one of them gets converted to an
3124 offsettable address. */
3126 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3129 /* We prohibit REG + REG on ARCH32 if not optimizing for
3130 DFmode/DImode because then mem_min_alignment is likely to be zero
	 after reload and the forced split would lack a matching splitter
	 pattern.  */
3133 if (TARGET_ARCH32 && !optimize
3134 && (mode == DFmode || mode == DImode))
3137 else if (USE_AS_OFFSETABLE_LO10
3138 && GET_CODE (rs1) == LO_SUM
3140 && ! TARGET_CM_MEDMID
3141 && RTX_OK_FOR_OLO10_P (rs2))
3144 imm1 = XEXP (rs1, 1);
3145 rs1 = XEXP (rs1, 0);
3146 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3150 else if (GET_CODE (addr) == LO_SUM)
3152 rs1 = XEXP (addr, 0);
3153 imm1 = XEXP (addr, 1);
3155 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3158 /* We can't allow TFmode in 32-bit mode, because an offset greater
3159 than the alignment (8) may cause the LO_SUM to overflow. */
3160 if (mode == TFmode && TARGET_ARCH32)
3163 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3168 if (GET_CODE (rs1) == SUBREG)
3169 rs1 = SUBREG_REG (rs1);
3175 if (GET_CODE (rs2) == SUBREG)
3176 rs2 = SUBREG_REG (rs2);
3183 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3184 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3189 if ((REGNO (rs1) >= 32
3190 && REGNO (rs1) != FRAME_POINTER_REGNUM
3191 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3193 && (REGNO (rs2) >= 32
3194 && REGNO (rs2) != FRAME_POINTER_REGNUM
3195 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
/* Construct the SYMBOL_REF for the __tls_get_addr function.  */
3203 static GTY(()) rtx sparc_tls_symbol;
3206 sparc_tls_get_addr (void)
3208 if (!sparc_tls_symbol)
3209 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3211 return sparc_tls_symbol;
3215 sparc_tls_got (void)
3220 crtl->uses_pic_offset_table = 1;
3221 return pic_offset_table_rtx;
3224 if (!global_offset_table)
3225 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3226 temp = gen_reg_rtx (Pmode);
3227 emit_move_insn (temp, global_offset_table);
3231 /* Return 1 if *X is a thread-local symbol. */
3234 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3236 return SPARC_SYMBOL_REF_TLS_P (*x);
3239 /* Return 1 if X contains a thread-local symbol. */
3242 sparc_tls_referenced_p (rtx x)
3244 if (!TARGET_HAVE_TLS)
3247 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3250 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3251 this (thread-local) address. */
3254 legitimize_tls_address (rtx addr)
3256 rtx temp1, temp2, temp3, ret, o0, got, insn;
3258 gcc_assert (can_create_pseudo_p ());
3260 if (GET_CODE (addr) == SYMBOL_REF)
3261 switch (SYMBOL_REF_TLS_MODEL (addr))
3263 case TLS_MODEL_GLOBAL_DYNAMIC:
3265 temp1 = gen_reg_rtx (SImode);
3266 temp2 = gen_reg_rtx (SImode);
3267 ret = gen_reg_rtx (Pmode);
3268 o0 = gen_rtx_REG (Pmode, 8);
3269 got = sparc_tls_got ();
3270 emit_insn (gen_tgd_hi22 (temp1, addr));
3271 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3274 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3275 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3280 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3281 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3284 CALL_INSN_FUNCTION_USAGE (insn)
3285 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3286 CALL_INSN_FUNCTION_USAGE (insn));
3287 insn = get_insns ();
3289 emit_libcall_block (insn, ret, o0, addr);
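      /* The global-dynamic sequence built above corresponds to (a sketch,
	 32-bit flavor, with %l7 holding the GOT pointer and %o0 standing
	 in for the pseudos allocated above):

		sethi	%tgd_hi22(sym), %o0
		add	%o0, %tgd_lo10(sym), %o0
		add	%l7, %o0, %o0, %tgd_add(sym)
		call	__tls_get_addr, %tgd_call(sym)
		 nop  */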
3292 case TLS_MODEL_LOCAL_DYNAMIC:
3294 temp1 = gen_reg_rtx (SImode);
3295 temp2 = gen_reg_rtx (SImode);
3296 temp3 = gen_reg_rtx (Pmode);
3297 ret = gen_reg_rtx (Pmode);
3298 o0 = gen_rtx_REG (Pmode, 8);
3299 got = sparc_tls_got ();
3300 emit_insn (gen_tldm_hi22 (temp1));
3301 emit_insn (gen_tldm_lo10 (temp2, temp1));
3304 emit_insn (gen_tldm_add32 (o0, got, temp2));
3305 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3310 emit_insn (gen_tldm_add64 (o0, got, temp2));
3311 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3314 CALL_INSN_FUNCTION_USAGE (insn)
3315 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3316 CALL_INSN_FUNCTION_USAGE (insn));
3317 insn = get_insns ();
3319 emit_libcall_block (insn, temp3, o0,
3320 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3321 UNSPEC_TLSLD_BASE));
3322 temp1 = gen_reg_rtx (SImode);
3323 temp2 = gen_reg_rtx (SImode);
3324 emit_insn (gen_tldo_hix22 (temp1, addr));
3325 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3327 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3329 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3332 case TLS_MODEL_INITIAL_EXEC:
3333 temp1 = gen_reg_rtx (SImode);
3334 temp2 = gen_reg_rtx (SImode);
3335 temp3 = gen_reg_rtx (Pmode);
3336 got = sparc_tls_got ();
3337 emit_insn (gen_tie_hi22 (temp1, addr));
3338 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3340 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3342 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3345 ret = gen_reg_rtx (Pmode);
3347 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3350 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3354 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3357 case TLS_MODEL_LOCAL_EXEC:
3358 temp1 = gen_reg_rtx (Pmode);
3359 temp2 = gen_reg_rtx (Pmode);
3362 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3363 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3367 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3368 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3370 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3378 gcc_unreachable (); /* for now ... */
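/* For reference, the local-exec case above expands to the sequence
   (a sketch):

	sethi	%tle_hix22(sym), %tmp1
	xor	%tmp1, %tle_lox10(sym), %tmp2
	add	%g7, %tmp2, %dest	! %g7 is the thread pointer

   where %tmp1/%tmp2/%dest stand for the pseudos allocated above.  */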
3384 /* Legitimize PIC addresses. If the address is already position-independent,
3385 we return ORIG. Newly generated position-independent addresses go into a
   reg.  This is REG if nonzero, otherwise we allocate register(s) as
   necessary.  */
3390 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3393 if (GET_CODE (orig) == SYMBOL_REF
3394 /* See the comment in sparc_expand_move. */
3395 || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
3397 rtx pic_ref, address;
3402 gcc_assert (! reload_in_progress && ! reload_completed);
3403 reg = gen_reg_rtx (Pmode);
3408 /* If not during reload, allocate another temp reg here for loading
	 in the address, so that these instructions can be optimized
	 properly.  */
3411 rtx temp_reg = ((reload_in_progress || reload_completed)
3412 ? reg : gen_reg_rtx (Pmode));
3414 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3415 won't get confused into thinking that these two instructions
3416 are loading in the true address of the symbol. If in the
3417 future a PIC rtx exists, that should be used instead. */
3420 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3421 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3425 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3426 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3433 pic_ref = gen_const_mem (Pmode,
3434 gen_rtx_PLUS (Pmode,
3435 pic_offset_table_rtx, address));
3436 crtl->uses_pic_offset_table = 1;
3437 insn = emit_move_insn (reg, pic_ref);
      /* Put a REG_EQUAL note on this insn, so that it can be optimized
	 by loop.  */
3440 set_unique_reg_note (insn, REG_EQUAL, orig);
3443 else if (GET_CODE (orig) == CONST)
3447 if (GET_CODE (XEXP (orig, 0)) == PLUS
3448 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3453 gcc_assert (! reload_in_progress && ! reload_completed);
3454 reg = gen_reg_rtx (Pmode);
3457 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3458 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3459 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3460 base == reg ? 0 : reg);
3462 if (GET_CODE (offset) == CONST_INT)
3464 if (SMALL_INT (offset))
3465 return plus_constant (base, INTVAL (offset));
3466 else if (! reload_in_progress && ! reload_completed)
3467 offset = force_reg (Pmode, offset);
3469 /* If we reach here, then something is seriously wrong. */
3472 return gen_rtx_PLUS (Pmode, base, offset);
3474 else if (GET_CODE (orig) == LABEL_REF)
3475 /* ??? Why do we do this? */
3476 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3477 the register is live instead, in case it is eliminated. */
3478 crtl->uses_pic_offset_table = 1;
3483 /* Try machine-dependent ways of modifying an illegitimate address X
3484 to be legitimate. If we find one, return the new, valid address.
3486 OLDX is the address as it was before break_out_memory_refs was called.
3487 In some cases it is useful to look at this to decide what needs to be done.
3489 MODE is the mode of the operand pointed to by X.
3491 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
3494 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3495 enum machine_mode mode)
3499 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3500 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3501 force_operand (XEXP (x, 0), NULL_RTX));
3502 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3503 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3504 force_operand (XEXP (x, 1), NULL_RTX));
3505 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3506 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3508 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3509 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3510 force_operand (XEXP (x, 1), NULL_RTX));
3512 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3515 if (SPARC_SYMBOL_REF_TLS_P (x))
3516 x = legitimize_tls_address (x);
3518 x = legitimize_pic_address (x, mode, 0);
3519 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3520 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3521 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3522 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3523 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3524 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3525 else if (GET_CODE (x) == SYMBOL_REF
3526 || GET_CODE (x) == CONST
3527 || GET_CODE (x) == LABEL_REF)
3528 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3532 /* Emit the special PIC helper function. */
3535 emit_pic_helper (void)
3537 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3540 switch_to_section (text_section);
3542 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3544 ASM_OUTPUT_ALIGN (asm_out_file, align);
3545 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3546 if (flag_delayed_branch)
3547 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3548 pic_name, pic_name);
3550 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3551 pic_name, pic_name);
3553 pic_helper_emitted_p = true;
3556 /* Emit code to load the PIC register. */
3559 load_pic_register (bool delay_pic_helper)
3561 int orig_flag_pic = flag_pic;
3563 if (TARGET_VXWORKS_RTP)
3565 emit_insn (gen_vxworks_load_got ());
3566 emit_use (pic_offset_table_rtx);
3570 /* If we haven't initialized the special PIC symbols, do so now. */
3571 if (!pic_helper_symbol_name[0])
3573 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3574 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3575 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3578 /* If we haven't emitted the special PIC helper function, do so now unless
3579 we are requested to delay it. */
3580 if (!delay_pic_helper && !pic_helper_emitted_p)
3585 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3586 pic_helper_symbol));
3588 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3589 pic_helper_symbol));
3590 flag_pic = orig_flag_pic;
  /* Need to emit this whether or not we obey regdecls,
     since setjmp/longjmp can invalidate the life info.
3594 ??? In the case where we don't obey regdecls, this is not sufficient
3595 since we may not fall out the bottom. */
3596 emit_use (pic_offset_table_rtx);
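/* For reference, on 32-bit the load_pcrel_sym pattern plus the helper
   emitted above implement the classic sequence (a sketch; the label name
   is whatever ASM_GENERATE_INTERNAL_LABEL produced):

	sethi	%hi(_GLOBAL_OFFSET_TABLE_-4), %l7
	call	.LADDPC0
	 add	%l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   with the helper itself doing "jmp %o7+8; add %o7, %l7, %l7".  */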
3599 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3600 address of the call target. */
3603 sparc_emit_call_insn (rtx pat, rtx addr)
3607 insn = emit_call_insn (pat);
3609 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3610 if (TARGET_VXWORKS_RTP
3612 && GET_CODE (addr) == SYMBOL_REF
3613 && (SYMBOL_REF_DECL (addr)
3614 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3615 : !SYMBOL_REF_LOCAL_P (addr)))
3617 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3618 crtl->uses_pic_offset_table = 1;
3622 /* Return 1 if RTX is a MEM which is known to be aligned to at
3623 least a DESIRED byte boundary. */
3626 mem_min_alignment (rtx mem, int desired)
3628 rtx addr, base, offset;
3630 /* If it's not a MEM we can't accept it. */
3631 if (GET_CODE (mem) != MEM)
3635 if (!TARGET_UNALIGNED_DOUBLES
3636 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3639 /* ??? The rest of the function predates MEM_ALIGN so
3640 there is probably a bit of redundancy. */
3641 addr = XEXP (mem, 0);
3642 base = offset = NULL_RTX;
3643 if (GET_CODE (addr) == PLUS)
3645 if (GET_CODE (XEXP (addr, 0)) == REG)
3647 base = XEXP (addr, 0);
3649 /* What we are saying here is that if the base
3650 REG is aligned properly, the compiler will make
	     sure any REG based index upon it will be so
	     as well.  */
3653 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3654 offset = XEXP (addr, 1);
3656 offset = const0_rtx;
3659 else if (GET_CODE (addr) == REG)
3662 offset = const0_rtx;
3665 if (base != NULL_RTX)
3667 int regno = REGNO (base);
3669 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3671 /* Check if the compiler has recorded some information
3672 about the alignment of the base REG. If reload has
3673 completed, we already matched with proper alignments.
	     If not running global_alloc, reload might give us an
	     unaligned pointer to the local stack, though.  */
3677 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3678 || (optimize && reload_completed))
3679 && (INTVAL (offset) & (desired - 1)) == 0)
3684 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3688 else if (! TARGET_UNALIGNED_DOUBLES
3689 || CONSTANT_P (addr)
3690 || GET_CODE (addr) == LO_SUM)
3692 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3693 is true, in which case we can only assume that an access is aligned if
3694 it is to a constant address, or the address involves a LO_SUM. */
3698 /* An obviously unaligned address. */
/* Vectors to keep interesting information about registers where it can
   easily be found.  We used to use the actual mode value as the bit number,
   but there are more than 32 modes now.  Instead we use two tables: one
   indexed by hard register number, and one indexed by mode.  */
3708 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3709 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3710 mapped into one sparc_mode_class mode. */
3712 enum sparc_mode_class {
3713 S_MODE, D_MODE, T_MODE, O_MODE,
3714 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3718 /* Modes for single-word and smaller quantities. */
3719 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3721 /* Modes for double-word and smaller quantities. */
3722 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3724 /* Modes for quad-word and smaller quantities. */
3725 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3727 /* Modes for 8-word and smaller quantities. */
3728 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3730 /* Modes for single-float quantities. We must allow any single word or
3731 smaller quantity. This is because the fix/float conversion instructions
3732 take integer inputs/outputs from the float registers. */
3733 #define SF_MODES (S_MODES)
3735 /* Modes for double-float and smaller quantities. */
3736 #define DF_MODES (D_MODES)
3738 /* Modes for quad-float and smaller quantities. */
3739 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
3741 /* Modes for quad-float pairs and smaller quantities. */
3742 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
3744 /* Modes for double-float only quantities. */
3745 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3747 /* Modes for quad-float and double-float only quantities. */
3748 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
3750 /* Modes for quad-float pairs and double-float only quantities. */
3751 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
3753 /* Modes for condition codes. */
3754 #define CC_MODES (1 << (int) CC_MODE)
3755 #define CCFP_MODES (1 << (int) CCFP_MODE)
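/* As a worked example, D_MODES expands to
   (1 << S_MODE) | (1 << SF_MODE) | (1 << D_MODE) | (1 << DF_MODE),
   so a register whose entry below includes D_MODES accepts any single-
   or double-word quantity, integer or float.  */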
/* Value is 1 if register/mode pair is acceptable on SPARC.
3758 The funny mixture of D and T modes is because integer operations
3759 do not specially operate on tetra quantities, so non-quad-aligned
3760 registers can hold quadword quantities (except %o4 and %i4 because
3761 they cross fixed registers). */
/* This points to either the 32-bit or the 64-bit version.  */
3764 const int *hard_regno_mode_classes;
3766 static const int hard_32bit_mode_classes[] = {
3767 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3768 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3769 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3770 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3772 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3773 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3774 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3775 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3777 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3778 and none can hold SFmode/SImode values. */
3779 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3780 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3781 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3782 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3785 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3791 static const int hard_64bit_mode_classes[] = {
3792 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3793 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3794 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3795 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3797 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3798 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3799 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3800 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3802 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3803 and none can hold SFmode/SImode values. */
3804 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3805 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3806 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3807 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3810 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3816 int sparc_mode_class [NUM_MACHINE_MODES];
3818 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
static void
sparc_init_modes (void)
3825 for (i = 0; i < NUM_MACHINE_MODES; i++)
3827 switch (GET_MODE_CLASS (i))
3830 case MODE_PARTIAL_INT:
3831 case MODE_COMPLEX_INT:
3832 if (GET_MODE_SIZE (i) <= 4)
3833 sparc_mode_class[i] = 1 << (int) S_MODE;
3834 else if (GET_MODE_SIZE (i) == 8)
3835 sparc_mode_class[i] = 1 << (int) D_MODE;
3836 else if (GET_MODE_SIZE (i) == 16)
3837 sparc_mode_class[i] = 1 << (int) T_MODE;
3838 else if (GET_MODE_SIZE (i) == 32)
3839 sparc_mode_class[i] = 1 << (int) O_MODE;
3841 sparc_mode_class[i] = 0;
3843 case MODE_VECTOR_INT:
3844 if (GET_MODE_SIZE (i) <= 4)
3845 sparc_mode_class[i] = 1 << (int)SF_MODE;
3846 else if (GET_MODE_SIZE (i) == 8)
3847 sparc_mode_class[i] = 1 << (int)DF_MODE;
3850 case MODE_COMPLEX_FLOAT:
3851 if (GET_MODE_SIZE (i) <= 4)
3852 sparc_mode_class[i] = 1 << (int) SF_MODE;
3853 else if (GET_MODE_SIZE (i) == 8)
3854 sparc_mode_class[i] = 1 << (int) DF_MODE;
3855 else if (GET_MODE_SIZE (i) == 16)
3856 sparc_mode_class[i] = 1 << (int) TF_MODE;
3857 else if (GET_MODE_SIZE (i) == 32)
3858 sparc_mode_class[i] = 1 << (int) OF_MODE;
3860 sparc_mode_class[i] = 0;
3863 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3864 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3866 sparc_mode_class[i] = 1 << (int) CC_MODE;
3869 sparc_mode_class[i] = 0;
3875 hard_regno_mode_classes = hard_64bit_mode_classes;
3877 hard_regno_mode_classes = hard_32bit_mode_classes;
3879 /* Initialize the array used by REGNO_REG_CLASS. */
3880 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3882 if (i < 16 && TARGET_V8PLUS)
3883 sparc_regno_reg_class[i] = I64_REGS;
3884 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3885 sparc_regno_reg_class[i] = GENERAL_REGS;
3887 sparc_regno_reg_class[i] = FP_REGS;
3889 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3891 sparc_regno_reg_class[i] = FPCC_REGS;
3893 sparc_regno_reg_class[i] = NO_REGS;
3897 /* Compute the frame size required by the function. This function is called
3898 during the reload pass and also by sparc_expand_prologue. */
3901 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3903 int outgoing_args_size = (crtl->outgoing_args_size
3904 + REG_PARM_STACK_SPACE (current_function_decl));
3905 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3910 for (i = 0; i < 8; i++)
3911 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3916 for (i = 0; i < 8; i += 2)
3917 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3918 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3922 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3923 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3924 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3927 /* Set up values for use in prologue and epilogue. */
3928 num_gfregs = n_regs;
3933 && crtl->outgoing_args_size == 0)
3934 actual_fsize = apparent_fsize = 0;
3937 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
3938 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3939 apparent_fsize += n_regs * 4;
3940 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3943 /* Make sure nothing can clobber our register windows.
3944 If a SAVE must be done, or there is a stack-local variable,
3945 the register window area must be allocated. */
3946 if (! leaf_function_p || size > 0)
3947 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3949 return SPARC_STACK_ALIGN (actual_fsize);
3952 /* Output any necessary .register pseudo-ops. */
3955 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3957 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3963 /* Check if %g[2367] were used without
3964 .register being printed for them already. */
3965 for (i = 2; i < 8; i++)
3967 if (df_regs_ever_live_p (i)
3968 && ! sparc_hard_reg_printed [i])
3970 sparc_hard_reg_printed [i] = 1;
	  /* %g7 is used as the TLS base register; use #ignore
	     for it instead of #scratch.  */
3973 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3974 i == 7 ? "ignore" : "scratch");
3981 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3982 as needed. LOW should be double-word aligned for 32-bit registers.
3983 Return the new OFFSET. */
#define SORR_SAVE    0
#define SORR_RESTORE 1
3989 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3994 if (TARGET_ARCH64 && high <= 32)
3996 for (i = low; i < high; i++)
3998 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4000 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
4001 set_mem_alias_set (mem, sparc_sr_alias_set);
4002 if (action == SORR_SAVE)
4004 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4005 RTX_FRAME_RELATED_P (insn) = 1;
4007 else /* action == SORR_RESTORE */
4008 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4015 for (i = low; i < high; i += 2)
4017 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
4018 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
4019 enum machine_mode mode;
4024 mode = i < 32 ? DImode : DFmode;
4029 mode = i < 32 ? SImode : SFmode;
4034 mode = i < 32 ? SImode : SFmode;
4041 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
4042 set_mem_alias_set (mem, sparc_sr_alias_set);
4043 if (action == SORR_SAVE)
4045 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4046 RTX_FRAME_RELATED_P (insn) = 1;
4048 else /* action == SORR_RESTORE */
4049 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4051 /* Always preserve double-word alignment. */
4052 offset = (offset + 7) & -8;
/* Emit code to save or restore call-saved registers.  */
4062 emit_save_or_restore_regs (int action)
4064 HOST_WIDE_INT offset;
4067 offset = frame_base_offset - apparent_fsize;
4069 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
4071 /* ??? This might be optimized a little as %g1 might already have a
4072 value close enough that a single add insn will do. */
4073 /* ??? Although, all of this is probably only a temporary fix
4074 because if %g1 can hold a function result, then
	 sparc_expand_epilogue will lose (the result will be
	 clobbered).  */
4077 base = gen_rtx_REG (Pmode, 1);
4078 emit_move_insn (base, GEN_INT (offset));
      emit_insn (gen_rtx_SET (VOIDmode, base,
			      gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4085 base = frame_base_reg;
4087 offset = save_or_restore_regs (0, 8, base, offset, action);
4088 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
4091 /* Generate a save_register_window insn. */
4094 gen_save_register_window (rtx increment)
4097 return gen_save_register_windowdi (increment);
4099 return gen_save_register_windowsi (increment);
4102 /* Generate an increment for the stack pointer. */
4105 gen_stack_pointer_inc (rtx increment)
  return gen_rtx_SET (VOIDmode, stack_pointer_rtx,
		      gen_rtx_PLUS (Pmode, stack_pointer_rtx, increment));
4114 /* Generate a decrement for the stack pointer. */
4117 gen_stack_pointer_dec (rtx decrement)
  return gen_rtx_SET (VOIDmode, stack_pointer_rtx,
		      gen_rtx_MINUS (Pmode, stack_pointer_rtx, decrement));
4126 /* Expand the function prologue. The prologue is responsible for reserving
4127 storage for the frame, saving the call-saved registers and loading the
4128 PIC register if needed. */
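/* E.g. for a simple non-leaf 32-bit function with the minimum 96-byte
   frame, the entire prologue boils down to the single instruction

	save	%sp, -96, %sp

   (illustrative size; the exact amount comes from
   sparc_compute_frame_size above).  */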
4131 sparc_expand_prologue (void)
4136 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4137 on the final value of the flag means deferring the prologue/epilogue
4138 expansion until just before the second scheduling pass, which is too
4139 late to emit multiple epilogues or return insns.
4141 Of course we are making the assumption that the value of the flag
4142 will not change between now and its final value. Of the three parts
4143 of the formula, only the last one can reasonably vary. Let's take a
     closer look, after assuming that the first two are set to true
4145 (otherwise the last value is effectively silenced).
4147 If only_leaf_regs_used returns false, the global predicate will also
4148 be false so the actual frame size calculated below will be positive.
4149 As a consequence, the save_register_window insn will be emitted in
4150 the instruction stream; now this insn explicitly references %fp
4151 which is not a leaf register so only_leaf_regs_used will always
4152 return false subsequently.
4154 If only_leaf_regs_used returns true, we hope that the subsequent
4155 optimization passes won't cause non-leaf registers to pop up. For
4156 example, the regrename pass has special provisions to not rename to
4157 non-leaf registers in a leaf function. */
4158 sparc_leaf_function_p
4159 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4161 /* Need to use actual_fsize, since we are also allocating
4162 space for our callee (and our own register save area). */
4164 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4166 /* Advertise that the data calculated just above are now valid. */
4167 sparc_prologue_data_valid_p = true;
4169 if (sparc_leaf_function_p)
4171 frame_base_reg = stack_pointer_rtx;
4172 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4176 frame_base_reg = hard_frame_pointer_rtx;
4177 frame_base_offset = SPARC_STACK_BIAS;
4180 if (actual_fsize == 0)
4182 else if (sparc_leaf_function_p)
4184 if (actual_fsize <= 4096)
4185 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4186 else if (actual_fsize <= 8192)
4188 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4189 /* %sp is still the CFA register. */
4190 RTX_FRAME_RELATED_P (insn) = 1;
	= emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - actual_fsize)));
4196 rtx reg = gen_rtx_REG (Pmode, 1);
4197 emit_move_insn (reg, GEN_INT (-actual_fsize));
4198 insn = emit_insn (gen_stack_pointer_inc (reg));
4199 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4200 gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4203 RTX_FRAME_RELATED_P (insn) = 1;
4207 if (actual_fsize <= 4096)
4208 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4209 else if (actual_fsize <= 8192)
4211 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4212 /* %sp is not the CFA register anymore. */
      emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - actual_fsize)));
4217 rtx reg = gen_rtx_REG (Pmode, 1);
4218 emit_move_insn (reg, GEN_INT (-actual_fsize));
4219 insn = emit_insn (gen_save_register_window (reg));
4222 RTX_FRAME_RELATED_P (insn) = 1;
      for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
4224 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4228 emit_save_or_restore_regs (SORR_SAVE);
4230 /* Load the PIC register if needed. */
4231 if (flag_pic && crtl->uses_pic_offset_table)
4232 load_pic_register (false);
4235 /* This function generates the assembly code for function entry, which boils
4236 down to emitting the necessary .register directives. */
4239 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4241 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4242 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4244 sparc_output_scratch_registers (file);
4247 /* Expand the function epilogue, either normal or part of a sibcall.
4248 We emit all the instructions except the return or the call. */
4251 sparc_expand_epilogue (void)
4254 emit_save_or_restore_regs (SORR_RESTORE);
4256 if (actual_fsize == 0)
4258 else if (sparc_leaf_function_p)
4260 if (actual_fsize <= 4096)
4261 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4262 else if (actual_fsize <= 8192)
4264 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4265 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4269 rtx reg = gen_rtx_REG (Pmode, 1);
4270 emit_move_insn (reg, GEN_INT (-actual_fsize));
4271 emit_insn (gen_stack_pointer_dec (reg));
4276 /* Return true if it is appropriate to emit `return' instructions in the
4277 body of a function. */
4280 sparc_can_use_return_insn_p (void)
4282 return sparc_prologue_data_valid_p
4283 && (actual_fsize == 0 || !sparc_leaf_function_p);
4286 /* This function generates the assembly code for function exit. */
4289 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
  /* If code does not drop into the epilogue, we still have to output
     a dummy nop for the sake of sane backtraces.  Otherwise, if the
4293 last two instructions of a function were "call foo; dslot;" this
4294 can make the return PC of foo (i.e. address of call instruction
4295 plus 8) point to the first instruction in the next function. */
4297 rtx insn, last_real_insn;
4299 insn = get_last_insn ();
4301 last_real_insn = prev_real_insn (insn);
4303 && GET_CODE (last_real_insn) == INSN
4304 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4305 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4307 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4308 fputs("\tnop\n", file);
4310 sparc_output_deferred_case_vectors ();
4313 /* Output a 'restore' instruction. */
4316 output_restore (rtx pat)
4322 fputs ("\t restore\n", asm_out_file);
4326 gcc_assert (GET_CODE (pat) == SET);
4328 operands[0] = SET_DEST (pat);
4329 pat = SET_SRC (pat);
4331 switch (GET_CODE (pat))
4334 operands[1] = XEXP (pat, 0);
4335 operands[2] = XEXP (pat, 1);
4336 output_asm_insn (" restore %r1, %2, %Y0", operands);
4339 operands[1] = XEXP (pat, 0);
4340 operands[2] = XEXP (pat, 1);
4341 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4344 operands[1] = XEXP (pat, 0);
4345 gcc_assert (XEXP (pat, 1) == const1_rtx);
4346 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4350 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4355 /* Output a return. */
4358 output_return (rtx insn)
4360 if (sparc_leaf_function_p)
4362 /* This is a leaf function so we don't have to bother restoring the
4363 register window, which frees us from dealing with the convoluted
4364 semantics of restore/return. We simply output the jump to the
4365 return address and the insn in the delay slot (if any). */
4367 gcc_assert (! crtl->calls_eh_return);
4369 return "jmp\t%%o7+%)%#";
4373 /* This is a regular function so we have to restore the register window.
4374 We may have a pending insn for the delay slot, which will be either
4375 combined with the 'restore' instruction or put in the delay slot of
4376 the 'return' instruction. */
4378 if (crtl->calls_eh_return)
4380 /* If the function uses __builtin_eh_return, the eh_return
4381 machinery occupies the delay slot. */
4382 gcc_assert (! final_sequence);
4384 if (! flag_delayed_branch)
4385 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4388 fputs ("\treturn\t%i7+8\n", asm_out_file);
4390 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4392 if (flag_delayed_branch)
4393 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4395 fputs ("\t nop\n", asm_out_file);
4397 else if (final_sequence)
4401 delay = NEXT_INSN (insn);
4404 pat = PATTERN (delay);
4406 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4408 epilogue_renumber (&pat, 0);
4409 return "return\t%%i7+%)%#";
4413 output_asm_insn ("jmp\t%%i7+%)", NULL);
4414 output_restore (pat);
4415 PATTERN (delay) = gen_blockage ();
4416 INSN_CODE (delay) = -1;
4421 /* The delay slot is empty. */
4423 return "return\t%%i7+%)\n\t nop";
4424 else if (flag_delayed_branch)
4425 return "jmp\t%%i7+%)\n\t restore";
4427 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4434 /* Output a sibling call. */
4437 output_sibcall (rtx insn, rtx call_operand)
4441 gcc_assert (flag_delayed_branch);
4443 operands[0] = call_operand;
4445 if (sparc_leaf_function_p)
4447 /* This is a leaf function so we don't have to bother restoring the
4448 register window. We simply output the jump to the function and
4449 the insn in the delay slot (if any). */
4451 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4453 if (final_sequence)
4454 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4455 operands);
4456 else
4457 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4458 it into branch if possible. */
4459 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4460 operands);
4464 /* This is a regular function so we have to restore the register window.
4465 We may have a pending insn for the delay slot, which will be combined
4466 with the 'restore' instruction. */
4468 output_asm_insn ("call\t%a0, 0", operands);
4470 if (final_sequence)
4472 rtx delay = NEXT_INSN (insn);
4473 gcc_assert (delay);
4475 output_restore (PATTERN (delay));
4477 PATTERN (delay) = gen_blockage ();
4478 INSN_CODE (delay) = -1;
4480 else
4481 output_restore (NULL_RTX);
4487 /* Functions for handling argument passing.
4489 For 32-bit, the first 6 args are normally in registers and the rest are
4490 pushed. Any arg that starts within the first 6 words is at least
4491 partially passed in a register unless its data type forbids.
4493 For 64-bit, the argument registers are laid out as an array of 16 elements
4494 and arguments are added sequentially. The first 6 int args and up to the
4495 first 16 fp args (depending on size) are passed in regs.
4497    Slot  Stack      Integral   Float   Float in structure   Double   Long Double
4498    ----  -----      --------   -----   ------------------   ------   -----------
4499     15   [SP+248]              %f31        %f30,%f31         %d30
4500     14   [SP+240]              %f29        %f28,%f29         %d28        %q28
4501     13   [SP+232]              %f27        %f26,%f27         %d26
4502     12   [SP+224]              %f25        %f24,%f25         %d24        %q24
4503     11   [SP+216]              %f23        %f22,%f23         %d22
4504     10   [SP+208]              %f21        %f20,%f21         %d20        %q20
4505      9   [SP+200]              %f19        %f18,%f19         %d18
4506      8   [SP+192]              %f17        %f16,%f17         %d16        %q16
4507      7   [SP+184]              %f15        %f14,%f15         %d14
4508      6   [SP+176]              %f13        %f12,%f13         %d12        %q12
4509      5   [SP+168]     %o5      %f11        %f10,%f11         %d10
4510      4   [SP+160]     %o4       %f9         %f8,%f9           %d8         %q8
4511      3   [SP+152]     %o3       %f7         %f6,%f7           %d6
4512      2   [SP+144]     %o2       %f5         %f4,%f5           %d4         %q4
4513      1   [SP+136]     %o1       %f3         %f2,%f3           %d2
4514      0   [SP+128]     %o0       %f1         %f0,%f1           %d0         %q0
4516 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4518 Integral arguments are always passed as 64-bit quantities appropriately
4519 extended.
4521 Passing of floating point values is handled as follows.
4522 If a prototype is in scope:
4523 If the value is in a named argument (i.e. not a stdarg function or a
4524 value not part of the `...') then the value is passed in the appropriate
4525 fp reg.
4526 If the value is part of the `...' and is passed in one of the first 6
4527 slots then the value is passed in the appropriate int reg.
4528 If the value is part of the `...' and is not passed in one of the first 6
4529 slots then the value is passed in memory.
4530 If a prototype is not in scope:
4531 If the value is one of the first 6 arguments the value is passed in the
4532 appropriate integer reg and the appropriate fp reg.
4533 If the value is not one of the first 6 arguments the value is passed in
4534 the appropriate fp reg and in memory.
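
   Worked example (editor's illustration, derived from the slot table
   above): for a prototyped call 'void f (int a, double b, float c)',
   'a' is passed in %o0 (slot 0), 'b' in %d2 (slot 1) and 'c'
   right-justified in %f5 (slot 2); each slot also has stack space
   reserved for it.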
4537 Summary of the calling conventions implemented by GCC on SPARC:
4539 32-bit ABI:
4540                                size      argument     return value
4542    small integer                <4      int. reg.      int. reg.
4543    word                          4      int. reg.      int. reg.
4544    double word                   8      int. reg.      int. reg.
4546    _Complex small integer       <8      int. reg.      int. reg.
4547    _Complex word                 8      int. reg.      int. reg.
4548    _Complex double word         16       memory        int. reg.
4550    vector integer              <=8      int. reg.       FP reg.
4551    vector integer               >8       memory         memory
4553    float                         4      int. reg.       FP reg.
4554    double                        8      int. reg.       FP reg.
4555    long double                  16       memory         memory
4557    _Complex float                8       memory         FP reg.
4558    _Complex double              16       memory         FP reg.
4559    _Complex long double         32       memory         FP reg.
4561    vector float                any       memory         memory
4563    aggregate                   any       memory         memory
4566 64-bit ABI:
4568                                size      argument     return value
4570    small integer                <8      int. reg.      int. reg.
4571    word                          8      int. reg.      int. reg.
4572    double word                  16      int. reg.      int. reg.
4574    _Complex small integer      <16      int. reg.      int. reg.
4575    _Complex word                16      int. reg.      int. reg.
4576    _Complex double word         32       memory        int. reg.
4578    vector integer             <=16       FP reg.        FP reg.
4579    vector integer          16<s<=32      memory         FP reg.
4580    vector integer              >32       memory         memory
4582    float                         4       FP reg.        FP reg.
4583    double                        8       FP reg.        FP reg.
4584    long double                  16       FP reg.        FP reg.
4586    _Complex float                8       FP reg.        FP reg.
4587    _Complex double              16       FP reg.        FP reg.
4588    _Complex long double         32       memory         FP reg.
4590    vector float               <=16       FP reg.        FP reg.
4591    vector float            16<s<=32      memory         FP reg.
4592    vector float                >32       memory         memory
4594    aggregate                  <=16        reg.           reg.
4595    aggregate               16<s<=32      memory          reg.
4596    aggregate                   >32       memory         memory
4600 Note #1: complex floating-point types follow the extended SPARC ABIs as
4601 implemented by the Sun compiler.
4603 Note #2: integral vector types follow the scalar floating-point types
4604 conventions to match what is implemented by the Sun VIS SDK.
4606 Note #3: floating-point vector types follow the aggregate types
4607 conventions. */
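
/* Illustrative reading of the tables above (editor's example): a
   '_Complex long double' (32 bytes) is passed in memory but returned in
   FP registers under both ABIs, while a 16-byte aggregate is passed in
   memory on 32-bit yet in registers on 64-bit. */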
4610 /* Maximum number of int regs for args. */
4611 #define SPARC_INT_ARG_MAX 6
4612 /* Maximum number of fp regs for args. */
4613 #define SPARC_FP_ARG_MAX 16
4615 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
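
/* Worked example for ROUND_ADVANCE (editor's): with 8-byte words
   (TARGET_ARCH64), ROUND_ADVANCE (12) = (12 + 7) / 8 = 2 slots; with
   4-byte words (TARGET_ARCH32) it is (12 + 3) / 4 = 3 slots. */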
4617 /* Handle the INIT_CUMULATIVE_ARGS macro.
4618 Initialize a variable CUM of type CUMULATIVE_ARGS
4619 for a call to a function whose data type is FNTYPE.
4620 For a library call, FNTYPE is 0. */
4623 init_cumulative_args (struct sparc_args *cum, tree fntype,
4624 rtx libname ATTRIBUTE_UNUSED,
4625 tree fndecl ATTRIBUTE_UNUSED)
4628 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4629 cum->libcall_p = fntype == 0;
4632 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4633 When a prototype says `char' or `short', really pass an `int'. */
4636 sparc_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
4638 return TARGET_ARCH32 ? true : false;
4641 /* Handle promotion of pointer and integer arguments. */
4643 static enum machine_mode
4644 sparc_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
4645 enum machine_mode mode,
4646 int *punsignedp ATTRIBUTE_UNUSED,
4647 const_tree fntype ATTRIBUTE_UNUSED,
4648 int for_return ATTRIBUTE_UNUSED)
4650 if (POINTER_TYPE_P (type))
4652 *punsignedp = POINTERS_EXTEND_UNSIGNED;
4653 return Pmode;
4656 /* For TARGET_ARCH64 we need this, as we don't have instructions
4657 for arithmetic operations which do zero/sign extension at the same time,
4658 so without this we end up with a srl/sra after every assignment to an
4659 user variable, which means very very bad code. */
4661 if (TARGET_ARCH64
4662 && GET_MODE_CLASS (mode) == MODE_INT
4663 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4664 return word_mode;
4666 return mode;
4671 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4674 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4676 return TARGET_ARCH64 ? true : false;
4679 /* Scan the record type TYPE and return the following predicates:
4680 - INTREGS_P: the record contains at least one field or sub-field
4681 that is eligible for promotion in integer registers.
4682 - FP_REGS_P: the record contains at least one field or sub-field
4683 that is eligible for promotion in floating-point registers.
4684 - PACKED_P: the record contains at least one field that is packed.
4686 Sub-fields are not taken into account for the PACKED_P predicate. */
4689 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4693 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4695 if (TREE_CODE (field) == FIELD_DECL)
4697 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4698 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4699 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4700 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4701 && TARGET_FPU)
4702 *fpregs_p = 1;
4703 else
4704 *intregs_p = 1;
4706 if (packed_p && DECL_PACKED (field))
4707 *packed_p = 1;
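
/* Illustration with hypothetical types (editor's, not from the sources):
   scanning 'struct { int i; float f; }' sets both *intregs_p and
   *fpregs_p; adding '__attribute__ ((packed))' to a field also sets
   *packed_p, which the callers use to force the record into int regs. */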
4712 /* Compute the slot number to pass an argument in.
4713 Return the slot number or -1 if passing on the stack.
4715 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4716 the preceding args and about the function being called.
4717 MODE is the argument's machine mode.
4718 TYPE is the data type of the argument (as a tree).
4719 This is null for libcalls where that information may
4720 not be available.
4721 NAMED is nonzero if this argument is a named parameter
4722 (otherwise it is an extra parameter matching an ellipsis).
4723 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4724 *PREGNO records the register number to use if scalar type.
4725 *PPADDING records the amount of padding needed in words. */
4728 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4729 tree type, int named, int incoming_p,
4730 int *pregno, int *ppadding)
4732 int regbase = (incoming_p
4733 ? SPARC_INCOMING_INT_ARG_FIRST
4734 : SPARC_OUTGOING_INT_ARG_FIRST);
4735 int slotno = cum->words;
4736 enum mode_class mclass;
4741 if (type && TREE_ADDRESSABLE (type))
4742 return -1;
4744 if (TARGET_ARCH32
4745 && mode == BLKmode
4746 && type
4747 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4748 return -1;
4750 /* For SPARC64, objects requiring 16-byte alignment get it. */
4751 if (TARGET_ARCH64
4752 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4753 && (slotno & 1) != 0)
4754 slotno++, *ppadding = 1;
4756 mclass = GET_MODE_CLASS (mode);
4757 if (type && TREE_CODE (type) == VECTOR_TYPE)
4759 /* Vector types deserve special treatment because they are
4760 polymorphic wrt their mode, depending upon whether VIS
4761 instructions are enabled. */
4762 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4764 /* The SPARC port defines no floating-point vector modes. */
4765 gcc_assert (mode == BLKmode);
4769 /* Integral vector types should either have a vector
4770 mode or an integral mode, because we are guaranteed
4771 by pass_by_reference that their size is not greater
4772 than 16 bytes and TImode is 16-byte wide. */
4773 gcc_assert (mode != BLKmode);
4775 /* Vector integers are handled like floats according to
4776 the Sun VIS SDK. */
4777 mclass = MODE_FLOAT;
4781 switch (mclass)
4783 case MODE_FLOAT:
4784 case MODE_COMPLEX_FLOAT:
4785 case MODE_VECTOR_INT:
4786 if (TARGET_ARCH64 && TARGET_FPU && named)
4788 if (slotno >= SPARC_FP_ARG_MAX)
4789 return -1;
4790 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4791 /* Arguments filling only one single FP register are
4792 right-justified in the outer double FP register. */
4793 if (GET_MODE_SIZE (mode) <= 4)
4794 regno++;
4795 break;
4797 /* fallthrough */
4799 case MODE_INT:
4800 case MODE_COMPLEX_INT:
4801 if (slotno >= SPARC_INT_ARG_MAX)
4802 return -1;
4803 regno = regbase + slotno;
4804 break;
4806 default :
4807 if (mode == VOIDmode)
4808 /* MODE is VOIDmode when generating the actual call. */
4809 return -1;
4811 gcc_assert (mode == BLKmode);
4815 || (TREE_CODE (type) != VECTOR_TYPE
4816 && TREE_CODE (type) != RECORD_TYPE))
4818 if (slotno >= SPARC_INT_ARG_MAX)
4819 return -1;
4820 regno = regbase + slotno;
4822 else /* TARGET_ARCH64 && type */
4824 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4826 /* First see what kinds of registers we would need. */
4827 if (TREE_CODE (type) == VECTOR_TYPE)
4830 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4832 /* The ABI obviously doesn't specify how packed structures
4833 are passed. These are defined to be passed in int regs
4834 if possible, otherwise memory. */
4835 if (packed_p || !named)
4836 fpregs_p = 0, intregs_p = 1;
4838 /* If all arg slots are filled, then must pass on stack. */
4839 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4842 /* If there are only int args and all int arg slots are filled,
4843 then must pass on stack. */
4844 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4847 /* Note that even if all int arg slots are filled, fp members may
4848 still be passed in regs if such regs are available.
4849 *PREGNO isn't set because there may be more than one, it's up
4850 to the caller to compute them. */
4863 /* Handle recursive register counting for structure field layout. */
4865 struct function_arg_record_value_parms
4867 rtx ret; /* return expression being built. */
4868 int slotno; /* slot number of the argument. */
4869 int named; /* whether the argument is named. */
4870 int regbase; /* regno of the base register. */
4871 int stack; /* 1 if part of the argument is on the stack. */
4872 int intoffset; /* offset of the first pending integer field. */
4873 unsigned int nregs; /* number of words passed in registers. */
4876 static void function_arg_record_value_3
4877 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4878 static void function_arg_record_value_2
4879 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4880 static void function_arg_record_value_1
4881 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4882 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
4883 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4885 /* A subroutine of function_arg_record_value. Traverse the structure
4886 recursively and determine how many registers will be required. */
4889 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
4890 struct function_arg_record_value_parms *parms,
4895 /* We need to compute how many registers are needed so we can
4896 allocate the PARALLEL but before we can do that we need to know
4897 whether there are any packed fields. The ABI obviously doesn't
4898 specify how structures are passed in this case, so they are
4899 defined to be passed in int regs if possible, otherwise memory,
4900 regardless of whether there are fp values present. */
4903 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4905 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4912 /* Compute how many registers we need. */
4913 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4915 if (TREE_CODE (field) == FIELD_DECL)
4917 HOST_WIDE_INT bitpos = startbitpos;
4919 if (DECL_SIZE (field) != 0)
4921 if (integer_zerop (DECL_SIZE (field)))
4924 if (host_integerp (bit_position (field), 1))
4925 bitpos += int_bit_position (field);
4928 /* ??? FIXME: else assume zero offset. */
4930 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4931 function_arg_record_value_1 (TREE_TYPE (field),
4932 bitpos,
4933 parms,
4934 packed_p);
4935 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4936 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4937 && TARGET_FPU
4938 && ! packed_p)
4941 if (parms->intoffset != -1)
4943 unsigned int startbit, endbit;
4944 int intslots, this_slotno;
4946 startbit = parms->intoffset & -BITS_PER_WORD;
4947 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4949 intslots = (endbit - startbit) / BITS_PER_WORD;
4950 this_slotno = parms->slotno + parms->intoffset
4951 / BITS_PER_WORD;
4953 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4955 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4956 /* We need to pass this field on the stack. */
4957 parms->stack = 1;
4960 parms->nregs += intslots;
4961 parms->intoffset = -1;
4964 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4965 If it wasn't true we wouldn't be here. */
4966 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4967 && DECL_MODE (field) == BLKmode)
4968 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4969 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4970 parms->nregs += 2;
4971 else
4972 parms->nregs += 1;
4974 else
4976 if (parms->intoffset == -1)
4977 parms->intoffset = bitpos;
4983 /* A subroutine of function_arg_record_value. Assign the bits of the
4984 structure between parms->intoffset and bitpos to integer registers. */
4987 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4988 struct function_arg_record_value_parms *parms)
4990 enum machine_mode mode;
4992 unsigned int startbit, endbit;
4993 int this_slotno, intslots, intoffset;
4996 if (parms->intoffset == -1)
4997 return;
4999 intoffset = parms->intoffset;
5000 parms->intoffset = -1;
5002 startbit = intoffset & -BITS_PER_WORD;
5003 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5004 intslots = (endbit - startbit) / BITS_PER_WORD;
5005 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5007 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5011 /* If this is the trailing part of a word, only load that much into
5012 the register. Otherwise load the whole register. Note that in
5013 the latter case we may pick up unwanted bits. It's not a problem
5014 at the moment but may wish to revisit. */
5016 if (intoffset % BITS_PER_WORD != 0)
5017 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5018 MODE_INT);
5019 else
5020 mode = word_mode;
5022 intoffset /= BITS_PER_UNIT;
5023 do
5025 regno = parms->regbase + this_slotno;
5026 reg = gen_rtx_REG (mode, regno);
5027 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5028 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5030 this_slotno += 1;
5031 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5032 mode = word_mode;
5033 parms->nregs += 1;
5034 intslots -= 1;
5036 while (intslots > 0);
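
/* Worked example (editor's, assuming 64-bit words): with intoffset = 32
   and bitpos = 128, startbit = 0 and endbit = 128, hence intslots = 2;
   the first iteration loads the trailing 32 bits of word 0 in SImode,
   the second a full word_mode register at byte offset 8. */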
5039 /* A subroutine of function_arg_record_value. Traverse the structure
5040 recursively and assign bits to floating point registers. Track which
5041 bits in between need integer registers; invoke function_arg_record_value_3
5042 to make that happen. */
5045 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5046 struct function_arg_record_value_parms *parms,
5052 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5054 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5061 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5063 if (TREE_CODE (field) == FIELD_DECL)
5065 HOST_WIDE_INT bitpos = startbitpos;
5067 if (DECL_SIZE (field) != 0)
5069 if (integer_zerop (DECL_SIZE (field)))
5072 if (host_integerp (bit_position (field), 1))
5073 bitpos += int_bit_position (field);
5076 /* ??? FIXME: else assume zero offset. */
5078 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5079 function_arg_record_value_2 (TREE_TYPE (field),
5083 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5084 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5089 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5090 int regno, nregs, pos;
5091 enum machine_mode mode = DECL_MODE (field);
5094 function_arg_record_value_3 (bitpos, parms);
5096 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5099 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5100 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5102 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5104 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5110 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5111 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5113 reg = gen_rtx_REG (mode, regno);
5114 pos = bitpos / BITS_PER_UNIT;
5115 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5116 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5120 regno += GET_MODE_SIZE (mode) / 4;
5121 reg = gen_rtx_REG (mode, regno);
5122 pos += GET_MODE_SIZE (mode);
5123 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5124 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5130 if (parms->intoffset == -1)
5131 parms->intoffset = bitpos;
5137 /* Used by function_arg and function_value to implement the complex
5138 conventions of the 64-bit ABI for passing and returning structures.
5139 Return an expression valid as a return value for the two macros
5140 FUNCTION_ARG and FUNCTION_VALUE.
5142 TYPE is the data type of the argument (as a tree).
5143 This is null for libcalls where that information may
5144 not be available.
5145 MODE is the argument's machine mode.
5146 SLOTNO is the index number of the argument's slot in the parameter array.
5147 NAMED is nonzero if this argument is a named parameter
5148 (otherwise it is an extra parameter matching an ellipsis).
5149 REGBASE is the regno of the base register for the parameter array. */
5152 function_arg_record_value (const_tree type, enum machine_mode mode,
5153 int slotno, int named, int regbase)
5155 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5156 struct function_arg_record_value_parms parms;
5159 parms.ret = NULL_RTX;
5160 parms.slotno = slotno;
5161 parms.named = named;
5162 parms.regbase = regbase;
5165 /* Compute how many registers we need. */
5167 parms.intoffset = 0;
5168 function_arg_record_value_1 (type, 0, &parms, false);
5170 /* Take into account pending integer fields. */
5171 if (parms.intoffset != -1)
5173 unsigned int startbit, endbit;
5174 int intslots, this_slotno;
5176 startbit = parms.intoffset & -BITS_PER_WORD;
5177 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5178 intslots = (endbit - startbit) / BITS_PER_WORD;
5179 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5181 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5183 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5184 /* We need to pass this field on the stack. */
5188 parms.nregs += intslots;
5190 nregs = parms.nregs;
5192 /* Allocate the vector and handle some annoying special cases. */
5193 if (nregs == 0)
5195 /* ??? Empty structure has no value? Duh? */
5196 if (typesize <= 0)
5198 /* Though there's nothing really to store, return a word register
5199 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5200 leads to breakage due to the fact that there are zero bytes to
5201 store. */
5202 return gen_rtx_REG (mode, regbase);
5206 /* ??? C++ has structures with no fields, and yet a size. Give up
5207 for now and pass everything back in integer registers. */
5208 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5210 if (nregs + slotno > SPARC_INT_ARG_MAX)
5211 nregs = SPARC_INT_ARG_MAX - slotno;
5213 gcc_assert (nregs != 0);
5215 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5217 /* If at least one field must be passed on the stack, generate
5218 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5219 also be passed on the stack. We can't do much better because the
5220 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5221 of structures for which the fields passed exclusively in registers
5222 are not at the beginning of the structure. */
5223 if (parms.stack)
5224 XVECEXP (parms.ret, 0, 0)
5225 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5227 /* Fill in the entries. */
5229 parms.intoffset = 0;
5230 function_arg_record_value_2 (type, 0, &parms, false);
5231 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5233 gcc_assert (parms.nregs == nregs);
5235 return parms.ret;
5238 /* Used by function_arg and function_value to implement the conventions
5239 of the 64-bit ABI for passing and returning unions.
5240 Return an expression valid as a return value for the two macros
5241 FUNCTION_ARG and FUNCTION_VALUE.
5243 SIZE is the size in bytes of the union.
5244 MODE is the argument's machine mode.
5245 REGNO is the hard register the union will be passed in. */
5248 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5249 int regno)
5251 int nwords = ROUND_ADVANCE (size), i;
5252 rtx regs;
5254 /* See comment in previous function for empty structures. */
5255 if (nwords == 0)
5256 return gen_rtx_REG (mode, regno);
5258 if (slotno == SPARC_INT_ARG_MAX - 1)
5259 nwords = 1;
5261 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5263 for (i = 0; i < nwords; i++)
5265 /* Unions are passed left-justified. */
5266 XVECEXP (regs, 0, i)
5267 = gen_rtx_EXPR_LIST (VOIDmode,
5268 gen_rtx_REG (word_mode, regno),
5269 GEN_INT (UNITS_PER_WORD * i));
5270 regno++;
5273 return regs;
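
/* Example (editor's): a 12-byte union starting in slot 0 has nwords = 2
   and yields (parallel [reg at byte 0, reg+1 at byte 8]), e.g. %o0/%o1
   for an outgoing argument; if it starts in the last slot
   (slotno == SPARC_INT_ARG_MAX - 1), nwords is clamped to 1 so only the
   in-register part is described. */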
5276 /* Used by function_arg and function_value to implement the conventions
5277 for passing and returning large (BLKmode) vectors.
5278 Return an expression valid as a return value for the two macros
5279 FUNCTION_ARG and FUNCTION_VALUE.
5281 SIZE is the size in bytes of the vector (at least 8 bytes).
5282 REGNO is the FP hard register the vector will be passed in. */
5285 function_arg_vector_value (int size, int regno)
5287 int i, nregs = size / 8;
5290 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5292 for (i = 0; i < nregs; i++)
5294 XVECEXP (regs, 0, i)
5295 = gen_rtx_EXPR_LIST (VOIDmode,
5296 gen_rtx_REG (DImode, regno + 2*i),
5297 GEN_INT (i*8));
5300 return regs;
5303 /* Handle the FUNCTION_ARG macro.
5304 Determine where to put an argument to a function.
5305 Value is zero to push the argument on the stack,
5306 or a hard register in which to store the argument.
5308 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5309 the preceding args and about the function being called.
5310 MODE is the argument's machine mode.
5311 TYPE is the data type of the argument (as a tree).
5312 This is null for libcalls where that information may
5313 not be available.
5314 NAMED is nonzero if this argument is a named parameter
5315 (otherwise it is an extra parameter matching an ellipsis).
5316 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5319 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5320 tree type, int named, int incoming_p)
5322 int regbase = (incoming_p
5323 ? SPARC_INCOMING_INT_ARG_FIRST
5324 : SPARC_OUTGOING_INT_ARG_FIRST);
5325 int slotno, regno, padding;
5326 enum mode_class mclass = GET_MODE_CLASS (mode);
5328 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5329 &regno, &padding);
5331 if (slotno == -1)
5332 return 0;
5333 /* Vector types deserve special treatment because they are polymorphic wrt
5334 their mode, depending upon whether VIS instructions are enabled. */
5335 if (type && TREE_CODE (type) == VECTOR_TYPE)
5337 HOST_WIDE_INT size = int_size_in_bytes (type);
5338 gcc_assert ((TARGET_ARCH32 && size <= 8)
5339 || (TARGET_ARCH64 && size <= 16));
5341 if (mode == BLKmode)
5342 return function_arg_vector_value (size,
5343 SPARC_FP_ARG_FIRST + 2*slotno);
5345 mclass = MODE_FLOAT;
5349 return gen_rtx_REG (mode, regno);
5351 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5352 and are promoted to registers if possible. */
5353 if (type && TREE_CODE (type) == RECORD_TYPE)
5355 HOST_WIDE_INT size = int_size_in_bytes (type);
5356 gcc_assert (size <= 16);
5358 return function_arg_record_value (type, mode, slotno, named, regbase);
5361 /* Unions up to 16 bytes in size are passed in integer registers. */
5362 else if (type && TREE_CODE (type) == UNION_TYPE)
5364 HOST_WIDE_INT size = int_size_in_bytes (type);
5365 gcc_assert (size <= 16);
5367 return function_arg_union_value (size, mode, slotno, regno);
5370 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5371 but also have the slot allocated for them.
5372 If no prototype is in scope fp values in register slots get passed
5373 in two places, either fp regs and int regs or fp regs and memory. */
5374 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5375 && SPARC_FP_REG_P (regno))
5377 rtx reg = gen_rtx_REG (mode, regno);
5378 if (cum->prototype_p || cum->libcall_p)
5380 /* "* 2" because fp reg numbers are recorded in 4 byte
5381 quantities. */
5382 #if 0
5383 /* ??? This will cause the value to be passed in the fp reg and
5384 in the stack. When a prototype exists we want to pass the
5385 value in the reg but reserve space on the stack. That's an
5386 optimization, and is deferred [for a bit]. */
5387 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5388 return gen_rtx_PARALLEL (mode,
5390 gen_rtx_EXPR_LIST (VOIDmode,
5391 NULL_RTX, const0_rtx),
5392 gen_rtx_EXPR_LIST (VOIDmode,
5393 reg, const0_rtx)));
5394 else
5395 #endif
5396 /* ??? It seems that passing back a register even when past
5397 the area declared by REG_PARM_STACK_SPACE will allocate
5398 space appropriately, and will not copy the data onto the
5399 stack, exactly as we desire.
5401 This is due to locate_and_pad_parm being called in
5402 expand_call whenever reg_parm_stack_space > 0, which
5403 while beneficial to our example here, would seem to be
5404 in error from what had been intended. Ho hum... -- r~ */
5405 return gen_rtx_REG (mode, regno);
5412 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5416 /* On incoming, we don't need to know that the value
5417 is passed in %f0 and %i0, and it confuses other parts
5418 causing needless spillage even on the simplest cases. */
5419 if (incoming_p)
5420 return reg;
5422 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5423 + (regno - SPARC_FP_ARG_FIRST) / 2);
5425 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5426 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5428 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5432 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5433 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5434 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5439 /* All other aggregate types are passed in an integer register in a mode
5440 corresponding to the size of the type. */
5441 else if (type && AGGREGATE_TYPE_P (type))
5443 HOST_WIDE_INT size = int_size_in_bytes (type);
5444 gcc_assert (size <= 16);
5446 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5449 return gen_rtx_REG (mode, regno);
5452 /* For an arg passed partly in registers and partly in memory,
5453 this is the number of bytes of registers used.
5454 For args passed entirely in registers or entirely in memory, zero.
5456 Any arg that starts in the first 6 regs but won't entirely fit in them
5457 needs partial registers on v8. On v9, structures with integer
5458 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5459 values that begin in the last fp reg [where "last fp reg" varies with the
5460 mode] will be split between that reg and memory. */
5463 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5464 tree type, bool named)
5466 int slotno, regno, padding;
5468 /* We pass 0 for incoming_p here, it doesn't matter. */
5469 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5471 if (slotno == -1)
5472 return 0;
5474 if (TARGET_ARCH32)
5476 if ((slotno + (mode == BLKmode
5477 ? ROUND_ADVANCE (int_size_in_bytes (type))
5478 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5479 > SPARC_INT_ARG_MAX)
5480 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
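
/* Worked example for the 32-bit case above (editor's, UNITS_PER_WORD == 4):
   a DImode argument whose first word falls in slot 5 needs slots 5 and 6,
   so (SPARC_INT_ARG_MAX - 5) * 4 = 4 bytes go in a register and the rest
   is passed on the stack. */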
5484 /* We are guaranteed by pass_by_reference that the size of the
5485 argument is not greater than 16 bytes, so we only need to return
5486 one word if the argument is partially passed in registers. */
5488 if (type && AGGREGATE_TYPE_P (type))
5490 int size = int_size_in_bytes (type);
5492 if (size > UNITS_PER_WORD
5493 && slotno == SPARC_INT_ARG_MAX - 1)
5494 return UNITS_PER_WORD;
5496 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5497 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5498 && ! (TARGET_FPU && named)))
5500 /* The complex types are passed as packed types. */
5501 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5502 && slotno == SPARC_INT_ARG_MAX - 1)
5503 return UNITS_PER_WORD;
5505 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5507 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5508 > SPARC_FP_ARG_MAX)
5509 return UNITS_PER_WORD;
5516 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5517 Specify whether to pass the argument by reference. */
5520 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5521 enum machine_mode mode, const_tree type,
5522 bool named ATTRIBUTE_UNUSED)
5525 /* Original SPARC 32-bit ABI says that structures and unions,
5526 and quad-precision floats are passed by reference. For Pascal,
5527 also pass arrays by reference. All other base types are passed
5530 Extended ABI (as implemented by the Sun compiler) says that all
5531 complex floats are passed by reference. Pass complex integers
5532 in registers up to 8 bytes. More generally, enforce the 2-word
5533 cap for passing arguments in registers.
5535 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5536 integers are passed like floats of the same size, that is in
5537 registers up to 8 bytes. Pass all vector floats by reference
5538 like structure and unions. */
5539 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5541 /* Catch CDImode, TFmode, DCmode and TCmode. */
5542 || GET_MODE_SIZE (mode) > 8
5544 && TREE_CODE (type) == VECTOR_TYPE
5545 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5547 /* Original SPARC 64-bit ABI says that structures and unions
5548 smaller than 16 bytes are passed in registers, as well as
5549 all other base types.
5551 Extended ABI (as implemented by the Sun compiler) says that
5552 complex floats are passed in registers up to 16 bytes. Pass
5553 all complex integers in registers up to 16 bytes. More generally,
5554 enforce the 2-word cap for passing arguments in registers.
5556 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5557 integers are passed like floats of the same size, that is in
5558 registers (up to 16 bytes). Pass all vector floats like structure
5559 and unions. */
5560 return ((type
5561 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5562 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5563 /* Catch CTImode and TCmode. */
5564 || GET_MODE_SIZE (mode) > 16);
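
/* Quick reference (editor's summary of the logic above): on 32-bit,
   aggregates, TFmode 'long double' and 16-byte _Complex values all go by
   reference; on 64-bit only objects wider than 16 bytes do, e.g. a
   32-byte '_Complex long double' (TCmode). */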
5567 /* Handle the FUNCTION_ARG_ADVANCE macro.
5568 Update the data in CUM to advance over an argument
5569 of mode MODE and data type TYPE.
5570 TYPE is null for libcalls where that information may not be available. */
5573 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5574 tree type, int named)
5576 int slotno, regno, padding;
5578 /* We pass 0 for incoming_p here, it doesn't matter. */
5579 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5581 /* If register required leading padding, add it. */
5583 cum->words += padding;
5587 cum->words += (mode != BLKmode
5588 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5589 : ROUND_ADVANCE (int_size_in_bytes (type)));
5593 if (type && AGGREGATE_TYPE_P (type))
5595 int size = int_size_in_bytes (type);
5597 if (size <= 8)
5598 ++cum->words;
5599 else if (size <= 16)
5600 cum->words += 2;
5601 else /* passed by reference */
5602 ++cum->words;
5606 cum->words += (mode != BLKmode
5607 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5608 : ROUND_ADVANCE (int_size_in_bytes (type)));
5613 /* Handle the FUNCTION_ARG_PADDING macro.
5614 For the 64 bit ABI structs are always stored left shifted in their
5615 argument slots. */
5618 function_arg_padding (enum machine_mode mode, const_tree type)
5620 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5621 return upward;
5623 /* Fall back to the default. */
5624 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5627 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5628 Specify whether to return the return value in memory. */
5631 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5634 /* Original SPARC 32-bit ABI says that structures and unions,
5635 and quad-precision floats are returned in memory. All other
5636 base types are returned in registers.
5638 Extended ABI (as implemented by the Sun compiler) says that
5639 all complex floats are returned in registers (8 FP registers
5640 at most for '_Complex long double'). Return all complex integers
5641 in registers (4 at most for '_Complex long long').
5643 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5644 integers are returned like floats of the same size, that is in
5645 registers up to 8 bytes and in memory otherwise. Return all
5646 vector floats in memory like structure and unions; note that
5647 they always have BLKmode like the latter. */
5648 return (TYPE_MODE (type) == BLKmode
5649 || TYPE_MODE (type) == TFmode
5650 || (TREE_CODE (type) == VECTOR_TYPE
5651 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5653 /* Original SPARC 64-bit ABI says that structures and unions
5654 smaller than 32 bytes are returned in registers, as well as
5655 all other base types.
5657 Extended ABI (as implemented by the Sun compiler) says that all
5658 complex floats are returned in registers (8 FP registers at most
5659 for '_Complex long double'). Return all complex integers in
5660 registers (4 at most for '_Complex TItype').
5662 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5663 integers are returned like floats of the same size, that is in
5664 registers. Return all vector floats like structure and unions;
5665 note that they always have BLKmode like the latter. */
5666 return ((TYPE_MODE (type) == BLKmode
5667 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5670 /* Handle the TARGET_STRUCT_VALUE target hook.
5671 Return where to find the structure return value address. */
5674 sparc_struct_value_rtx (tree fndecl, int incoming)
5676 if (TARGET_ARCH64)
5677 return 0;
5678 else
5680 rtx mem;
5682 if (incoming)
5683 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5684 STRUCT_VALUE_OFFSET));
5686 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5687 STRUCT_VALUE_OFFSET));
5689 /* Only follow the SPARC ABI for fixed-size structure returns.
5690 Variable size structure returns are handled per the normal
5691 procedures in GCC. This is enabled by -mstd-struct-return. */
5692 if (incoming == 2
5693 && sparc_std_struct_return
5694 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5695 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5697 /* We must check and adjust the return address, as it is
5698 optional as to whether the return object is really
5699 provided or not. */
5700 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5701 rtx scratch = gen_reg_rtx (SImode);
5702 rtx endlab = gen_label_rtx ();
5704 /* Calculate the return object size */
5705 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5706 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5707 /* Construct a temporary return value */
5708 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5710 /* Implement SPARC 32-bit psABI callee returns struct checking
5711 requirements:
5713 Fetch the instruction where we will return to and see if
5714 it's an unimp instruction (the most significant 10 bits
5715 will be zero). */
5716 emit_move_insn (scratch, gen_rtx_MEM (SImode,
5717 plus_constant (ret_rtx, 8)));
5718 /* Assume the size is valid and pre-adjust */
5719 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5720 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5721 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5722 /* Assign stack temp:
5723 Write the address of the memory pointed to by temp_val into
5724 the memory pointed to by mem */
5725 emit_move_insn (mem, XEXP (temp_val, 0));
5726 emit_label (endlab);
5729 set_mem_alias_set (mem, struct_value_alias_set);
5730 return mem;
5734 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5735 For v9, function return values are subject to the same rules as arguments,
5736 except that up to 32 bytes may be returned in registers. */
5739 function_value (const_tree type, enum machine_mode mode, int incoming_p)
5741 /* Beware that the two values are swapped here wrt function_arg. */
5742 int regbase = (incoming_p
5743 ? SPARC_OUTGOING_INT_ARG_FIRST
5744 : SPARC_INCOMING_INT_ARG_FIRST);
5745 enum mode_class mclass = GET_MODE_CLASS (mode);
5748 /* Vector types deserve special treatment because they are polymorphic wrt
5749 their mode, depending upon whether VIS instructions are enabled. */
5750 if (type && TREE_CODE (type) == VECTOR_TYPE)
5752 HOST_WIDE_INT size = int_size_in_bytes (type);
5753 gcc_assert ((TARGET_ARCH32 && size <= 8)
5754 || (TARGET_ARCH64 && size <= 32));
5756 if (mode == BLKmode)
5757 return function_arg_vector_value (size,
5758 SPARC_FP_ARG_FIRST);
5760 mclass = MODE_FLOAT;
5763 if (TARGET_ARCH64 && type)
5765 /* Structures up to 32 bytes in size are returned in registers. */
5766 if (TREE_CODE (type) == RECORD_TYPE)
5768 HOST_WIDE_INT size = int_size_in_bytes (type);
5769 gcc_assert (size <= 32);
5771 return function_arg_record_value (type, mode, 0, 1, regbase);
5774 /* Unions up to 32 bytes in size are returned in integer registers. */
5775 else if (TREE_CODE (type) == UNION_TYPE)
5777 HOST_WIDE_INT size = int_size_in_bytes (type);
5778 gcc_assert (size <= 32);
5780 return function_arg_union_value (size, mode, 0, regbase);
5783 /* Objects that require it are returned in FP registers. */
5784 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5785 ;
5787 /* All other aggregate types are returned in an integer register in a
5788 mode corresponding to the size of the type. */
5789 else if (AGGREGATE_TYPE_P (type))
5791 /* All other aggregate types are passed in an integer register
5792 in a mode corresponding to the size of the type. */
5793 HOST_WIDE_INT size = int_size_in_bytes (type);
5794 gcc_assert (size <= 32);
5796 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5798 /* ??? We probably should have made the same ABI change in
5799 3.4.0 as the one we made for unions. The latter was
5800 required by the SCD though, while the former is not
5801 specified, so we favored compatibility and efficiency.
5803 Now we're stuck for aggregates larger than 16 bytes,
5804 because OImode vanished in the meantime. Let's not
5805 try to be unduly clever, and simply follow the ABI
5806 for unions in that case. */
5807 if (mode == BLKmode)
5808 return function_arg_union_value (size, mode, 0, regbase);
5813 /* This must match sparc_promote_function_mode.
5814 ??? Maybe 32-bit pointers should actually remain in Pmode? */
5815 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5816 mode = word_mode;
5819 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
5820 regno = SPARC_FP_ARG_FIRST;
5821 else
5822 regno = regbase;
5824 return gen_rtx_REG (mode, regno);
5827 /* Do what is necessary for `va_start'. We look at the current function
5828 to determine if stdarg or varargs is used and return the address of
5829 the first unnamed parameter. */
5832 sparc_builtin_saveregs (void)
5834 int first_reg = crtl->args.info.words;
5838 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5839 emit_move_insn (gen_rtx_MEM (word_mode,
5840 gen_rtx_PLUS (Pmode,
5842 GEN_INT (FIRST_PARM_OFFSET (0)
5845 gen_rtx_REG (word_mode,
5846 SPARC_INCOMING_INT_ARG_FIRST + regno));
5848 address = gen_rtx_PLUS (Pmode,
5850 GEN_INT (FIRST_PARM_OFFSET (0)
5851 + UNITS_PER_WORD * first_reg));
5856 /* Implement `va_start' for stdarg. */
5859 sparc_va_start (tree valist, rtx nextarg)
5861 nextarg = expand_builtin_saveregs ();
5862 std_expand_builtin_va_start (valist, nextarg);
5865 /* Implement `va_arg' for stdarg. */
5868 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
5871 HOST_WIDE_INT size, rsize, align;
5874 tree ptrtype = build_pointer_type (type);
5876 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5879 size = rsize = UNITS_PER_WORD;
5885 size = int_size_in_bytes (type);
5886 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5891 /* For SPARC64, objects requiring 16-byte alignment get it. */
5892 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5893 align = 2 * UNITS_PER_WORD;
5895 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5896 are left-justified in their slots. */
5897 if (AGGREGATE_TYPE_P (type))
5899 if (size == 0)
5900 size = rsize = UNITS_PER_WORD;
5901 else
5902 size = rsize;
5910 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5911 size_int (align - 1));
5912 incr = fold_convert (sizetype, incr);
5913 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
5914 size_int (-align));
5915 incr = fold_convert (ptr_type_node, incr);
5918 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5919 addr = incr;
5921 if (BYTES_BIG_ENDIAN && size < rsize)
5922 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5923 size_int (rsize - size));
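
/* Example (editor's): on this big-endian target in 64-bit mode, a 4-byte
   'int' occupies the high half of its 8-byte slot, so the address is
   bumped by rsize - size = 4 to point at the value itself. */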
5927 addr = fold_convert (build_pointer_type (ptrtype), addr);
5928 addr = build_va_arg_indirect_ref (addr);
5931 /* If the address isn't aligned properly for the type, we need a temporary.
5932 FIXME: This is inefficient, usually we can do this in registers. */
5933 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
5935 tree tmp = create_tmp_var (type, "va_arg_tmp");
5936 tree dest_addr = build_fold_addr_expr (tmp);
5937 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
5938 3, dest_addr, addr, size_int (rsize));
5939 TREE_ADDRESSABLE (tmp) = 1;
5940 gimplify_and_add (copy, pre_p);
5945 addr = fold_convert (ptrtype, addr);
5947 incr
5948 = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
5949 gimplify_assign (valist, incr, post_p);
5951 return build_va_arg_indirect_ref (addr);
5954 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5955 Specify whether the vector mode is supported by the hardware. */
5958 sparc_vector_mode_supported_p (enum machine_mode mode)
5960 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
5963 /* Return the string to output an unconditional branch to LABEL, which is
5964 the operand number of the label.
5966 DEST is the destination insn (i.e. the label), INSN is the source. */
5969 output_ubranch (rtx dest, int label, rtx insn)
5971 static char string[64];
5972 bool v9_form = false;
5975 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5977 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5978 - INSN_ADDRESSES (INSN_UID (insn)));
5979 /* Leave some instructions for "slop". */
5980 if (delta >= -260000 && delta < 260000)
5981 v9_form = true;
5984 if (v9_form)
5985 strcpy (string, "ba%*,pt\t%%xcc, ");
5986 else
5987 strcpy (string, "b%*\t");
5989 p = strchr (string, '\0');
6000 /* Return the string to output a conditional branch to LABEL, which is
6001 the operand number of the label. OP is the conditional expression.
6002 XEXP (OP, 0) is assumed to be a condition code register (integer or
6003 floating point) and its mode specifies what kind of comparison we made.
6005 DEST is the destination insn (i.e. the label), INSN is the source.
6007 REVERSED is nonzero if we should reverse the sense of the comparison.
6009 ANNUL is nonzero if we should generate an annulling branch. */
6012 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
6015 static char string[64];
6016 enum rtx_code code = GET_CODE (op);
6017 rtx cc_reg = XEXP (op, 0);
6018 enum machine_mode mode = GET_MODE (cc_reg);
6019 const char *labelno, *branch;
6020 int spaces = 8, far;
6023 /* v9 branches are limited to +-1MB. If it is too far away,
6024 change
6026 bne,pt %xcc, .LC30
6028 to
6030 be,pn %xcc, .+12
6031 nop
6032 ba .LC30
6034 and
6036 fbne,a,pn %fcc2, .LC29
6038 to
6040 fbe,pt %fcc2, .+16
6041 nop
6042 ba .LC29 */
6044 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6047 /* Reversal of FP compares takes care -- an ordered compare
6048 becomes an unordered compare and vice versa. */
6049 if (mode == CCFPmode || mode == CCFPEmode)
6050 code = reverse_condition_maybe_unordered (code);
6052 code = reverse_condition (code);
6055 /* Start by writing the branch condition. */
6056 if (mode == CCFPmode || mode == CCFPEmode)
6107 /* ??? !v9: FP branches cannot be preceded by another floating point
6108 insn. Because there is currently no concept of pre-delay slots,
6109 we can fix this only by always emitting a nop before a floating
6110 point branch. */
6112 string[0] = '\0';
6113 if (! TARGET_V9)
6114 strcpy (string, "nop\n\t");
6115 strcat (string, branch);
6128 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6140 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6161 strcpy (string, branch);
6163 spaces -= strlen (branch);
6164 p = strchr (string, '\0');
6166 /* Now add the annulling, the label, and a possible noop. */
6179 if (! far && insn && INSN_ADDRESSES_SET_P ())
6181 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6182 - INSN_ADDRESSES (INSN_UID (insn)));
6183 /* Leave some instructions for "slop". */
6184 if (delta < -260000 || delta >= 260000)
6185 far = 1;
6188 if (mode == CCFPmode || mode == CCFPEmode)
6190 static char v9_fcc_labelno[] = "%%fccX, ";
6191 /* Set the char indicating the number of the fcc reg to use. */
6192 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6193 labelno = v9_fcc_labelno;
6196 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6200 else if (mode == CCXmode || mode == CCX_NOOVmode)
6202 labelno = "%%xcc, ";
6207 labelno = "%%icc, ";
6212 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6214 strcpy (p,
6215 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6216 ? ",pt" : ",pn");
6217 p += 3;
6218 spaces -= 3;
6228 strcpy (p, labelno);
6229 p = strchr (p, '\0');
6232 strcpy (p, ".+12\n\t nop\n\tb\t");
6233 /* Skip the next insn if requested or
6234 if we know that it will be a nop. */
6235 if (annul || ! final_sequence)
6249 /* Emit a library call comparison between floating point X and Y.
6250 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
6251 Return the new operator to be used in the comparison sequence.
6253 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6254 values as arguments instead of the TFmode registers themselves,
6255 that's why we cannot call emit_float_lib_cmp. */
6258 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6261 rtx slot0, slot1, result, tem, tem2, libfunc;
6262 enum machine_mode mode;
6263 enum rtx_code new_comparison;
6265 switch (comparison)
6267 case EQ:
6268 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
6269 break;
6271 case NE:
6272 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
6273 break;
6275 case GT:
6276 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
6277 break;
6279 case GE:
6280 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
6281 break;
6283 case LT:
6284 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
6285 break;
6287 case LE:
6288 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
6289 break;
6291 case ORDERED:
6292 case UNORDERED:
6293 case UNGT:
6294 case UNGE:
6295 case UNLT:
6296 case UNLE:
6297 case UNEQ:
6298 case LTGT:
6299 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
6300 break;
6302 default:
6303 gcc_unreachable ();
6312 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6313 emit_move_insn (slot0, x);
6320 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6321 emit_move_insn (slot1, y);
6324 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
6325 emit_library_call (libfunc, LCT_NORMAL,
6327 XEXP (slot0, 0), Pmode,
6328 XEXP (slot1, 0), Pmode);
6333 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
6334 emit_library_call (libfunc, LCT_NORMAL,
6336 x, TFmode, y, TFmode);
6341 /* Immediately move the result of the libcall into a pseudo
6342 register so reload doesn't clobber the value if it needs
6343 the return register for a spill reg. */
6344 result = gen_reg_rtx (mode);
6345 emit_move_insn (result, hard_libcall_value (mode, libfunc));
6347 switch (comparison)
6349 default:
6350 return gen_rtx_NE (VOIDmode, result, const0_rtx);
6351 case ORDERED:
6352 case UNORDERED:
6353 new_comparison = (comparison == UNORDERED ? EQ : NE);
6354 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
6355 case UNGT:
6356 case UNGE:
6357 new_comparison = (comparison == UNGT ? GT : NE);
6358 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
6359 case UNLE:
6360 return gen_rtx_NE (VOIDmode, result, const2_rtx);
6361 case UNLT:
6362 tem = gen_reg_rtx (mode);
6363 if (TARGET_ARCH32)
6364 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6365 else
6366 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6367 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
6368 case UNEQ:
6369 case LTGT:
6370 tem = gen_reg_rtx (mode);
6371 if (TARGET_ARCH32)
6372 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6373 else
6374 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6375 tem2 = gen_reg_rtx (mode);
6376 if (TARGET_ARCH32)
6377 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6378 else
6379 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6380 new_comparison = (comparison == UNEQ ? EQ : NE);
6381 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
6387 /* Generate an unsigned DImode to FP conversion. This is the same code
6388 optabs would emit if we didn't have TFmode patterns. */
6391 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6393 rtx neglab, donelab, i0, i1, f0, in, out;
6395 out = operands[0];
6396 in = force_reg (DImode, operands[1]);
6397 neglab = gen_label_rtx ();
6398 donelab = gen_label_rtx ();
6399 i0 = gen_reg_rtx (DImode);
6400 i1 = gen_reg_rtx (DImode);
6401 f0 = gen_reg_rtx (mode);
6403 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6405 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6406 emit_jump_insn (gen_jump (donelab));
6409 emit_label (neglab);
6411 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6412 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6413 emit_insn (gen_iordi3 (i0, i0, i1));
6414 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6415 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6417 emit_label (donelab);
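
/* Editor's note on the halving trick above: for in >= 2^63, i0 is set to
   (in >> 1) | (in & 1), halving the value while keeping the low bit
   sticky for correct rounding; converting i0 and doubling via f0 + f0
   then reproduces the unsigned value, e.g. in = 2^63 gives i0 = 2^62
   and out = 2.0 * 2^62 = 2^63. */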
6420 /* Generate an FP to unsigned DImode conversion. This is the same code
6421 optabs would emit if we didn't have TFmode patterns. */
6424 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6426 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6428 out = operands[0];
6429 in = force_reg (mode, operands[1]);
6430 neglab = gen_label_rtx ();
6431 donelab = gen_label_rtx ();
6432 i0 = gen_reg_rtx (DImode);
6433 i1 = gen_reg_rtx (DImode);
6434 limit = gen_reg_rtx (mode);
6435 f0 = gen_reg_rtx (mode);
6437 emit_move_insn (limit,
6438 CONST_DOUBLE_FROM_REAL_VALUE (
6439 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6440 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6442 emit_insn (gen_rtx_SET (VOIDmode,
6443 out,
6444 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6445 emit_jump_insn (gen_jump (donelab));
6448 emit_label (neglab);
6450 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6451 emit_insn (gen_rtx_SET (VOIDmode,
6452 i0,
6453 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6454 emit_insn (gen_movdi (i1, const1_rtx));
6455 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6456 emit_insn (gen_xordi3 (out, i0, i1));
6458 emit_label (donelab);
6461 /* Return the string to output a conditional branch to LABEL, testing
6462 register REG. LABEL is the operand number of the label; REG is the
6463 operand number of the reg. OP is the conditional expression. The mode
6464 of REG says what kind of comparison we made.
6466 DEST is the destination insn (i.e. the label), INSN is the source.
6468 REVERSED is nonzero if we should reverse the sense of the comparison.
6470 ANNUL is nonzero if we should generate an annulling branch. */
6473 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6474 int annul, rtx insn)
6476 static char string[64];
6477 enum rtx_code code = GET_CODE (op);
6478 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6483 /* branch on register are limited to +-128KB. If it is too far away,
6484 change
6486 brnz,pt %g1, .LC30
6488 to
6490 brz,pn %g1, .+12
6491 nop
6492 ba,pt %xcc, .LC30
6494 and
6496 brgez,a,pn %o1, .LC29
6498 to
6500 brltz,pt %o1, .+16
6501 nop
6502 ba,pt %xcc, .LC29 */
6504 far = get_attr_length (insn) >= 3;
6506 /* If not floating-point or if EQ or NE, we can just reverse the code. */
6508 code = reverse_condition (code);
6510 /* Only 64 bit versions of these instructions exist. */
6511 gcc_assert (mode == DImode);
6513 /* Start by writing the branch condition. */
6514 switch (code)
6516 case NE:
6518 strcpy (string, "brnz");
6519 break;
6521 case EQ:
6522 strcpy (string, "brz");
6523 break;
6525 case GE:
6526 strcpy (string, "brgez");
6527 break;
6529 case LT:
6530 strcpy (string, "brlz");
6531 break;
6533 case LE:
6534 strcpy (string, "brlez");
6535 break;
6537 case GT:
6538 strcpy (string, "brgz");
6539 break;
6541 default:
6542 gcc_unreachable ();
6545 p = strchr (string, '\0');
6547 /* Now add the annulling, reg, label, and nop. */
6554 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6556 strcpy (p,
6557 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6558 ? ",pt" : ",pn");
6559 p += 3;
6562 *p = p < string + 8 ? '\t' : ' ';
6570 int veryfar = 1, delta;
6572 if (INSN_ADDRESSES_SET_P ())
6574 delta = (INSN_ADDRESSES (INSN_UID (dest))
6575 - INSN_ADDRESSES (INSN_UID (insn)));
6576 /* Leave some instructions for "slop". */
6577 if (delta >= -260000 && delta < 260000)
6578 veryfar = 0;
6581 strcpy (p, ".+12\n\t nop\n\t");
6582 /* Skip the next insn if requested or
6583 if we know that it will be a nop. */
6584 if (annul || ! final_sequence)
6594 strcpy (p, "ba,pt\t%%xcc, ");
6608 /* Return 1, if any of the registers of the instruction are %l[0-7] or %o[0-7].
6609 Such instructions cannot be used in the delay slot of return insn on v9.
6610 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
6614 epilogue_renumber (register rtx *where, int test)
6616 register const char *fmt;
6618 register enum rtx_code code;
6623 code = GET_CODE (*where);
6625 switch (code)
6627 case REG:
6628 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6629 return 1;
6630 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6631 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
6638 case PLUS:
6639 /* Do not replace the frame pointer with the stack pointer because
6640 it can cause the delayed instruction to load below the stack.
6641 This occurs when instructions like:
6643 (set (reg/i:SI 24 %i0)
6644 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6645 (const_int -20 [0xffffffec])) 0))
6647 are in the return delayed slot. */
6649 if (GET_CODE (XEXP (*where, 0)) == REG
6650 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6651 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6652 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6653 return 1;
6654 break;
6656 case MEM:
6657 if (SPARC_STACK_BIAS
6658 && GET_CODE (XEXP (*where, 0)) == REG
6659 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6660 return 1;
6661 break;
6663 default:
6664 break;
6667 fmt = GET_RTX_FORMAT (code);
6669 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6674 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6675 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6678 else if (fmt[i] == 'e'
6679 && epilogue_renumber (&(XEXP (*where, i)), test))
6685 /* Leaf functions and non-leaf functions have different needs. */
6687 static const int
6688 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6690 static const int
6691 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6693 static const int *const reg_alloc_orders[] = {
6694 reg_leaf_alloc_order,
6695 reg_nonleaf_alloc_order};
6698 order_regs_for_local_alloc (void)
6700 static int last_order_nonleaf = 1;
6702 if (df_regs_ever_live_p (15) != last_order_nonleaf)
6704 last_order_nonleaf = !last_order_nonleaf;
6705 memcpy ((char *) reg_alloc_order,
6706 (const char *) reg_alloc_orders[last_order_nonleaf],
6707 FIRST_PSEUDO_REGISTER * sizeof (int));
6711 /* Return 1 if REG and MEM are legitimate enough to allow the various
6712 mem<-->reg splits to be run. */
6715 sparc_splitdi_legitimate (rtx reg, rtx mem)
6717 /* Punt if we are here by mistake. */
6718 gcc_assert (reload_completed);
6720 /* We must have an offsettable memory reference. */
6721 if (! offsettable_memref_p (mem))
6722 return 0;
6724 /* If we have legitimate args for ldd/std, we do not want
6725 the split to happen. */
6726 if ((REGNO (reg) % 2) == 0
6727 && mem_min_alignment (mem, 8))
6728 return 0;
6730 return 1;
6734 /* Return 1 if x and y are some kind of REG and they refer to
6735 different hard registers. This test is guaranteed to be
6736 run after reload. */
6739 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6741 if (GET_CODE (x) != REG)
6742 return 0;
6743 if (GET_CODE (y) != REG)
6744 return 0;
6745 if (REGNO (x) == REGNO (y))
6746 return 0;
6748 return 1;
6750 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6751 This makes them candidates for using ldd and std insns.
6753 Note reg1 and reg2 *must* be hard registers. */
6756 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6758 /* We might have been passed a SUBREG. */
6759 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6760 return 0;
6762 if (REGNO (reg1) % 2 != 0)
6763 return 0;
6765 /* Integer ldd is deprecated in SPARC V9. */
6766 if (TARGET_V9 && REGNO (reg1) < 32)
6769 return (REGNO (reg1) == REGNO (reg2) - 1);
6772 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
6775 This can only happen when addr1 and addr2, the addresses in mem1
6776 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6777 addr1 must also be aligned on a 64-bit boundary.
6779 Also iff dependent_reg_rtx is not null it should not be used to
6780 compute the address for mem1, i.e. we cannot optimize a sequence
6792 But, note that the transformation from:
6797 is perfectly fine. Thus, the peephole2 patterns always pass us
6798 the destination register of the first load, never the second one.
6800 For stores we don't have a similar problem, so dependent_reg_rtx is
6804 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6808 HOST_WIDE_INT offset1;
6810 /* The mems cannot be volatile. */
6811 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6814 /* MEM1 should be aligned on a 64-bit boundary. */
6815 if (MEM_ALIGN (mem1) < 64)
6818 addr1 = XEXP (mem1, 0);
6819 addr2 = XEXP (mem2, 0);
6821 /* Extract a register number and offset (if used) from the first addr. */
6822 if (GET_CODE (addr1) == PLUS)
6824 /* If not a REG, return zero. */
6825 if (GET_CODE (XEXP (addr1, 0)) != REG)
6829 reg1 = REGNO (XEXP (addr1, 0));
6830 /* The offset must be constant! */
6831 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6833 offset1 = INTVAL (XEXP (addr1, 1));
6836 else if (GET_CODE (addr1) != REG)
6840 reg1 = REGNO (addr1);
6841 /* This was a simple (mem (reg)) expression. Offset is 0. */
6845 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
6846 if (GET_CODE (addr2) != PLUS)
6849 if (GET_CODE (XEXP (addr2, 0)) != REG
6850 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6853 if (reg1 != REGNO (XEXP (addr2, 0)))
6856 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6859 /* The first offset must be evenly divisible by 8 to ensure the
6860 address is 64-bit aligned. */
6861 if (offset1 % 8 != 0)
6864 /* The offset for the second addr must be 4 more than the first addr. */
6865 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6868 /* All the tests passed. addr1 and addr2 are valid for ldd and std
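/* Illustrative example (editor's sketch, pre-V9 integer registers): the
   pair of loads
     ld [%o0+8], %g2
     ld [%o0+12], %g3
   satisfies every check above (same base register, first offset divisible
   by 8, second offset = first + 4, even/odd register pair), so it can be
   rewritten as the single
     ldd [%o0+8], %g2
   which fills %g2 and %g3 in one memory access. */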
6873 /* Return 1 if reg is a pseudo, or is the first register in
6874 a hard register pair. This makes it suitable for use in
6875 ldd and std insns. */
6878 register_ok_for_ldd (rtx reg)
6880 /* We might have been passed a SUBREG. */
6884 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6885 return (REGNO (reg) % 2 == 0);
6890 /* Return 1 if OP is a memory whose address is known to be
6891 aligned on an 8-byte boundary, or a pseudo during reload.
6892 This makes it suitable for use in ldd and std insns. */
6895 memory_ok_for_ldd (rtx op)
6899 /* In 64-bit mode, we assume that the address is word-aligned. */
6900 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
6903 if ((reload_in_progress || reload_completed)
6904 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
6907 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
6909 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
6918 /* Print operand X (an rtx) in assembler syntax to file FILE.
6919 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6920 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6923 print_operand (FILE *file, rtx x, int code)
6928 /* Output an insn in a delay slot. */
6930 sparc_indent_opcode = 1;
6932 fputs ("\n\t nop", file);
6935 /* Output an annul flag if there's nothing for the delay slot and we
6936 are optimizing. This is always used with '(' below.
6937 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6938 this is a dbx bug. So, we only do this when optimizing.
6939 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6940 Always emit a nop in case the next instruction is a branch. */
6941 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6945 /* Output a 'nop' if there's nothing for the delay slot and we are
6946 not optimizing. This is always used with '*' above. */
6947 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6948 fputs ("\n\t nop", file);
6949 else if (final_sequence)
6950 sparc_indent_opcode = 1;
6953 /* Output the right displacement from the saved PC on function return.
6954 The caller may have placed an "unimp" insn immediately after the call
6955 so we have to account for it. This insn is used in the 32-bit ABI
6956 when calling a function that returns a non zero-sized structure. The
6957 64-bit ABI doesn't have it. Be careful to have this test be the same
6958 as that for the call. The exception is when sparc_std_struct_return
6959 is enabled: then the psABI is followed exactly and the adjustment is made
6960 by the code in sparc_struct_value_rtx. The call emitted is the same
6961 when sparc_std_struct_return is enabled. */
6963 && cfun->returns_struct
6964 && !sparc_std_struct_return
6965 && DECL_SIZE (DECL_RESULT (current_function_decl))
6966 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6967 == INTEGER_CST
6968 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6974 /* Output the Embedded Medium/Anywhere code model base register. */
6975 fputs (EMBMEDANY_BASE_REG, file);
6978 /* Print some local dynamic TLS name. */
6979 assemble_name (file, get_some_local_dynamic_name ());
6983 /* Adjust the operand to take into account a RESTORE operation. */
6984 if (GET_CODE (x) == CONST_INT)
6986 else if (GET_CODE (x) != REG)
6987 output_operand_lossage ("invalid %%Y operand");
6988 else if (REGNO (x) < 8)
6989 fputs (reg_names[REGNO (x)], file);
6990 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6991 fputs (reg_names[REGNO (x)-16], file);
6993 output_operand_lossage ("invalid %%Y operand");
6996 /* Print out the low order register name of a register pair. */
6997 if (WORDS_BIG_ENDIAN)
6998 fputs (reg_names[REGNO (x)+1], file);
7000 fputs (reg_names[REGNO (x)], file);
7003 /* Print out the high order register name of a register pair. */
7004 if (WORDS_BIG_ENDIAN)
7005 fputs (reg_names[REGNO (x)], file);
7007 fputs (reg_names[REGNO (x)+1], file);
7010 /* Print out the second register name of a register pair or quad.
7011 I.e., R (%o0) => %o1. */
7012 fputs (reg_names[REGNO (x)+1], file);
7015 /* Print out the third register name of a register quad.
7016 I.e., S (%o0) => %o2. */
7017 fputs (reg_names[REGNO (x)+2], file);
7020 /* Print out the fourth register name of a register quad.
7021 I.e., T (%o0) => %o3. */
7022 fputs (reg_names[REGNO (x)+3], file);
7025 /* Print a condition code register. */
7026 if (REGNO (x) == SPARC_ICC_REG)
7028 /* We don't handle CC[X]_NOOVmode because they're not supposed
7029 to occur here. */
7030 if (GET_MODE (x) == CCmode)
7031 fputs ("%icc", file);
7032 else if (GET_MODE (x) == CCXmode)
7033 fputs ("%xcc", file);
7038 /* %fccN register */
7039 fputs (reg_names[REGNO (x)], file);
7042 /* Print the operand's address only. */
7043 output_address (XEXP (x, 0));
7046 /* In this case we need a register. Use %g0 if the
7047 operand is const0_rtx. */
7049 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7051 fputs ("%g0", file);
7058 switch (GET_CODE (x))
7060 case IOR: fputs ("or", file); break;
7061 case AND: fputs ("and", file); break;
7062 case XOR: fputs ("xor", file); break;
7063 default: output_operand_lossage ("invalid %%A operand");
7068 switch (GET_CODE (x))
7070 case IOR: fputs ("orn", file); break;
7071 case AND: fputs ("andn", file); break;
7072 case XOR: fputs ("xnor", file); break;
7073 default: output_operand_lossage ("invalid %%B operand");
7077 /* These are used by the conditional move instructions. */
7081 enum rtx_code rc = GET_CODE (x);
7085 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7086 if (mode == CCFPmode || mode == CCFPEmode)
7087 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7089 rc = reverse_condition (GET_CODE (x));
7093 case NE: fputs ("ne", file); break;
7094 case EQ: fputs ("e", file); break;
7095 case GE: fputs ("ge", file); break;
7096 case GT: fputs ("g", file); break;
7097 case LE: fputs ("le", file); break;
7098 case LT: fputs ("l", file); break;
7099 case GEU: fputs ("geu", file); break;
7100 case GTU: fputs ("gu", file); break;
7101 case LEU: fputs ("leu", file); break;
7102 case LTU: fputs ("lu", file); break;
7103 case LTGT: fputs ("lg", file); break;
7104 case UNORDERED: fputs ("u", file); break;
7105 case ORDERED: fputs ("o", file); break;
7106 case UNLT: fputs ("ul", file); break;
7107 case UNLE: fputs ("ule", file); break;
7108 case UNGT: fputs ("ug", file); break;
7109 case UNGE: fputs ("uge", file); break;
7110 case UNEQ: fputs ("ue", file); break;
7111 default: output_operand_lossage (code == 'c'
7112 ? "invalid %%c operand"
7113 : "invalid %%C operand");
7118 /* These are used by the movr instruction pattern. */
7122 enum rtx_code rc = (code == 'd'
7123 ? reverse_condition (GET_CODE (x))
7127 case NE: fputs ("ne", file); break;
7128 case EQ: fputs ("e", file); break;
7129 case GE: fputs ("gez", file); break;
7130 case LT: fputs ("lz", file); break;
7131 case LE: fputs ("lez", file); break;
7132 case GT: fputs ("gz", file); break;
7133 default: output_operand_lossage (code == 'd'
7134 ? "invalid %%d operand"
7135 : "invalid %%D operand");
7142 /* Print a sign-extended character. */
7143 int i = trunc_int_for_mode (INTVAL (x), QImode);
7144 fprintf (file, "%d", i);
7149 /* Operand must be a MEM; write its address. */
7150 if (GET_CODE (x) != MEM)
7151 output_operand_lossage ("invalid %%f operand");
7152 output_address (XEXP (x, 0));
7157 /* Print a sign-extended 32-bit value. */
7159 if (GET_CODE(x) == CONST_INT)
7161 else if (GET_CODE(x) == CONST_DOUBLE)
7162 i = CONST_DOUBLE_LOW (x);
7165 output_operand_lossage ("invalid %%s operand");
7168 i = trunc_int_for_mode (i, SImode);
7169 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7174 /* Do nothing special. */
7178 /* Undocumented flag. */
7179 output_operand_lossage ("invalid operand output code");
7182 if (GET_CODE (x) == REG)
7183 fputs (reg_names[REGNO (x)], file);
7184 else if (GET_CODE (x) == MEM)
7187 /* Poor Sun assembler doesn't understand absolute addressing. */
7188 if (CONSTANT_P (XEXP (x, 0)))
7189 fputs ("%g0+", file);
7190 output_address (XEXP (x, 0));
7193 else if (GET_CODE (x) == HIGH)
7195 fputs ("%hi(", file);
7196 output_addr_const (file, XEXP (x, 0));
7199 else if (GET_CODE (x) == LO_SUM)
7201 print_operand (file, XEXP (x, 0), 0);
7202 if (TARGET_CM_MEDMID)
7203 fputs ("+%l44(", file);
7205 fputs ("+%lo(", file);
7206 output_addr_const (file, XEXP (x, 1));
7209 else if (GET_CODE (x) == CONST_DOUBLE
7210 && (GET_MODE (x) == VOIDmode
7211 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7213 if (CONST_DOUBLE_HIGH (x) == 0)
7214 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7215 else if (CONST_DOUBLE_HIGH (x) == -1
7216 && CONST_DOUBLE_LOW (x) < 0)
7217 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7219 output_operand_lossage ("long long constant not a valid immediate operand");
7221 else if (GET_CODE (x) == CONST_DOUBLE)
7222 output_operand_lossage ("floating point constant not a valid immediate operand");
7223 else { output_addr_const (file, x); }
7226 /* Target hook for assembling integer objects. The sparc version has
7227 special handling for aligned DI-mode objects. */
7230 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7232 /* ??? We only output .xword's for symbols and only then in environments
7233 where the assembler can handle them. */
7234 if (aligned_p && size == 8
7235 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7239 assemble_integer_with_op ("\t.xword\t", x);
7244 assemble_aligned_integer (4, const0_rtx);
7245 assemble_aligned_integer (4, x);
7249 return default_assemble_integer (x, size, aligned_p);
7252 /* Return the value of a code used in the .proc pseudo-op that says
7253 what kind of result this function returns. For non-C types, we pick
7254 the closest C type. */
7256 #ifndef SHORT_TYPE_SIZE
7257 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7260 #ifndef INT_TYPE_SIZE
7261 #define INT_TYPE_SIZE BITS_PER_WORD
7264 #ifndef LONG_TYPE_SIZE
7265 #define LONG_TYPE_SIZE BITS_PER_WORD
7268 #ifndef LONG_LONG_TYPE_SIZE
7269 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7272 #ifndef FLOAT_TYPE_SIZE
7273 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7276 #ifndef DOUBLE_TYPE_SIZE
7277 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7280 #ifndef LONG_DOUBLE_TYPE_SIZE
7281 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7285 sparc_type_code (register tree type)
7287 register unsigned long qualifiers = 0;
7288 register unsigned shift;
7290 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7291 setting more, since some assemblers will give an error for this. Also,
7292 we must be careful to avoid shifts of 32 bits or more, since those
7293 give unpredictable results. */
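/* Layout sketch (editor's addition, inferred from the cases below): the
   base type code sits in the low six bits and each level of derivation
   adds a 2-bit field starting at bit 6. For example, a pointer to int
   would encode as (1 << 6) | 4 = 0x44, assuming 4 is the code returned
   for int-sized signed integers below. */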
7295 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7297 switch (TREE_CODE (type))
7303 qualifiers |= (3 << shift);
7308 qualifiers |= (2 << shift);
7312 case REFERENCE_TYPE:
7314 qualifiers |= (1 << shift);
7318 return (qualifiers | 8);
7321 case QUAL_UNION_TYPE:
7322 return (qualifiers | 9);
7325 return (qualifiers | 10);
7328 return (qualifiers | 16);
7331 /* If this is a range type, consider it to be the underlying
7332 type. */
7333 if (TREE_TYPE (type) != 0)
7336 /* Carefully distinguish all the standard types of C,
7337 without messing up if the language is not C. We do this by
7338 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7339 look at both the names and the above fields, but that's redundant.
7340 Any type whose size is between two C types will be considered
7341 to be the wider of the two types. Also, we do not have a
7342 special code to use for "long long", so anything wider than
7343 long is treated the same. Note that we can't distinguish
7344 between "int" and "long" in this code if they are the same
7345 size, but that's fine, since neither can the assembler. */
7347 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7348 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7350 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7351 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7353 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7354 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7357 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7360 /* If this is a range type, consider it to be the underlying
7361 type. */
7362 if (TREE_TYPE (type) != 0)
7365 /* Carefully distinguish all the standard types of C,
7366 without messing up if the language is not C. */
7368 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7369 return (qualifiers | 6);
7372 return (qualifiers | 7);
7374 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7375 /* ??? We need to distinguish between double and float complex types,
7376 but I don't know how yet because I can't reach this code from
7377 existing front-ends. */
7378 return (qualifiers | 7); /* Who knows? */
7381 case BOOLEAN_TYPE: /* Boolean truth value type. */
7382 case LANG_TYPE: /* ? */
7386 gcc_unreachable (); /* Not a type! */
7393 /* Nested function support. */
7395 /* Emit RTL insns to initialize the variable parts of a trampoline.
7396 FNADDR is an RTX for the address of the function's pure code.
7397 CXT is an RTX for the static chain value for the function.
7399 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7400 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7401 (to store insns). This is a bit excessive. Perhaps a different
7402 mechanism would be better here.
7404 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7407 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
7409 /* SPARC 32-bit trampoline:
7411 sethi %hi(fn), %g1
7412 sethi %hi(static), %g2
7413 jmp %g1+%lo(fn)
7414 or %g2, %lo(static), %g2
7416 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7417 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7418 */
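/* Worked example (editor's sketch): given the SETHI/JMPL encodings above,
   the magic constants used below decode as
     0x03000000 | (fn  >> 10)     sethi %hi(fn), %g1
     0x05000000 | (cxt >> 10)     sethi %hi(static), %g2
     0x81c06000 | (fn  & 0x3ff)   jmpl  %g1+%lo(fn), %g0
     0x8410a000 | (cxt & 0x3ff)   or    %g2, %lo(static), %g2
   i.e. each 32-bit word stored into the trampoline is one complete
   instruction with the relevant address bits merged in. */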
7421 (adjust_address (m_tramp, SImode, 0),
7422 expand_binop (SImode, ior_optab,
7423 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7424 size_int (10), 0, 1),
7425 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7426 NULL_RTX, 1, OPTAB_DIRECT));
7429 (adjust_address (m_tramp, SImode, 4),
7430 expand_binop (SImode, ior_optab,
7431 expand_shift (RSHIFT_EXPR, SImode, cxt,
7432 size_int (10), 0, 1),
7433 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7434 NULL_RTX, 1, OPTAB_DIRECT));
7437 (adjust_address (m_tramp, SImode, 8),
7438 expand_binop (SImode, ior_optab,
7439 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7440 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7441 NULL_RTX, 1, OPTAB_DIRECT));
7444 (adjust_address (m_tramp, SImode, 12),
7445 expand_binop (SImode, ior_optab,
7446 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7447 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7448 NULL_RTX, 1, OPTAB_DIRECT));
7450 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7451 aligned on a 16 byte boundary so one flush clears it all. */
7452 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
7453 if (sparc_cpu != PROCESSOR_ULTRASPARC
7454 && sparc_cpu != PROCESSOR_ULTRASPARC3
7455 && sparc_cpu != PROCESSOR_NIAGARA
7456 && sparc_cpu != PROCESSOR_NIAGARA2)
7457 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
7459 /* Call __enable_execute_stack after writing onto the stack to make sure
7460 the stack address is accessible. */
7461 #ifdef ENABLE_EXECUTE_STACK
7462 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7463 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
7468 /* The 64-bit version is simpler because it makes more sense to load the
7469 values as "immediate" data out of the trampoline. It's also easier since
7470 we can read the PC without clobbering a register. */
7473 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
7475 /* SPARC 64-bit trampoline:
7477 rd %pc, %g1
7478 ldx [%g1+24], %g5
7479 jmp %g5
7480 ldx [%g1+16], %g5
7481 +16 bytes data
7482 */
7484 emit_move_insn (adjust_address (m_tramp, SImode, 0),
7485 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7486 emit_move_insn (adjust_address (m_tramp, SImode, 4),
7487 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7488 emit_move_insn (adjust_address (m_tramp, SImode, 8),
7489 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7490 emit_move_insn (adjust_address (m_tramp, SImode, 12),
7491 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7492 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
7493 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
7494 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
7496 if (sparc_cpu != PROCESSOR_ULTRASPARC
7497 && sparc_cpu != PROCESSOR_ULTRASPARC3
7498 && sparc_cpu != PROCESSOR_NIAGARA
7499 && sparc_cpu != PROCESSOR_NIAGARA2)
7500 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
7502 /* Call __enable_execute_stack after writing onto the stack to make sure
7503 the stack address is accessible. */
7504 #ifdef ENABLE_EXECUTE_STACK
7505 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7506 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
7510 /* Worker for TARGET_TRAMPOLINE_INIT. */
7513 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
7515 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
7516 cxt = force_reg (Pmode, cxt);
7518 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
7520 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
7523 /* Adjust the cost of a scheduling dependency. Return the new cost of
7524 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7527 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7529 enum attr_type insn_type;
7531 if (! recog_memoized (insn))
7534 insn_type = get_attr_type (insn);
7536 if (REG_NOTE_KIND (link) == 0)
7538 /* Data dependency; DEP_INSN writes a register that INSN reads some
7541 /* if a load, then the dependence must be on the memory address;
7542 add an extra "cycle". Note that the cost could be two cycles
7543 if the reg was written late in an instruction group; we can't tell
7544 here. */
7545 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7548 /* Get the delay only if the address of the store is the dependence. */
7549 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7551 rtx pat = PATTERN(insn);
7552 rtx dep_pat = PATTERN (dep_insn);
7554 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7555 return cost; /* This should not happen! */
7557 /* The dependency between the two instructions was on the data that
7558 is being stored. Assume that this implies that the address of the
7559 store is not dependent. */
7560 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7563 return cost + 3; /* An approximation. */
7566 /* A shift instruction cannot receive its data from an instruction
7567 in the same cycle; add a one cycle penalty. */
7568 if (insn_type == TYPE_SHIFT)
7569 return cost + 3; /* Split before cascade into shift. */
7573 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7574 INSN writes some cycles later. */
7576 /* These are only significant for the fpu unit; writing a fp reg before
7577 the fpu has finished with it stalls the processor. */
7579 /* Reusing an integer register causes no problems. */
7580 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7588 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7590 enum attr_type insn_type, dep_type;
7591 rtx pat = PATTERN(insn);
7592 rtx dep_pat = PATTERN (dep_insn);
7594 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7597 insn_type = get_attr_type (insn);
7598 dep_type = get_attr_type (dep_insn);
7600 switch (REG_NOTE_KIND (link))
7603 /* Data dependency; DEP_INSN writes a register that INSN reads some
7610 /* Get the delay iff the address of the store is the dependence. */
7611 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7614 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7621 /* If a load, then the dependence must be on the memory address. If
7622 the addresses aren't equal, then it might be a false dependency. */
7623 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7625 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7626 || GET_CODE (SET_DEST (dep_pat)) != MEM
7627 || GET_CODE (SET_SRC (pat)) != MEM
7628 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7629 XEXP (SET_SRC (pat), 0)))
7637 /* Compare to branch latency is 0. There is no benefit from
7638 separating compare and branch. */
7639 if (dep_type == TYPE_COMPARE)
7641 /* Floating point compare to branch latency is less than
7642 compare to conditional move. */
7643 if (dep_type == TYPE_FPCMP)
7652 /* Anti-dependencies only penalize the fpu unit. */
7653 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7665 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
7669 case PROCESSOR_SUPERSPARC:
7670 cost = supersparc_adjust_cost (insn, link, dep, cost);
7672 case PROCESSOR_HYPERSPARC:
7673 case PROCESSOR_SPARCLITE86X:
7674 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7683 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7684 int sched_verbose ATTRIBUTE_UNUSED,
7685 int max_ready ATTRIBUTE_UNUSED)
7690 sparc_use_sched_lookahead (void)
7692 if (sparc_cpu == PROCESSOR_NIAGARA
7693 || sparc_cpu == PROCESSOR_NIAGARA2)
7695 if (sparc_cpu == PROCESSOR_ULTRASPARC
7696 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7698 if ((1 << sparc_cpu) &
7699 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7700 (1 << PROCESSOR_SPARCLITE86X)))
7706 sparc_issue_rate (void)
7710 case PROCESSOR_NIAGARA:
7711 case PROCESSOR_NIAGARA2:
7715 /* Assume V9 processors are capable of at least dual-issue. */
7717 case PROCESSOR_SUPERSPARC:
7719 case PROCESSOR_HYPERSPARC:
7720 case PROCESSOR_SPARCLITE86X:
7722 case PROCESSOR_ULTRASPARC:
7723 case PROCESSOR_ULTRASPARC3:
7729 set_extends (rtx insn)
7731 register rtx pat = PATTERN (insn);
7733 switch (GET_CODE (SET_SRC (pat)))
7735 /* Load and some shift instructions zero extend. */
7738 /* sethi clears the high bits */
7740 /* LO_SUM is used with sethi. sethi cleared the high
7741 bits and the values used with lo_sum are positive */
7743 /* Store flag stores 0 or 1 */
7753 rtx op0 = XEXP (SET_SRC (pat), 0);
7754 rtx op1 = XEXP (SET_SRC (pat), 1);
7755 if (GET_CODE (op1) == CONST_INT)
7756 return INTVAL (op1) >= 0;
7757 if (GET_CODE (op0) != REG)
7759 if (sparc_check_64 (op0, insn) == 1)
7761 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7766 rtx op0 = XEXP (SET_SRC (pat), 0);
7767 rtx op1 = XEXP (SET_SRC (pat), 1);
7768 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7770 if (GET_CODE (op1) == CONST_INT)
7771 return INTVAL (op1) >= 0;
7772 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7775 return GET_MODE (SET_SRC (pat)) == SImode;
7776 /* Positive integers leave the high bits zero. */
7778 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7780 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7783 return - (GET_MODE (SET_SRC (pat)) == SImode);
7785 return sparc_check_64 (SET_SRC (pat), insn);
7791 /* We _ought_ to have only one kind per function, but... */
7792 static GTY(()) rtx sparc_addr_diff_list;
7793 static GTY(()) rtx sparc_addr_list;
7796 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7798 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7800 sparc_addr_diff_list
7801 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7803 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7807 sparc_output_addr_vec (rtx vec)
7809 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7810 int idx, vlen = XVECLEN (body, 0);
7812 #ifdef ASM_OUTPUT_ADDR_VEC_START
7813 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7816 #ifdef ASM_OUTPUT_CASE_LABEL
7817 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7820 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7823 for (idx = 0; idx < vlen; idx++)
7825 ASM_OUTPUT_ADDR_VEC_ELT
7826 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7829 #ifdef ASM_OUTPUT_ADDR_VEC_END
7830 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7835 sparc_output_addr_diff_vec (rtx vec)
7837 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7838 rtx base = XEXP (XEXP (body, 0), 0);
7839 int idx, vlen = XVECLEN (body, 1);
7841 #ifdef ASM_OUTPUT_ADDR_VEC_START
7842 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7845 #ifdef ASM_OUTPUT_CASE_LABEL
7846 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7849 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7852 for (idx = 0; idx < vlen; idx++)
7854 ASM_OUTPUT_ADDR_DIFF_ELT
7857 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7858 CODE_LABEL_NUMBER (base));
7861 #ifdef ASM_OUTPUT_ADDR_VEC_END
7862 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7867 sparc_output_deferred_case_vectors (void)
7872 if (sparc_addr_list == NULL_RTX
7873 && sparc_addr_diff_list == NULL_RTX)
7876 /* Align to cache line in the function's code section. */
7877 switch_to_section (current_function_section ());
7879 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7881 ASM_OUTPUT_ALIGN (asm_out_file, align);
7883 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7884 sparc_output_addr_vec (XEXP (t, 0));
7885 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7886 sparc_output_addr_diff_vec (XEXP (t, 0));
7888 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7891 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7892 unknown. Return 1 if the high bits are zero, -1 if the register is
7893 sign-extended. */
7895 sparc_check_64 (rtx x, rtx insn)
7897 /* If a register is set only once it is safe to ignore insns this
7898 code does not know how to handle. The loop will either recognize
7899 the single set and return the correct value or fail to recognize
7900 it and return 0. */
7904 gcc_assert (GET_CODE (x) == REG);
7906 if (GET_MODE (x) == DImode)
7907 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7909 if (flag_expensive_optimizations
7910 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
7916 insn = get_last_insn_anywhere ();
7921 while ((insn = PREV_INSN (insn)))
7923 switch (GET_CODE (insn))
7936 rtx pat = PATTERN (insn);
7937 if (GET_CODE (pat) != SET)
7939 if (rtx_equal_p (x, SET_DEST (pat)))
7940 return set_extends (insn);
7941 if (y && rtx_equal_p (y, SET_DEST (pat)))
7942 return set_extends (insn);
7943 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7951 /* Returns assembly code to perform a DImode shift using
7952 a 64-bit global or out register on SPARC-V8+. */
7954 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7956 static char asm_code[60];
7958 /* The scratch register is only required when the destination
7959 register is not a 64-bit global or out register. */
7960 if (which_alternative != 2)
7961 operands[3] = operands[0];
7963 /* We can only shift by constants <= 63. */
7964 if (GET_CODE (operands[2]) == CONST_INT)
7965 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7967 if (GET_CODE (operands[1]) == CONST_INT)
7969 output_asm_insn ("mov\t%1, %3", operands);
7973 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7974 if (sparc_check_64 (operands[1], insn) <= 0)
7975 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7976 output_asm_insn ("or\t%L1, %3, %3", operands);
7979 strcpy(asm_code, opcode);
7981 if (which_alternative != 2)
7982 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7984 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
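/* Shape of the emitted sequence (editor's sketch): the two 32-bit halves
   of operand 1 are first merged into a single 64-bit register
     sllx %H1, 32, %3   ! move the high half to bits 63..32
     srl %L1, 0, %L1    ! zero-extend the low half when its high bits
     or %L1, %3, %3     ! are not already known to be clear
   then one V9 shift (the OPCODE argument) is applied, and srlx splits
   the 64-bit result back into the %H0/%L0 pair. */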
7987 /* Output rtl to increment the profiler label LABELNO
7988 for profiling a function entry. */
7991 sparc_profile_hook (int labelno)
7996 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7997 if (NO_PROFILE_COUNTERS)
7999 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
8003 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8004 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
8005 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
8009 #ifdef OBJECT_FORMAT_ELF
8011 sparc_elf_asm_named_section (const char *name, unsigned int flags,
8014 if (flags & SECTION_MERGE)
8016 /* entsize cannot be expressed in this section attributes
8017 encoding style. */
8018 default_elf_asm_named_section (name, flags, decl);
8022 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
8024 if (!(flags & SECTION_DEBUG))
8025 fputs (",#alloc", asm_out_file);
8026 if (flags & SECTION_WRITE)
8027 fputs (",#write", asm_out_file);
8028 if (flags & SECTION_TLS)
8029 fputs (",#tls", asm_out_file);
8030 if (flags & SECTION_CODE)
8031 fputs (",#execinstr", asm_out_file);
8033 /* ??? Handle SECTION_BSS. */
8035 fputc ('\n', asm_out_file);
8037 #endif /* OBJECT_FORMAT_ELF */
8039 /* We do not allow indirect calls to be optimized into sibling calls.
8041 We cannot use sibling calls when delayed branches are disabled
8042 because they will likely require the call delay slot to be filled.
8044 Also, on SPARC 32-bit we cannot emit a sibling call when the
8045 current function returns a structure. This is because the "unimp
8046 after call" convention would cause the callee to return to the
8047 wrong place. The generic code already disallows cases where the
8048 function being called returns a structure.
8050 It may seem strange how this last case could occur. Usually there
8051 is code after the call which jumps to epilogue code which dumps the
8052 return value into the struct return area. That ought to invalidate
8053 the sibling call right? Well, in the C++ case we can end up passing
8054 the pointer to the struct return area to a constructor (which returns
8055 void) and then nothing else happens. Such a sibling call would look
8056 valid without the added check here.
8058 VxWorks PIC PLT entries require the global pointer to be initialized
8059 on entry. We therefore can't emit sibling calls to them. */
8061 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8064 && flag_delayed_branch
8065 && (TARGET_ARCH64 || ! cfun->returns_struct)
8066 && !(TARGET_VXWORKS_RTP
8068 && !targetm.binds_local_p (decl)));
8071 /* libfunc renaming. */
8072 #include "config/gofast.h"
8075 sparc_init_libfuncs (void)
8079 /* Use the subroutines that Sun's library provides for integer
8080 multiply and divide. The `*' prevents an underscore from
8081 being prepended by the compiler. .umul is a little faster
8082 than .mul. */
8083 set_optab_libfunc (smul_optab, SImode, "*.umul");
8084 set_optab_libfunc (sdiv_optab, SImode, "*.div");
8085 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
8086 set_optab_libfunc (smod_optab, SImode, "*.rem");
8087 set_optab_libfunc (umod_optab, SImode, "*.urem");
8089 /* TFmode arithmetic. These names are part of the SPARC 32bit ABI. */
8090 set_optab_libfunc (add_optab, TFmode, "_Q_add");
8091 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
8092 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
8093 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
8094 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
8096 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
8097 is because with soft-float, the SFmode and DFmode sqrt
8098 instructions will be absent, and the compiler will notice and
8099 try to use the TFmode sqrt instruction for calls to the
8100 builtin function sqrt, but this fails. */
8102 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
8104 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
8105 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
8106 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
8107 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
8108 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
8109 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
8111 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
8112 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
8113 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
8114 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
8116 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
8117 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
8118 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
8119 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
8121 if (DITF_CONVERSION_LIBFUNCS)
8123 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
8124 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
8125 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
8126 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
8129 if (SUN_CONVERSION_LIBFUNCS)
8131 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
8132 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
8133 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
8134 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
8139 /* In the SPARC 64bit ABI, SImode multiply and divide functions
8140 do not exist in the library. Make sure the compiler does not
8141 emit calls to them by accident. (It should always use the
8142 hardware instructions.) */
8143 set_optab_libfunc (smul_optab, SImode, 0);
8144 set_optab_libfunc (sdiv_optab, SImode, 0);
8145 set_optab_libfunc (udiv_optab, SImode, 0);
8146 set_optab_libfunc (smod_optab, SImode, 0);
8147 set_optab_libfunc (umod_optab, SImode, 0);
8149 if (SUN_INTEGER_MULTIPLY_64)
8151 set_optab_libfunc (smul_optab, DImode, "__mul64");
8152 set_optab_libfunc (sdiv_optab, DImode, "__div64");
8153 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
8154 set_optab_libfunc (smod_optab, DImode, "__rem64");
8155 set_optab_libfunc (umod_optab, DImode, "__urem64");
8158 if (SUN_CONVERSION_LIBFUNCS)
8160 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
8161 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
8162 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
8163 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
8167 gofast_maybe_init_libfuncs ();
8170 #define def_builtin(NAME, CODE, TYPE) \
8171 add_builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
8174 /* Implement the TARGET_INIT_BUILTINS target hook.
8175 Create builtin functions for special SPARC instructions. */
8178 sparc_init_builtins (void)
8181 sparc_vis_init_builtins ();
8184 /* Create builtin functions for VIS 1.0 instructions. */
8187 sparc_vis_init_builtins (void)
8189 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
8190 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
8191 tree v4hi = build_vector_type (intHI_type_node, 4);
8192 tree v2hi = build_vector_type (intHI_type_node, 2);
8193 tree v2si = build_vector_type (intSI_type_node, 2);
8195 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
8196 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
8197 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
8198 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
8199 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
8200 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
8201 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
8202 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
8203 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
8204 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
8205 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
8206 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
8207 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
8209 intDI_type_node, 0);
8210 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
8212 intDI_type_node, 0);
8213 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
8215 intSI_type_node, 0);
8216 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
8218 intDI_type_node, 0);
8220 /* Packing and expanding vectors. */
8221 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
8222 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
8223 v8qi_ftype_v2si_v8qi);
8224 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
8226 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
8227 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
8228 v8qi_ftype_v4qi_v4qi);
8230 /* Multiplications. */
8231 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
8232 v4hi_ftype_v4qi_v4hi);
8233 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
8234 v4hi_ftype_v4qi_v2hi);
8235 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
8236 v4hi_ftype_v4qi_v2hi);
8237 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
8238 v4hi_ftype_v8qi_v4hi);
8239 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8240 v4hi_ftype_v8qi_v4hi);
8241 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8242 v2si_ftype_v4qi_v2hi);
8243 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8244 v2si_ftype_v4qi_v2hi);
8246 /* Data aligning. */
8247 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8248 v4hi_ftype_v4hi_v4hi);
8249 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8250 v8qi_ftype_v8qi_v8qi);
8251 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8252 v2si_ftype_v2si_v2si);
8253 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8256 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8259 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8262 /* Pixel distance. */
8263 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8264 di_ftype_v8qi_v8qi_di);
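/* Usage sketch (editor's addition; hypothetical user code, not part of
   this file):
     typedef short v4hi __attribute__ ((vector_size (8)));
     typedef unsigned char v4qi __attribute__ ((vector_size (4)));
     v4qi pack (v4hi x) { return __builtin_vis_fpack16 (x); }
   Compiled with -mvis, the call should expand via CODE_FOR_fpack16_vis
   to a single fpack16 instruction. */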
8267 /* Handle TARGET_EXPAND_BUILTIN target hook.
8268 Expand builtin functions for sparc intrinsics. */
8271 sparc_expand_builtin (tree exp, rtx target,
8272 rtx subtarget ATTRIBUTE_UNUSED,
8273 enum machine_mode tmode ATTRIBUTE_UNUSED,
8274 int ignore ATTRIBUTE_UNUSED)
8277 call_expr_arg_iterator iter;
8278 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8279 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8281 enum machine_mode mode[4];
8284 mode[0] = insn_data[icode].operand[0].mode;
8286 || GET_MODE (target) != mode[0]
8287 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
8288 op[0] = gen_reg_rtx (mode[0]);
8292 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8295 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8296 op[arg_count] = expand_normal (arg);
8298 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8300 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8306 pat = GEN_FCN (icode) (op[0], op[1]);
8309 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8312 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8327 sparc_vis_mul8x16 (int e8, int e16)
8329 return (e8 * e16 + 128) / 256;
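/* Worked example (editor's addition): for e8 = 1 and e16 = 384,
   (1 * 384 + 128) / 256 = 512 / 256 = 2, whereas plain truncation would
   give 384 / 256 = 1; the "+ 128" term rounds the scaled product to the
   nearest integer. */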
8332 /* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
8333 by FNCODE. All of the elements in ELTS0 and ELTS1 lists must be integer
8334 constants. A tree list with the results of the multiplications is returned,
8335 and each element in the list is of INNER_TYPE. */
8338 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8340 tree n_elts = NULL_TREE;
8345 case CODE_FOR_fmul8x16_vis:
8346 for (; elts0 && elts1;
8347 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8350 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8351 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8352 n_elts = tree_cons (NULL_TREE,
8353 build_int_cst (inner_type, val),
8358 case CODE_FOR_fmul8x16au_vis:
8359 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8361 for (; elts0; elts0 = TREE_CHAIN (elts0))
8364 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8366 n_elts = tree_cons (NULL_TREE,
8367 build_int_cst (inner_type, val),
8372 case CODE_FOR_fmul8x16al_vis:
8373 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8375 for (; elts0; elts0 = TREE_CHAIN (elts0))
8378 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8380 n_elts = tree_cons (NULL_TREE,
8381 build_int_cst (inner_type, val),
8390 return nreverse (n_elts);
8393 /* Handle TARGET_FOLD_BUILTIN target hook.
8394 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8395 result of the function call is ignored. NULL_TREE is returned if the
8396 function could not be folded. */
8399 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8401 tree arg0, arg1, arg2;
8402 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8403 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
8406 && icode != CODE_FOR_alignaddrsi_vis
8407 && icode != CODE_FOR_alignaddrdi_vis)
8408 return fold_convert (rtype, integer_zero_node);
8412 case CODE_FOR_fexpand_vis:
8413 arg0 = TREE_VALUE (arglist);
8416 if (TREE_CODE (arg0) == VECTOR_CST)
8418 tree inner_type = TREE_TYPE (rtype);
8419 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8420 tree n_elts = NULL_TREE;
8422 for (; elts; elts = TREE_CHAIN (elts))
8424 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8425 n_elts = tree_cons (NULL_TREE,
8426 build_int_cst (inner_type, val),
8429 return build_vector (rtype, nreverse (n_elts));
8433 case CODE_FOR_fmul8x16_vis:
8434 case CODE_FOR_fmul8x16au_vis:
8435 case CODE_FOR_fmul8x16al_vis:
8436 arg0 = TREE_VALUE (arglist);
8437 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8441 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8443 tree inner_type = TREE_TYPE (rtype);
8444 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8445 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8446 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
8449 return build_vector (rtype, n_elts);
8453 case CODE_FOR_fpmerge_vis:
8454 arg0 = TREE_VALUE (arglist);
8455 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8459 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8461 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8462 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8463 tree n_elts = NULL_TREE;
8465 for (; elts0 && elts1;
8466 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8468 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8469 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8472 return build_vector (rtype, nreverse (n_elts));
8476 case CODE_FOR_pdist_vis:
8477 arg0 = TREE_VALUE (arglist);
8478 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8479 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8484 if (TREE_CODE (arg0) == VECTOR_CST
8485 && TREE_CODE (arg1) == VECTOR_CST
8486 && TREE_CODE (arg2) == INTEGER_CST)
8489 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8490 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8491 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8492 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8494 for (; elts0 && elts1;
8495 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8497 unsigned HOST_WIDE_INT
8498 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8499 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8500 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8501 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8503 unsigned HOST_WIDE_INT l;
8506 overflow |= neg_double (low1, high1, &l, &h);
8507 overflow |= add_double (low0, high0, l, h, &l, &h);
8509 overflow |= neg_double (l, h, &l, &h);
8511 overflow |= add_double (low, high, l, h, &low, &high);
8514 gcc_assert (overflow == 0);
8516 return build_int_cst_wide (rtype, low, high);
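/* Editor's note: the loop above folds pdist at compile time by negating
   each elt1, adding it to elt0, taking the absolute value of the
   difference, and accumulating it into the running DImode sum; that is
   the same sum-of-absolute-differences the hardware instruction
   computes. */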
8526 /* ??? This duplicates information provided to the compiler by the
8527 ??? scheduler description. Some day, teach genautomata to output
8528 ??? the latencies and then CSE will just use that. */
8531 sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
8532 bool speed ATTRIBUTE_UNUSED)
8534 enum machine_mode mode = GET_MODE (x);
8535 bool float_mode_p = FLOAT_MODE_P (mode);
8540 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8558 if (GET_MODE (x) == VOIDmode
8559 && ((CONST_DOUBLE_HIGH (x) == 0
8560 && CONST_DOUBLE_LOW (x) < 0x1000)
8561 || (CONST_DOUBLE_HIGH (x) == -1
8562 && CONST_DOUBLE_LOW (x) < 0
8563 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8570 /* If outer-code was a sign or zero extension, a cost
8571 of COSTS_N_INSNS (1) was already added in. This is
8572 why we are subtracting it back out. */
8573 if (outer_code == ZERO_EXTEND)
8575 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8577 else if (outer_code == SIGN_EXTEND)
8579 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8581 else if (float_mode_p)
8583 *total = sparc_costs->float_load;
8587 *total = sparc_costs->int_load;
8595 *total = sparc_costs->float_plusminus;
8597 *total = COSTS_N_INSNS (1);
8602 *total = sparc_costs->float_mul;
8603 else if (! TARGET_HARD_MUL)
8604 *total = COSTS_N_INSNS (25);
8610 if (sparc_costs->int_mul_bit_factor)
8614 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8616 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
8617 for (nbits = 0; value != 0; value &= value - 1)
8620 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8621 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8623 rtx x1 = XEXP (x, 1);
8624 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8625 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8627 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8629 for (; value2 != 0; value2 &= value2 - 1)
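/* Editor's note: "value &= value - 1" clears the lowest set bit on each
   iteration, so these loops compute the population count of the constant
   multiplier (Kernighan's method); nbits then feeds the bit-factor cost
   model below. */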
8637 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8638 bit_cost = COSTS_N_INSNS (bit_cost);
8642 *total = sparc_costs->int_mulX + bit_cost;
8644 *total = sparc_costs->int_mul + bit_cost;
8651 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8661 *total = sparc_costs->float_div_df;
8663 *total = sparc_costs->float_div_sf;
8668 *total = sparc_costs->int_divX;
8670 *total = sparc_costs->int_div;
8677 *total = COSTS_N_INSNS (1);
8684 case UNSIGNED_FLOAT:
8688 case FLOAT_TRUNCATE:
8689 *total = sparc_costs->float_move;
8694 *total = sparc_costs->float_sqrt_df;
8696 *total = sparc_costs->float_sqrt_sf;
8701 *total = sparc_costs->float_cmp;
8703 *total = COSTS_N_INSNS (1);
8708 *total = sparc_costs->float_cmove;
8710 *total = sparc_costs->int_cmove;
8714 /* Handle the NAND vector patterns. */
8715 if (sparc_vector_mode_supported_p (GET_MODE (x))
8716 && GET_CODE (XEXP (x, 0)) == NOT
8717 && GET_CODE (XEXP (x, 1)) == NOT)
8719 *total = COSTS_N_INSNS (1);
8730 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
8731 This is achieved by means of a manual dynamic stack space allocation in
8732 the current frame. We make the assumption that SEQ doesn't contain any
8733 function calls, with the possible exception of calls to the PIC helper. */
8736 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8738 /* We must preserve the lowest 16 words for the register save area. */
8739 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
8740 /* We really need only 2 words of fresh stack space. */
8741 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
8744 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
8745 SPARC_STACK_BIAS + offset));
8747 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
8748 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8750 emit_insn (gen_rtx_SET (VOIDmode,
8751 adjust_address (slot, word_mode, UNITS_PER_WORD),
8755 emit_insn (gen_rtx_SET (VOIDmode,
8757 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8758 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8759 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
8762 /* Output the assembler code for a thunk function. THUNK_DECL is the
8763 declaration for the thunk function itself, FUNCTION is the decl for
8764 the target function. DELTA is an immediate constant offset to be
8765 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8766 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8769 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8770 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8773 rtx this_rtx, insn, funexp;
8774 unsigned int int_arg_first;
8776 reload_completed = 1;
8777 epilogue_completed = 1;
8779 emit_note (NOTE_INSN_PROLOGUE_END);
8781 if (flag_delayed_branch)
8783 /* We will emit a regular sibcall below, so we need to instruct
8784 output_sibcall that we are in a leaf function. */
8785 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8787 /* This will cause final.c to invoke leaf_renumber_regs so we
8788 must behave as if we were in a not-yet-leafified function. */
8789 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8793 /* We will emit the sibcall manually below, so we will need to
8794 manually spill non-leaf registers. */
8795 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8797 /* We really are in a leaf function. */
8798 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8801 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8802 returns a structure, the structure return pointer is there instead. */
8803 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8804 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
8806 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
8808 /* Add DELTA. When possible use a plain add, otherwise load it into
8809 a register first. */
8812 rtx delta_rtx = GEN_INT (delta);
8814 if (! SPARC_SIMM13_P (delta))
8816 rtx scratch = gen_rtx_REG (Pmode, 1);
8817 emit_move_insn (scratch, delta_rtx);
8818 delta_rtx = scratch;
8821 /* THIS_RTX += DELTA. */
8822 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
8825 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
8828 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8829 rtx scratch = gen_rtx_REG (Pmode, 1);
8831 gcc_assert (vcall_offset < 0);
8833 /* SCRATCH = *THIS_RTX. */
8834 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
8836 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8837 may not have any available scratch register at this point. */
8838 if (SPARC_SIMM13_P (vcall_offset))
8840 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8841 else if (! fixed_regs[5]
8842 /* The below sequence is made up of at least 2 insns,
8843 while the default method may need only one. */
8844 && vcall_offset < -8192)
8846 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8847 emit_move_insn (scratch2, vcall_offset_rtx);
8848 vcall_offset_rtx = scratch2;
8852 rtx increment = GEN_INT (-4096);
8854 /* VCALL_OFFSET is a negative number whose typical range can be
8855 estimated as -32768..0 in 32-bit mode. In almost all cases
8856 it is therefore cheaper to emit multiple add insns than
8857 spilling and loading the constant into a register (at least
8859 while (! SPARC_SIMM13_P (vcall_offset))
8861 emit_insn (gen_add2_insn (scratch, increment));
8862 vcall_offset += 4096;
8864 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
8867 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
8868 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8869 gen_rtx_PLUS (Pmode,
8871 vcall_offset_rtx)));
8873 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
8874 emit_insn (gen_add2_insn (this_rtx, scratch));
8877 /* Generate a tail call to the target function. */
8878 if (! TREE_USED (function))
8880 assemble_external (function);
8881 TREE_USED (function) = 1;
8883 funexp = XEXP (DECL_RTL (function), 0);
8885 if (flag_delayed_branch)
8887 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8888 insn = emit_call_insn (gen_sibcall (funexp));
8889 SIBLING_CALL_P (insn) = 1;
8893 /* The hoops we have to jump through in order to generate a sibcall
8894 without using delay slots... */
8895 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8899 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8900 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8902 /* Delay emitting the PIC helper function because it needs to
8903 change the section and we are emitting assembly code. */
8904 load_pic_register (true); /* clobbers %o7 */
8905 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8908 emit_and_preserve (seq, spill_reg, spill_reg2);
8910 else if (TARGET_ARCH32)
8912 emit_insn (gen_rtx_SET (VOIDmode,
8914 gen_rtx_HIGH (SImode, funexp)));
8915 emit_insn (gen_rtx_SET (VOIDmode,
8917 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8919 else /* TARGET_ARCH64 */
8921 switch (sparc_cmodel)
8925 /* The destination can serve as a temporary. */
8926 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8931 /* The destination cannot serve as a temporary. */
8932 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8934 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8937 emit_and_preserve (seq, spill_reg, 0);
8945 emit_jump_insn (gen_indirect_jump (scratch));
8950 /* Run just enough of rest_of_compilation to get the insns emitted.
8951 There's not really enough bulk here to make other passes such as
8952 instruction scheduling worth while. Note that use_thunk calls
8953 assemble_start_function and assemble_end_function. */
8954 insn = get_insns ();
8955 insn_locators_alloc ();
8956 shorten_branches (insn);
8957 final_start_function (insn, file, 1);
8958 final (insn, file, 1);
8959 final_end_function ();
8961 reload_completed = 0;
8962 epilogue_completed = 0;
8965 /* Return true if sparc_output_mi_thunk would be able to output the
8966 assembler code for the thunk function specified by the arguments
8967 it is passed, and false otherwise. */
8969 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
8970 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8971 HOST_WIDE_INT vcall_offset,
8972 const_tree function ATTRIBUTE_UNUSED)
8974 /* Bound the loop used in the default method above. */
8975 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8978 /* How to allocate a 'struct machine_function'. */
8980 static struct machine_function *
8981 sparc_init_machine_status (void)
8983 return GGC_CNEW (struct machine_function);
8986 /* Locate some local-dynamic symbol still in use by this function
8987 so that we can print its name in local-dynamic base patterns. */
8990 get_some_local_dynamic_name (void)
8994 if (cfun->machine->some_ld_name)
8995 return cfun->machine->some_ld_name;
8997 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8999 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
9000 return cfun->machine->some_ld_name;
9006 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
9011 && GET_CODE (x) == SYMBOL_REF
9012 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
9014 cfun->machine->some_ld_name = XSTR (x, 0);
/* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
   This is called from dwarf2out.c to emit call frame instructions
   for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs.  */

static void
sparc_dwarf_handle_frame_unspec (const char *label,
                                 rtx pattern ATTRIBUTE_UNUSED,
                                 int index ATTRIBUTE_UNUSED)
{
  gcc_assert (index == UNSPECV_SAVEW);
  dwarf2out_window_save (label);
}
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void
sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.word\t%r_tls_dtpoff32(", file);
      break;
    case 8:
      fputs ("\t.xword\t%r_tls_dtpoff64(", file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs (")", file);
}
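/* For instance (illustrative, assuming a TLS symbol `foo'), a 4-byte
   reference emits

        .word   %r_tls_dtpoff32(foo)

   and an 8-byte reference emits the corresponding .xword form.  */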
/* Do whatever processing is required at the end of a file.  */

static void
sparc_file_end (void)
{
  /* If we haven't emitted the special PIC helper function, do so now.  */
  if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
    emit_pic_helper ();

  if (NEED_INDICATE_EXEC_STACK)
    file_end_indicate_exec_stack ();
}
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
sparc_mangle_type (const_tree type)
{
  if (!TARGET_64BIT
      && TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
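/* Note on the "g" code above (added explanation): under the Itanium C++
   ABI, "e" is the mangling for the platform's default `long double'
   while "g" names the 128-bit IEEE quad type, presumably so that 32-bit
   objects built with -mlong-double-128 cannot silently link against
   objects built with the default 64-bit `long double'.  */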
/* Expand code to perform an 8 or 16-bit compare and swap by doing 32-bit
   compare and swap on the word containing the byte or half-word.  */

void
sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
{
  rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
  rtx addr = gen_reg_rtx (Pmode);
  rtx off = gen_reg_rtx (SImode);
  rtx oldv = gen_reg_rtx (SImode);
  rtx newv = gen_reg_rtx (SImode);
  rtx oldvalue = gen_reg_rtx (SImode);
  rtx newvalue = gen_reg_rtx (SImode);
  rtx res = gen_reg_rtx (SImode);
  rtx resv = gen_reg_rtx (SImode);
  rtx memsi, val, mask, end_label, loop_label, cc;

  /* ADDR is the containing word: the original address with the low
     two bits cleared.  */
  emit_insn (gen_rtx_SET (VOIDmode, addr,
                          gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));

  /* OFF is the byte offset of the field within that word.  */
  if (Pmode != SImode)
    addr1 = gen_lowpart (SImode, addr1);
  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_AND (SImode, addr1, GEN_INT (3))));

  memsi = gen_rtx_MEM (SImode, addr);
  set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
  MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);

  val = force_reg (SImode, memsi);

  /* SPARC is big-endian, so turn the byte offset into the shift count
     of the field within the word: (3 - OFF) * 8 for bytes and
     (2 - OFF) * 8 for half-words, computed here with an XOR.  */
  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_XOR (SImode, off,
                                       GEN_INT (GET_MODE (mem) == QImode
                                                ? 3 : 2))));

  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));

  if (GET_MODE (mem) == QImode)
    mask = force_reg (SImode, GEN_INT (0xff));
  else
    mask = force_reg (SImode, GEN_INT (0xffff));

  /* MASK selects the field within the word; VAL keeps the background
     bits outside the field.  */
  emit_insn (gen_rtx_SET (VOIDmode, mask,
                          gen_rtx_ASHIFT (SImode, mask, off)));

  emit_insn (gen_rtx_SET (VOIDmode, val,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       val)));

  /* Shift OLDVAL and NEWVAL into position and restrict them to the
     field.  */
  oldval = gen_lowpart (SImode, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_ASHIFT (SImode, oldval, off)));

  newval = gen_lowpart_common (SImode, newval);
  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_ASHIFT (SImode, newval, off)));

  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_AND (SImode, oldv, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_AND (SImode, newv, mask)));

  end_label = gen_label_rtx ();
  loop_label = gen_label_rtx ();
  emit_label (loop_label);

  /* Merge the field values with the current background bits and
     attempt the full-word compare and swap.  */
  emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
                          gen_rtx_IOR (SImode, oldv, val)));

  emit_insn (gen_rtx_SET (VOIDmode, newvalue,
                          gen_rtx_IOR (SImode, newv, val)));

  emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));

  emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);

  /* The word CAS failed.  If only the background bits changed, update
     VAL and retry; if the field itself differed from OLDVAL, the
     operation has legitimately failed and we fall through.  */
  emit_insn (gen_rtx_SET (VOIDmode, resv,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       res)));

  cc = gen_compare_reg_1 (NE, resv, val);
  emit_insn (gen_rtx_SET (VOIDmode, val, resv));

  /* Use cbranchcc4 to separate the compare and branch!  */
  emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
                                  cc, const0_rtx, loop_label));

  emit_label (end_label);

  /* Extract the field from the word that was actually in memory.  */
  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_AND (SImode, res, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_LSHIFTRT (SImode, res, off)));

  emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
}
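/* The loop above is easier to see in plain C.  The sketch below is
   added for exposition only; `cas32' is a hypothetical primitive
   standing in for the sync_compare_and_swapsi pattern, returning the
   word actually found in memory.  The loop retries only when bits
   outside the byte changed underneath us; if the byte itself did not
   match EXPECTED, the compare-and-swap has genuinely failed:

     static unsigned char
     cas8 (unsigned char *p, unsigned char expected, unsigned char desired)
     {
       unsigned int *wp = (unsigned int *) ((unsigned long) p & ~3UL);
       unsigned int shift = (3 - ((unsigned long) p & 3)) * 8;
       unsigned int mask = 0xffU << shift;
       unsigned int oldv = (unsigned int) expected << shift;
       unsigned int newv = (unsigned int) desired << shift;
       unsigned int background = *wp & ~mask;
       unsigned int seen;

       for (;;)
         {
           seen = cas32 (wp, background | oldv, background | newv);
           if (seen == (background | oldv))
             break;
           if ((seen & ~mask) == background)
             break;
           background = seen & ~mask;
         }
       return (unsigned char) ((seen & mask) >> shift);
     }
*/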
/* Implement TARGET_FRAME_POINTER_REQUIRED.  */

static bool
sparc_frame_pointer_required (void)
{
  return !(leaf_function_p () && only_leaf_regs_used ());
}
/* The way this is structured, we can't eliminate SFP in favor of SP
   if the frame pointer is required: we want to use the SFP->HFP
   elimination in that case.  But the test in update_eliminables
   doesn't know we are assuming below that we only do the former
   elimination.  */

static bool
sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == HARD_FRAME_POINTER_REGNUM
          || !targetm.frame_pointer_required ());
}

#include "gt-sparc.h"