/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "target-def.h"
#include "common/common-target.h"
#include "cfglayout.h"
#include "langhooks.h"
#include "dwarf2out.h"
struct processor_costs {
  /* Integer signed load */
  /* Integer zeroed load */
  /* fmov, fneg, fabs */

  const int float_plusminus;
  const int float_cmove;
  const int float_div_sf;
  const int float_div_df;
  const int float_sqrt_sf;
  const int float_sqrt_df;
  /* integer multiply cost for each bit set past the most
     significant 3, so the formula for multiply cost becomes:

	if (rs1 < 0)
	  highest_bit = highest_clear_bit(rs1);
	else
	  highest_bit = highest_set_bit(rs1);

	cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);

     A value of zero indicates that the multiply cost is fixed,
     and not variable.  */
  const int int_mul_bit_factor;
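  /* Illustrative worked example (not part of the original source): with
     int_mul = COSTS_N_INSNS (4) and int_mul_bit_factor = 2, a multiply by a
     positive value whose highest set bit is 11 is costed as
     COSTS_N_INSNS (4) + (11 - 3) / 2, i.e. four extra units on top of the
     base multiply cost.  */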
  /* penalty for shifts, due to scheduling rules etc. */
  const int shift_penalty;

struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */

struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
struct processor_costs leon_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (15), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */

struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */

struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */

struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */

struct processor_costs niagara3_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  COSTS_N_INSNS (9), /* fadd, fsub */
  COSTS_N_INSNS (9), /* fcmp */
  COSTS_N_INSNS (9), /* fmov, fmovr */
  COSTS_N_INSNS (9), /* fmul */
  COSTS_N_INSNS (23), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (9), /* imul */
  COSTS_N_INSNS (9), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
  COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */

static const struct processor_costs *sparc_costs = &cypress_costs;
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out if
   somebody does not branch between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100, 101, 102};
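/* Reading the table above (added commentary, not in the original source):
   the global registers 0-7, the stack pointer (14) and the floating-point
   and special registers from 32 upward map to themselves; the incoming
   registers %i0-%i5 (24-29) are remapped onto the outgoing registers
   %o0-%o5 (8-13) and %i7 (31) onto %o7 (15); -1 marks registers with no
   remapping, i.e. ones a leaf function may not use.  */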
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1};
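/* In the table above (added commentary, not in the original source) the out
   registers %o0-%o5 and %o7 (8-13, 15), the locals (16-23) and the frame
   pointer (30) are marked 0, so a function touching any of them is not a
   candidate for leaf function treatment; everything else, including the
   incoming registers that leaf_reg_remap renames, is allowed.  */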
struct GTY(()) machine_function
  /* Size of the frame of the function.  */
  HOST_WIDE_INT frame_size;

  /* Size of the frame of the function minus the register window save area
     and the outgoing argument area.  */
  HOST_WIDE_INT apparent_frame_size;

  /* Register we pretend the frame pointer is allocated to.  Normally, this
     is %fp, but if we are in a leaf procedure, this is (%sp + offset).  We
     record "offset" separately as it may be too big for (reg + disp).  */
  HOST_WIDE_INT frame_base_offset;

  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* Number of global or FP registers to be saved (as 4-byte quantities).  */
  int n_global_fp_regs;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */

  /* True if the prologue saves local or in registers.  */
  bool save_local_in_regs_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;

#define sparc_frame_size cfun->machine->frame_size
#define sparc_apparent_frame_size cfun->machine->apparent_frame_size
#define sparc_frame_base_reg cfun->machine->frame_base_reg
#define sparc_frame_base_offset cfun->machine->frame_base_offset
#define sparc_n_global_fp_regs cfun->machine->n_global_fp_regs
#define sparc_leaf_function_p cfun->machine->leaf_function_p
#define sparc_save_local_in_regs_p cfun->machine->save_local_in_regs_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;

static void sparc_option_override (void);
static void sparc_init_modes (void);
static void scan_record_type (const_tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				const_tree, bool, bool, int *, int *);
static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);
static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static bool sparc_legitimate_constant_p (enum machine_mode, rtx);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef TARGET_SOLARIS
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
						 tree) ATTRIBUTE_UNUSED;
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);
static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static void sparc_reorg (void);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (enum machine_mode, rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static int sparc_register_move_cost (enum machine_mode,
				     reg_class_t, reg_class_t);
static bool sparc_rtx_costs (rtx, int, int, int, int *, bool);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (enum machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static rtx sparc_struct_value_rtx (tree, int);
static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
						      int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (cumulative_args_t);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx sparc_legitimize_tls_address (rtx);
static rtx sparc_legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static rtx sparc_delegitimize_address (rtx);
static bool sparc_mode_dependent_address_p (const_rtx);
static bool sparc_pass_by_reference (cumulative_args_t,
				     enum machine_mode, const_tree, bool);
static void sparc_function_arg_advance (cumulative_args_t,
					enum machine_mode, const_tree, bool);
static rtx sparc_function_arg_1 (cumulative_args_t,
				 enum machine_mode, const_tree, bool, bool);
static rtx sparc_function_arg (cumulative_args_t,
			       enum machine_mode, const_tree, bool);
static rtx sparc_function_incoming_arg (cumulative_args_t,
					enum machine_mode, const_tree, bool);
static unsigned int sparc_function_arg_boundary (enum machine_mode,
static int sparc_arg_partial_bytes (cumulative_args_t,
				    enum machine_mode, tree, bool);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
static rtx sparc_builtin_setjmp_frame_value (void);
static void sparc_conditional_register_usage (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
static void sparc_trampoline_init (rtx, tree, rtx);
static enum machine_mode sparc_preferred_simd_mode (enum machine_mode);
static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
static bool sparc_print_operand_punct_valid_p (unsigned char);
static void sparc_print_operand (FILE *, rtx, int);
static void sparc_print_operand_address (FILE *, rtx);
static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
					   enum machine_mode, secondary_reload_info *);
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL, false }

/* Option handling.  */

enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

/* Initialize the GCC target structure.  */

/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG sparc_reorg

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sparc_register_move_cost

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sparc_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sparc_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sparc_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sparc_option_override

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sparc_secondary_reload

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sparc_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address

struct gcc_target targetm = TARGET_INITIALIZER;
dump_target_flag_bits (const int flags)
  if (flags & MASK_64BIT)
    fprintf (stderr, "64BIT ");
  if (flags & MASK_APP_REGS)
    fprintf (stderr, "APP_REGS ");
  if (flags & MASK_FASTER_STRUCTS)
    fprintf (stderr, "FASTER_STRUCTS ");
  if (flags & MASK_FLAT)
    fprintf (stderr, "FLAT ");
  if (flags & MASK_FMAF)
    fprintf (stderr, "FMAF ");
  if (flags & MASK_FPU)
    fprintf (stderr, "FPU ");
  if (flags & MASK_HARD_QUAD)
    fprintf (stderr, "HARD_QUAD ");
  if (flags & MASK_POPC)
    fprintf (stderr, "POPC ");
  if (flags & MASK_PTR64)
    fprintf (stderr, "PTR64 ");
  if (flags & MASK_STACK_BIAS)
    fprintf (stderr, "STACK_BIAS ");
  if (flags & MASK_UNALIGNED_DOUBLES)
    fprintf (stderr, "UNALIGNED_DOUBLES ");
  if (flags & MASK_V8PLUS)
    fprintf (stderr, "V8PLUS ");
  if (flags & MASK_VIS)
    fprintf (stderr, "VIS ");
  if (flags & MASK_VIS2)
    fprintf (stderr, "VIS2 ");
  if (flags & MASK_VIS3)
    fprintf (stderr, "VIS3 ");
  if (flags & MASK_DEPRECATED_V8_INSNS)
    fprintf (stderr, "DEPRECATED_V8_INSNS ");
  if (flags & MASK_SPARCLET)
    fprintf (stderr, "SPARCLET ");
  if (flags & MASK_SPARCLITE)
    fprintf (stderr, "SPARCLITE ");
    fprintf (stderr, "V8 ");
    fprintf (stderr, "V9 ");

dump_target_flags (const char *prefix, const int flags)
  fprintf (stderr, "%s: (%08x) [ ", prefix, flags);
  dump_target_flag_bits (flags);
  fprintf (stderr, "]\n");
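/* Illustrative output format (added commentary, not from a real run): a call
   such as dump_target_flags ("Final target_flags", target_flags) on a 64-bit
   configuration with the FPU enabled would print a line shaped like

     Final target_flags: (xxxxxxxx) [ 64BIT FPU STACK_BIAS ]

   where the parenthesized value is the raw flag word in hexadecimal.  */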
/* Validate and override various options, and do some machine dependent
   initialization.  */

sparc_option_override (void)
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }

  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
  static struct cpu_default {
    const enum processor_type processor;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
    { TARGET_CPU_v8, PROCESSOR_V8 },
    { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
    { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
    { TARGET_CPU_leon, PROCESSOR_LEON },
    { TARGET_CPU_sparclite, PROCESSOR_F930 },
    { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
    { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
    { TARGET_CPU_v9, PROCESSOR_V9 },
    { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
    { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
    { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
    { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
    { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
    { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },

  const struct cpu_default *def;

  /* Table of values for -m{cpu,tune}=.  This must match the order of
     the PROCESSOR_* enumeration.  */
  static struct cpu_table {
    const char *const name;
  } const cpu_table[] = {
    { "v7", MASK_ISA, 0 },
    { "cypress", MASK_ISA, 0 },
    { "v8", MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", MASK_ISA, MASK_V8 },
    { "hypersparc", MASK_ISA, MASK_V8|MASK_FPU },
    { "leon", MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite", MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.  */
    { "f930", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { "f934", MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "sparclite86x", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "sparclet", MASK_ISA, MASK_SPARCLET },
    { "tsc701", MASK_ISA, MASK_SPARCLET },
    { "v9", MASK_ISA, MASK_V9 },
    /* UltraSPARC I, II, IIi */
    { "ultrasparc", MASK_ISA,
    /* Although insns using %y are deprecated, it is a clear win.  */
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* ??? Check if %y issue still holds true.  */
    { "ultrasparc3", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2 },
    { "niagara", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    { "niagara2", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2 },
    { "niagara3", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF },
    { "niagara4", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF },

  const struct cpu_table *cpu;
  if (sparc_debug_string != NULL)
      p = ASTRDUP (sparc_debug_string);
      while ((q = strtok (p, ",")) != NULL)
          if (! strcmp (q, "all"))
            mask = MASK_DEBUG_ALL;
          else if (! strcmp (q, "options"))
            mask = MASK_DEBUG_OPTIONS;
            error ("unknown -mdebug-%s switch", q);
          sparc_debug &= ~mask;
  if (TARGET_DEBUG_OPTIONS)
      dump_target_flags ("Initial target_flags", target_flags);
      dump_target_flags ("target_flags_explicit", target_flags_explicit);

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
           DEFAULT_ARCH32_P ? "-m64" : "-m32");

  /* We force all 64bit archs to use 128 bit long double */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

    sparc_cmodel = CM_32;

  if (sparc_cmodel_string != NULL)
      for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
        if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
      if (cmodel->name == NULL)
        error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
        sparc_cmodel = cmodel->value;
      error ("-mcmodel= is not supported on 32 bit systems");

  /* Check that -fcall-saved-REG wasn't specified for out registers.  */
  for (i = 8; i < 16; i++)
    if (!call_used_regs [i])
        error ("-fcall-saved-REG is not supported for out registers");
        call_used_regs [i] = 1;
  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  if (!global_options_set.x_sparc_cpu_and_features)
      for (def = &cpu_default[0]; def->cpu != -1; ++def)
        if (def->cpu == TARGET_CPU_DEFAULT)
      gcc_assert (def->cpu != -1);
      sparc_cpu_and_features = def->processor;

  if (!global_options_set.x_sparc_cpu)
    sparc_cpu = sparc_cpu_and_features;

  cpu = &cpu_table[(int) sparc_cpu_and_features];

  if (TARGET_DEBUG_OPTIONS)
      fprintf (stderr, "sparc_cpu_and_features: %s\n", cpu->name);
      fprintf (stderr, "sparc_cpu: %s\n",
               cpu_table[(int) sparc_cpu].name);
      dump_target_flags ("cpu->disable", cpu->disable);
      dump_target_flags ("cpu->enable", cpu->enable);

  target_flags &= ~cpu->disable;
  target_flags |= (cpu->enable
#ifndef HAVE_AS_FMAF_HPC_VIS3
                   & ~(MASK_FMAF | MASK_VIS3)

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (target_flags_explicit & MASK_FPU)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* -mvis2 implies -mvis */
    target_flags |= MASK_VIS;

  /* -mvis3 implies -mvis2 and -mvis */
    target_flags |= MASK_VIS2 | MASK_VIS;

  /* Don't allow -mvis, -mvis2, -mvis3, or -mfmaf if FPU is disabled.  */
    target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_FMAF);
  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available; -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);

  /* -mvis also implies -mv8plus on 32-bit */
  if (TARGET_VIS && ! TARGET_ARCH64)
    target_flags |= MASK_V8PLUS;
  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
          || sparc_cpu == PROCESSOR_ULTRASPARC3
          || sparc_cpu == PROCESSOR_NIAGARA
          || sparc_cpu == PROCESSOR_NIAGARA2
          || sparc_cpu == PROCESSOR_NIAGARA3
          || sparc_cpu == PROCESSOR_NIAGARA4))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
    case PROCESSOR_LEON:
      sparc_costs = &leon_costs;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
    case PROCESSOR_NIAGARA3:
    case PROCESSOR_NIAGARA4:
      sparc_costs = &niagara3_costs;
    case PROCESSOR_NATIVE:
#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;

  if (TARGET_DEBUG_OPTIONS)
    dump_target_flags ("Final target_flags", target_flags);

  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
                         ((sparc_cpu == PROCESSOR_ULTRASPARC
                           || sparc_cpu == PROCESSOR_NIAGARA
                           || sparc_cpu == PROCESSOR_NIAGARA2
                           || sparc_cpu == PROCESSOR_NIAGARA3
                           || sparc_cpu == PROCESSOR_NIAGARA4)
                          : (sparc_cpu == PROCESSOR_ULTRASPARC3
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
                         ((sparc_cpu == PROCESSOR_ULTRASPARC
                           || sparc_cpu == PROCESSOR_ULTRASPARC3
                           || sparc_cpu == PROCESSOR_NIAGARA
                           || sparc_cpu == PROCESSOR_NIAGARA2
                           || sparc_cpu == PROCESSOR_NIAGARA3
                           || sparc_cpu == PROCESSOR_NIAGARA4)
                         global_options.x_param_values,
                         global_options_set.x_param_values);

  /* Disable save slot sharing for call-clobbered registers by default.
     The IRA sharing algorithm works on single registers only and this
     pessimizes for double floating-point registers.  */
  if (!global_options_set.x_flag_ira_share_save_slots)
    flag_ira_share_save_slots = 0;
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

v9_regcmp_p (enum rtx_code code)
  return (code == EQ || code == NE || code == GE || code == LT
          || code == LE || code == GT);

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

  if (GET_CODE (op) == CONST_DOUBLE)
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
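  /* Illustrative example (added commentary, not from the original source):
     the SFmode constant 1.0f has the bit pattern 0x3f800000; its low 10 bits
     are clear and it does not fit in a signed 13-bit immediate, so it
     satisfies the test above and can be loaded with a single sethi.  */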
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

  if (GET_CODE (op) == CONST_DOUBLE)
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

fp_high_losum_p (rtx op)
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
/* Return true if the address of LABEL can be loaded by means of the
   mov{si,di}_pic_label_ref patterns in PIC mode.  */

can_use_mov_pic_label_ref (rtx label)
  /* VxWorks does not impose a fixed gap between segments; the run-time
     gap can be different from the object-file gap.  We therefore can't
     assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
     are absolutely sure that X is in the same segment as the GOT.
     Unfortunately, the flexibility of linker scripts means that we
     can't be sure of that in general, so assume that GOT-relative
     accesses are never valid on VxWorks.  */
  if (TARGET_VXWORKS_RTP)

  /* Similarly, if the label is non-local, it might end up being placed
     in a different section than the current one; now mov_pic_label_ref
     requires the label and the code to be in the same section.  */
  if (LABEL_REF_NONLOCAL_P (label))

  /* Finally, if we are reordering basic blocks and partition into hot
     and cold sections, this might happen for any label.  */
  if (flag_reorder_blocks_and_partition)
/* Expand a move instruction.  Return true if all work is done.  */

sparc_expand_move (enum machine_mode mode, rtx *operands)
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))

      if (!reload_in_progress)
          operands[0] = validize_mem (operands[0]);
          operands[1] = force_reg (mode, operands[1]);

  /* Fixup TLS cases.  */
      && CONSTANT_P (operands[1])
      && sparc_tls_referenced_p (operands [1]))
      operands[1] = sparc_legitimize_tls_address (operands[1]);

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
      if (pic_address_needs_scratch (operands[1]))
        operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);

      /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases.  */
      if (GET_CODE (operands[1]) == LABEL_REF
          && can_use_mov_pic_label_ref (operands[1]))
              emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
              gcc_assert (TARGET_ARCH64);
              emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));

      if (symbolic_operand (operands[1], mode))
            = sparc_legitimize_pic_address (operands[1],
                                            ? operands[0] : NULL_RTX);

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
          || SCALAR_FLOAT_MODE_P (mode)
          || VECTOR_MODE_P (mode)))
      /* emit_group_store will send such bogosity to us when it is
         not storing directly into memory.  So fix this up to avoid
         crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
        operands[1] = CONST0_RTX (mode);

      /* We can clear or set to all-ones FP registers if TARGET_VIS, and
         always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
          && (const_zero_operand (operands[1], mode)
              || const_all_ones_operand (operands[1], mode)))

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
          /* We are able to build any SF constant in integer registers
             with at most 2 instructions.  */
          /* And any DF constant in integer registers.  */
              && ! can_create_pseudo_p ())))

          operands[1] = force_const_mem (mode, operands[1]);
          if (!reload_in_progress)
            operands[1] = validize_mem (operands[1]);

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))

      /* All QImode constants require only one insn, so proceed.  */

      sparc_emit_set_const32 (operands[0], operands[1]);

      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

sparc_emit_set_const32 (rtx op0, rtx op1)
  enum machine_mode mode = GET_MODE (op0);

  if (can_create_pseudo_p ())
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
      gcc_assert (!small_int_operand (op1, mode)
                  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
         this way CSE can see everything and reuse intermediate
         values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              GEN_INT (INTVAL (op1)
                                       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
                              gen_rtx_IOR (mode, temp,
                                           GEN_INT (INTVAL (op1) & 0x3ff))));

      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
                              op0, gen_rtx_LO_SUM (mode, temp, op1)));
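  /* Illustrative example (added commentary, not from the original source):
     on the CONST_INT path above, loading 0x12345678 is emitted as a move of
     0x12345400 (the value with its low 10 bits cleared) into the temporary
     followed by an IOR with 0x278 (the low 10 bits).  */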
/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
  rtx temp1, temp2, temp3, temp4, temp5;

  if (temp && GET_MODE (temp) == TImode)
      temp = gen_rtx_REG (DImode, REGNO (temp));

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 4TB of the virtual address

         sethi  %hi(symbol), %temp1
         or     %temp1, %lo(symbol), %reg  */
        temp1 = temp;  /* op0 is allowed.  */
        temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));

      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 16TB of the virtual address

         sethi  %h44(symbol), %temp1
         or     %temp1, %m44(symbol), %temp2
         sllx   %temp2, 12, %temp3
         or     %temp3, %l44(symbol), %reg  */
          temp3 = temp;  /* op0 is allowed.  */
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
                              gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));

      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable can be placed anywhere in the virtual address

         sethi  %hh(symbol), %temp1
         sethi  %lm(symbol), %temp2
         or     %temp1, %hm(symbol), %temp3
         sllx   %temp3, 32, %temp4
         or     %temp4, %temp2, %temp5
         or     %temp5, %lo(symbol), %reg  */

          /* It is possible that one of the registers we got for operands[2]
             might coincide with that of operands[0] (which is why we made
             it TImode).  Pick the other one to use as our scratch.  */
          if (rtx_equal_p (temp, op0))
              gcc_assert (ti_temp);
              temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
          temp2 = temp;  /* op0 is _not_ allowed, see above.  */
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
          temp4 = gen_reg_rtx (DImode);
          temp5 = gen_reg_rtx (DImode);

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
                              gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
                              gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));

      /* Old old old backwards compatibility kruft here.
         Essentially it is MEDLOW with a fixed 64-bit
         virtual base added to all data segment addresses.
         Text-segment stuff is computed like MEDANY, we can't
         reuse the code above because the relocation knobs

         Data segment:  sethi  %hi(symbol), %temp1
                        add    %temp1, EMBMEDANY_BASE_REG, %temp2
                        or     %temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
              temp1 = temp;  /* op0 is allowed.  */
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);

          emit_insn (gen_embmedany_sethi (temp1, op1));
          emit_insn (gen_embmedany_brsum (temp2, temp1));
          emit_insn (gen_embmedany_losum (op0, temp2, op1));

          /* Text segment:  sethi  %uhi(symbol), %temp1
                            sethi  %hi(symbol), %temp2
                            or     %temp1, %ulo(symbol), %temp3
                            sllx   %temp3, 32, %temp4
                            or     %temp4, %temp2, %temp5
                            or     %temp5, %lo(symbol), %reg  */

              /* It is possible that one of the registers we got for operands[2]
                 might coincide with that of operands[0] (which is why we made
                 it TImode).  Pick the other one to use as our scratch.  */
              if (rtx_equal_p (temp, op0))
                  gcc_assert (ti_temp);
                  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
              temp2 = temp;  /* op0 is _not_ allowed, see above.  */
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
              temp3 = gen_reg_rtx (DImode);
              temp4 = gen_reg_rtx (DImode);
              temp5 = gen_reg_rtx (DImode);

          emit_insn (gen_embmedany_textuhi (temp1, op1));
          emit_insn (gen_embmedany_texthi (temp2, op1));
          emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
          emit_insn (gen_rtx_SET (VOIDmode, temp4,
                                  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
          emit_insn (gen_rtx_SET (VOIDmode, temp5,
                                  gen_rtx_PLUS (DImode, temp4, temp2)));
          emit_insn (gen_embmedany_textlo (op0, temp5, op1));
#if HOST_BITS_PER_WIDE_INT == 32
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)

/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH, they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits, so that it
   matches a plain movdi, to alleviate this problem.  */
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));

gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));

gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
  return gen_rtx_IOR (DImode, src, GEN_INT (val));

gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
                                            unsigned HOST_WIDE_INT, int);
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
                               unsigned HOST_WIDE_INT low_bits, int is_neg)
  unsigned HOST_WIDE_INT high_bits;

    high_bits = (~low_bits) & 0xffffffff;
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
      emit_insn (gen_rtx_SET (VOIDmode, op0,
                              gen_safe_OR64 (temp, (high_bits & 0x3ff))));

      /* If we are XOR'ing with -1, then we should emit a one's complement
         instead.  This way the combiner will notice logical operations
         such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_NOT (DImode, temp)));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_safe_XOR64 (temp,
                                                  (-(HOST_WIDE_INT)0x400
                                                   | (low_bits & 0x3ff)))));
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT, int);

sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
                               unsigned HOST_WIDE_INT high_bits,
                               unsigned HOST_WIDE_INT low_immediate,
  if ((high_bits & 0xfffffc00) != 0)
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
        emit_insn (gen_rtx_SET (VOIDmode, op0,
                                gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      emit_insn (gen_safe_SET64 (temp, high_bits));

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
                          gen_rtx_ASHIFT (DImode, temp2,
                                          GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
                            gen_safe_OR64 (op0, low_immediate)));
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
sparc_emit_set_const64_longway (rtx op0, rtx temp,
                                unsigned HOST_WIDE_INT high_bits,
                                unsigned HOST_WIDE_INT low_bits)
  if (can_create_pseudo_p ())
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
        emit_insn (gen_rtx_SET (VOIDmode,
                                gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      emit_insn (gen_safe_SET64 (temp, high_bits));

  if (can_create_pseudo_p ())
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
                              gen_rtx_ASHIFT (DImode, sub_temp,
      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
          emit_insn (gen_rtx_SET (VOIDmode, temp3,
                                  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_PLUS (DImode, temp4, temp3)));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_PLUS (DImode, temp4, temp2)));

      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);

      /* We are in the middle of reload, so this is really
         painful.  However we do still make an attempt to
         avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_ASHIFT (DImode, sub_temp,
                                                  GEN_INT (to_shift))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_IOR (DImode, op0, low1)));
      if (low2 != const0_rtx)
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_ASHIFT (DImode, sub_temp,
                                                  GEN_INT (to_shift))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_IOR (DImode, op0, low2)));
      emit_insn (gen_rtx_SET (VOIDmode, op0,
                              gen_rtx_ASHIFT (DImode, sub_temp,
                                              GEN_INT (to_shift))));
      if (low3 != const0_rtx)
        emit_insn (gen_rtx_SET (VOIDmode, op0,
                                gen_rtx_IOR (DImode, op0, low3)));
/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    int *, int *, int *);

analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
                        unsigned HOST_WIDE_INT low_bits,
                        int *hbsp, int *lbsp, int *abbasp)
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;

  lowest_bit_set = highest_bit_set = -1;
      if ((lowest_bit_set == -1)
          && ((low_bits >> i) & 1))
      if ((highest_bit_set == -1)
          && ((high_bits >> (32 - i - 1)) & 1))
        highest_bit_set = (64 - i - 1);
         && ((highest_bit_set == -1)
             || (lowest_bit_set == -1)));
      if ((lowest_bit_set == -1)
          && ((high_bits >> i) & 1))
        lowest_bit_set = i + 32;
      if ((highest_bit_set == -1)
          && ((low_bits >> (32 - i - 1)) & 1))
        highest_bit_set = 32 - i - 1;
         && ((highest_bit_set == -1)
             || (lowest_bit_set == -1)));

  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
      if ((low_bits & (1 << i)) != 0)
          if ((high_bits & (1 << (i - 32))) != 0)
      all_bits_between_are_set = 0;

  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
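  /* Illustrative example (added commentary, not from the original source):
     for the constant 0x00000000ffff0000, i.e. high_bits = 0 and
     low_bits = 0xffff0000, the routine above reports lowest_bit_set = 16,
     highest_bit_set = 31 and all_bits_between_are_set = 1.  */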
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
                   unsigned HOST_WIDE_INT low_bits)
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

      || high_bits == 0xffffffff)

  analyze_64bit_constant (high_bits, low_bits,
                          &highest_bit_set, &lowest_bit_set,
                          &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)

  if ((highest_bit_set - lowest_bit_set) < 21)
2001 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
2002 unsigned HOST_WIDE_INT,
2005 static unsigned HOST_WIDE_INT
2006 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
2007 unsigned HOST_WIDE_INT low_bits,
2008 int lowest_bit_set, int shift)
2010 HOST_WIDE_INT hi, lo;
2012 if (lowest_bit_set < 32)
2014 lo = (low_bits >> lowest_bit_set) << shift;
2015 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
2020 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
2022 gcc_assert (! (hi & lo));
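/* For example, with bits 40..50 set (HIGH_BITS = 0x0007ff00, LOW_BITS = 0),
   LOWEST_BIT_SET = 40 and SHIFT = 10 give hi = (0x0007ff00 >> 8) << 10,
   i.e. the run relocated to bits 10..20, ready for a sethi.  */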
2026 /* Here we are sure to be arch64 and this is an integer constant
2027 being loaded into a register. Emit the most efficient
2028 insn sequence possible. Detection of all the 1-insn cases
2029 has been done already. */
2031 sparc_emit_set_const64 (rtx op0, rtx op1)
2033 unsigned HOST_WIDE_INT high_bits, low_bits;
2034 int lowest_bit_set, highest_bit_set;
2035 int all_bits_between_are_set;
2038 /* Sanity check that we know what we are working with. */
2039 gcc_assert (TARGET_ARCH64
2040 && (GET_CODE (op0) == SUBREG
2041 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
2043 if (! can_create_pseudo_p ())
2046 if (GET_CODE (op1) != CONST_INT)
2048 sparc_emit_set_symbolic_const64 (op0, op1, temp);
2053 temp = gen_reg_rtx (DImode);
2055 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
2056 low_bits = (INTVAL (op1) & 0xffffffff);
2058 /* low_bits bits 0 --> 31
2059 high_bits bits 32 --> 63 */
2061 analyze_64bit_constant (high_bits, low_bits,
2062 &highest_bit_set, &lowest_bit_set,
2063 &all_bits_between_are_set);
2065 /* First try for a 2-insn sequence. */
2067 /* These situations are preferred because the optimizer can
2068 * do more things with them:
2070 * sllx %reg, shift, %reg
2072 * srlx %reg, shift, %reg
2073 * 3) mov some_small_const, %reg
2074 * sllx %reg, shift, %reg
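 * For instance (illustrative), 0x0003000000000000 has its two set bits at
 * positions 48 and 49, so it can be emitted as:
 *   mov 3, %reg
 *   sllx %reg, 48, %reg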
2076 if (((highest_bit_set == 63
2077 || lowest_bit_set == 0)
2078 && all_bits_between_are_set != 0)
2079 || ((highest_bit_set - lowest_bit_set) < 12))
2081 HOST_WIDE_INT the_const = -1;
2082 int shift = lowest_bit_set;
2084 if ((highest_bit_set != 63
2085 && lowest_bit_set != 0)
2086 || all_bits_between_are_set == 0)
2089 create_simple_focus_bits (high_bits, low_bits,
2092 else if (lowest_bit_set == 0)
2093 shift = -(63 - highest_bit_set);
2095 gcc_assert (SPARC_SIMM13_P (the_const));
2096 gcc_assert (shift != 0);
2098 emit_insn (gen_safe_SET64 (temp, the_const));
2100 emit_insn (gen_rtx_SET (VOIDmode,
2102 gen_rtx_ASHIFT (DImode,
2106 emit_insn (gen_rtx_SET (VOIDmode,
2108 gen_rtx_LSHIFTRT (DImode,
2110 GEN_INT (-shift))));
2114 /* Now a range of 22 or fewer bits set somewhere.
2115 * 1) sethi %hi(focus_bits), %reg
2116 * sllx %reg, shift, %reg
2117 * 2) sethi %hi(focus_bits), %reg
2118 * srlx %reg, shift, %reg
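 * The focus bits are relocated to start at bit 10 so that one sethi can
 * materialize them; the shift then moves them back to LOWEST_BIT_SET.
 * E.g. a run of set bits at positions 30..45 is relocated to bits 10..25
 * by create_simple_focus_bits, emitted with a sethi, and shifted left
 * by 20.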
2120 if ((highest_bit_set - lowest_bit_set) < 21)
2122 unsigned HOST_WIDE_INT focus_bits =
2123 create_simple_focus_bits (high_bits, low_bits,
2124 lowest_bit_set, 10);
2126 gcc_assert (SPARC_SETHI_P (focus_bits));
2127 gcc_assert (lowest_bit_set != 10);
2129 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
2131 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
2132 if (lowest_bit_set < 10)
2133 emit_insn (gen_rtx_SET (VOIDmode,
2135 gen_rtx_LSHIFTRT (DImode, temp,
2136 GEN_INT (10 - lowest_bit_set))));
2137 else if (lowest_bit_set > 10)
2138 emit_insn (gen_rtx_SET (VOIDmode,
2140 gen_rtx_ASHIFT (DImode, temp,
2141 GEN_INT (lowest_bit_set - 10))));
2145 /* 1) sethi %hi(low_bits), %reg
2146 * or %reg, %lo(low_bits), %reg
2147 * 2) sethi %hi(~low_bits), %reg
2148 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
2151 || high_bits == 0xffffffff)
2153 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
2154 (high_bits == 0xffffffff));
2158 /* Now, try 3-insn sequences. */
2160 /* 1) sethi %hi(high_bits), %reg
2161 * or %reg, %lo(high_bits), %reg
2162 * sllx %reg, 32, %reg
2166 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
2170 /* We may be able to do something quick
2171 when the constant is negated, so try that. */
2172 if (const64_is_2insns ((~high_bits) & 0xffffffff,
2173 (~low_bits) & 0xfffffc00))
2175 /* NOTE: The trailing bits get XOR'd so we need the
2176 non-negated bits, not the negated ones. */
2177 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
2179 if ((((~high_bits) & 0xffffffff) == 0
2180 && ((~low_bits) & 0x80000000) == 0)
2181 || (((~high_bits) & 0xffffffff) == 0xffffffff
2182 && ((~low_bits) & 0x80000000) != 0))
2184 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
2186 if ((SPARC_SETHI_P (fast_int)
2187 && (~high_bits & 0xffffffff) == 0)
2188 || SPARC_SIMM13_P (fast_int))
2189 emit_insn (gen_safe_SET64 (temp, fast_int));
2191 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
2196 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
2197 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
2198 sparc_emit_set_const64 (temp, negated_const);
2201 /* If we are XOR'ing with -1, then we should emit a one's complement
2202 instead. This way the combiner will notice logical operations
2203 such as ANDN later on and substitute. */
2204 if (trailing_bits == 0x3ff)
2206 emit_insn (gen_rtx_SET (VOIDmode, op0,
2207 gen_rtx_NOT (DImode, temp)));
2211 emit_insn (gen_rtx_SET (VOIDmode,
2213 gen_safe_XOR64 (temp,
2214 (-0x400 | trailing_bits))));
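/* Why the XOR just above recovers the constant: in the negated_const path
   TEMP holds ~OP1 with its low 10 bits cleared, so XORing with
   (-0x400 | trailing_bits) flips bits 10..63 back to their original values
   and installs the low 10 bits.  */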
2219 /* 1) sethi %hi(xxx), %reg
2220 * or %reg, %lo(xxx), %reg
2221 * sllx %reg, yyy, %reg
2223 * ??? This is just a generalized version of the low_bits==0
2224 * thing above, FIXME...
2226 if ((highest_bit_set - lowest_bit_set) < 32)
2228 unsigned HOST_WIDE_INT focus_bits =
2229 create_simple_focus_bits (high_bits, low_bits,
2232 /* We can't get here in this state. */
2233 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
2235 /* So what we know is that the set bits straddle the
2236 middle of the 64-bit word. */
2237 sparc_emit_set_const64_quick2 (op0, temp,
2243 /* 1) sethi %hi(high_bits), %reg
2244 * or %reg, %lo(high_bits), %reg
2245 * sllx %reg, 32, %reg
2246 * or %reg, low_bits, %reg
2248 if (SPARC_SIMM13_P(low_bits)
2249 && ((int)low_bits > 0))
2251 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
2255 /* The easiest way when all else fails is full decomposition. */
2256 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
2258 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
2260 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2261 return the mode to be used for the comparison. For floating-point,
2262 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
2263 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2264 processing is needed. */
2267 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
2269 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2295 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2296 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2298 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2299 return CCX_NOOVmode;
2305 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2312 /* Emit the compare insn and return the CC reg for a CODE comparison
2313 with operands X and Y. */
2316 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2318 enum machine_mode mode;
2321 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2324 mode = SELECT_CC_MODE (code, x, y);
2326 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2327 fcc regs (cse can't tell they're really call clobbered regs and will
2328 remove a duplicate comparison even if there is an intervening function
2329 call - it will then try to reload the cc reg via an int reg which is why
2330 we need the movcc patterns). It is possible to provide the movcc
2331 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2332 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2333 to tell cse that CCFPE mode registers (even pseudos) are call
2336 /* ??? This is an experiment. Rather than making changes to cse which may
2337 or may not be easy/clean, we do our own cse. This is possible because
2338 we will generate hard registers. Cse knows they're call clobbered (it
2339 doesn't know the same thing about pseudos). If we guess wrong, no big
2340 deal, but if we win, great! */
2342 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2343 #if 1 /* experiment */
2346 /* We cycle through the registers to ensure they're all exercised. */
2347 static int next_fcc_reg = 0;
2348 /* Previous x,y for each fcc reg. */
2349 static rtx prev_args[4][2];
2351 /* Scan prev_args for x,y. */
2352 for (reg = 0; reg < 4; reg++)
2353 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2358 prev_args[reg][0] = x;
2359 prev_args[reg][1] = y;
2360 next_fcc_reg = (next_fcc_reg + 1) & 3;
2362 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2365 cc_reg = gen_reg_rtx (mode);
2366 #endif /* ! experiment */
2367 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2368 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2370 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2372 /* We shouldn't get there for TFmode if !TARGET_HARD_QUAD. If we do, this
2373 will only result in an unrecognizable insn so no point in asserting. */
2374 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2380 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2383 gen_compare_reg (rtx cmp)
2385 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2388 /* This function is used for v9 only.
2389 DEST is the target of the Scc insn.
2390 CODE is the code for an Scc's comparison.
2391 X and Y are the values we compare.
2393 This function is needed to turn
2396 (gt (reg:CCX 100 %icc)
2400 (gt:DI (reg:CCX 100 %icc)
2403 IE: The instruction recognizer needs to see the mode of the comparison to
2404 find the right instruction. We could use "gt:DI" right in the
2405 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2408 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2411 && (GET_MODE (x) == DImode
2412 || GET_MODE (dest) == DImode))
2415 /* Try to use the movrCC insns. */
2417 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2419 && v9_regcmp_p (compare_code))
2424 /* Special case for op0 != 0. This can be done with one instruction if
2427 if (compare_code == NE
2428 && GET_MODE (dest) == DImode
2429 && rtx_equal_p (op0, dest))
2431 emit_insn (gen_rtx_SET (VOIDmode, dest,
2432 gen_rtx_IF_THEN_ELSE (DImode,
2433 gen_rtx_fmt_ee (compare_code, DImode,
2440 if (reg_overlap_mentioned_p (dest, op0))
2442 /* Handle the case where dest == x.
2443 We "early clobber" the result. */
2444 op0 = gen_reg_rtx (GET_MODE (x));
2445 emit_move_insn (op0, x);
2448 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2449 if (GET_MODE (op0) != DImode)
2451 temp = gen_reg_rtx (DImode);
2452 convert_move (temp, op0, 0);
2456 emit_insn (gen_rtx_SET (VOIDmode, dest,
2457 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2458 gen_rtx_fmt_ee (compare_code, DImode,
2466 x = gen_compare_reg_1 (compare_code, x, y);
2469 gcc_assert (GET_MODE (x) != CC_NOOVmode
2470 && GET_MODE (x) != CCX_NOOVmode);
2472 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2473 emit_insn (gen_rtx_SET (VOIDmode, dest,
2474 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2475 gen_rtx_fmt_ee (compare_code,
2476 GET_MODE (x), x, y),
2477 const1_rtx, dest)));
2483 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2484 without jumps using the addx/subx instructions. */
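/* For instance, an unsigned "x < y" result can be computed roughly as
     subcc  %x, %y, %g0    ! sets the carry bit when x < y (unsigned)
     addx   %g0, 0, %dest  ! dest = carry
   and the remaining cases follow by complementing or swapping operands.
   (Illustrative sketch; the actual define_insns live in sparc.md.)  */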
2487 emit_scc_insn (rtx operands[])
2494 /* The quad-word fp compare library routines all return nonzero to indicate
2495 true, which is different from the equivalent libgcc routines, so we must
2496 handle them specially here. */
2497 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2499 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2500 GET_CODE (operands[1]));
2501 operands[2] = XEXP (operands[1], 0);
2502 operands[3] = XEXP (operands[1], 1);
2505 code = GET_CODE (operands[1]);
2509 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2510 more applications). The exception to this is "reg != 0" which can
2511 be done in one instruction on v9 (so we do it). */
2514 if (GET_MODE (x) == SImode)
2516 rtx pat = gen_seqsi_special (operands[0], x, y);
2520 else if (GET_MODE (x) == DImode)
2522 rtx pat = gen_seqdi_special (operands[0], x, y);
2530 if (GET_MODE (x) == SImode)
2532 rtx pat = gen_snesi_special (operands[0], x, y);
2536 else if (GET_MODE (x) == DImode)
2540 pat = gen_snedi_special_vis3 (operands[0], x, y);
2542 pat = gen_snedi_special (operands[0], x, y);
2550 && GET_MODE (x) == DImode
2552 && (code == GTU || code == LTU))
2553 && gen_v9_scc (operands[0], code, x, y))
2556 /* We can do LTU and GEU using the addx/subx instructions too. And
2557 for GTU/LEU, if both operands are registers swap them and fall
2558 back to the easy case. */
2559 if (code == GTU || code == LEU)
2561 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2562 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2567 code = swap_condition (code);
2572 || (!TARGET_VIS3 && code == GEU))
2574 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2575 gen_rtx_fmt_ee (code, SImode,
2576 gen_compare_reg_1 (code, x, y),
2581 /* All the possibilities to use addx/subx based sequences have been
2582 exhausted, so try for a 3-instruction sequence using v9 conditional
2584 if (TARGET_V9 && gen_v9_scc (operands[0], code, x, y))
2587 /* Nope, do branches. */
2591 /* Emit a conditional jump insn for the v9 architecture using comparison code
2592 CODE and jump target LABEL.
2593 This function exists to take advantage of the v9 brxx insns. */
2596 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2598 emit_jump_insn (gen_rtx_SET (VOIDmode,
2600 gen_rtx_IF_THEN_ELSE (VOIDmode,
2601 gen_rtx_fmt_ee (code, GET_MODE (op0),
2603 gen_rtx_LABEL_REF (VOIDmode, label),
2608 emit_conditional_branch_insn (rtx operands[])
2610 /* The quad-word fp compare library routines all return nonzero to indicate
2611 true, which is different from the equivalent libgcc routines, so we must
2612 handle them specially here. */
2613 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2615 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2616 GET_CODE (operands[0]));
2617 operands[1] = XEXP (operands[0], 0);
2618 operands[2] = XEXP (operands[0], 1);
2621 if (TARGET_ARCH64 && operands[2] == const0_rtx
2622 && GET_CODE (operands[1]) == REG
2623 && GET_MODE (operands[1]) == DImode)
2625 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2629 operands[1] = gen_compare_reg (operands[0]);
2630 operands[2] = const0_rtx;
2631 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2632 operands[1], operands[2]);
2633 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2638 /* Generate a DFmode part of a hard TFmode register.
2639 REG is the TFmode hard register, LOW is 1 for the
2640 low 64bit of the register and 0 otherwise.
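   For instance, on big-endian SPARC a TFmode value in %f0..%f3 yields
   %f0 for the high half (LOW == 0) and %f2 for the low half (LOW == 1).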
2643 gen_df_reg (rtx reg, int low)
2645 int regno = REGNO (reg);
2647 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2648 regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;
2649 return gen_rtx_REG (DFmode, regno);
2652 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2653 Unlike normal calls, TFmode operands are passed by reference. It is
2654 assumed that no more than 3 operands are required. */
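/* For example, a TFmode addition ends up as a call along the lines of
   _Qp_add (&result, &op1, &op2) in the 64-bit ABI, with every TFmode value
   passed and returned through memory.  (Illustrative; the actual function
   name is supplied by the caller in FUNC_NAME.)  */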
2657 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2659 rtx ret_slot = NULL, arg[3], func_sym;
2662 /* We only expect to be called for conversions, unary, and binary ops. */
2663 gcc_assert (nargs == 2 || nargs == 3);
2665 for (i = 0; i < nargs; ++i)
2667 rtx this_arg = operands[i];
2670 /* TFmode arguments and return values are passed by reference. */
2671 if (GET_MODE (this_arg) == TFmode)
2673 int force_stack_temp;
2675 force_stack_temp = 0;
2676 if (TARGET_BUGGY_QP_LIB && i == 0)
2677 force_stack_temp = 1;
2679 if (GET_CODE (this_arg) == MEM
2680 && ! force_stack_temp)
2681 this_arg = XEXP (this_arg, 0);
2682 else if (CONSTANT_P (this_arg)
2683 && ! force_stack_temp)
2685 this_slot = force_const_mem (TFmode, this_arg);
2686 this_arg = XEXP (this_slot, 0);
2690 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2692 /* Operand 0 is the return value. We'll copy it out later. */
2694 emit_move_insn (this_slot, this_arg);
2696 ret_slot = this_slot;
2698 this_arg = XEXP (this_slot, 0);
2705 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2707 if (GET_MODE (operands[0]) == TFmode)
2710 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2711 arg[0], GET_MODE (arg[0]),
2712 arg[1], GET_MODE (arg[1]));
2714 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2715 arg[0], GET_MODE (arg[0]),
2716 arg[1], GET_MODE (arg[1]),
2717 arg[2], GET_MODE (arg[2]));
2720 emit_move_insn (operands[0], ret_slot);
2726 gcc_assert (nargs == 2);
2728 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2729 GET_MODE (operands[0]), 1,
2730 arg[1], GET_MODE (arg[1]));
2732 if (ret != operands[0])
2733 emit_move_insn (operands[0], ret);
2737 /* Expand soft-float TFmode calls to sparc abi routines. */
2740 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2762 emit_soft_tfmode_libcall (func, 3, operands);
2766 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2770 gcc_assert (code == SQRT);
2773 emit_soft_tfmode_libcall (func, 2, operands);
2777 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2784 switch (GET_MODE (operands[1]))
2797 case FLOAT_TRUNCATE:
2798 switch (GET_MODE (operands[0]))
2812 switch (GET_MODE (operands[1]))
2817 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2827 case UNSIGNED_FLOAT:
2828 switch (GET_MODE (operands[1]))
2833 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2844 switch (GET_MODE (operands[0]))
2858 switch (GET_MODE (operands[0]))
2875 emit_soft_tfmode_libcall (func, 2, operands);
2878 /* Expand a hard-float tfmode operation. All arguments must be in
2882 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2886 if (GET_RTX_CLASS (code) == RTX_UNARY)
2888 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2889 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2893 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2894 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2895 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2896 operands[1], operands[2]);
2899 if (register_operand (operands[0], VOIDmode))
2902 dest = gen_reg_rtx (GET_MODE (operands[0]));
2904 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2906 if (dest != operands[0])
2907 emit_move_insn (operands[0], dest);
2911 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2913 if (TARGET_HARD_QUAD)
2914 emit_hard_tfmode_operation (code, operands);
2916 emit_soft_tfmode_binop (code, operands);
2920 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2922 if (TARGET_HARD_QUAD)
2923 emit_hard_tfmode_operation (code, operands);
2925 emit_soft_tfmode_unop (code, operands);
2929 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2931 if (TARGET_HARD_QUAD)
2932 emit_hard_tfmode_operation (code, operands);
2934 emit_soft_tfmode_cvt (code, operands);
2937 /* Return nonzero if a branch/jump/call instruction will be emitting a
2938 nop into its delay slot. */
2941 empty_delay_slot (rtx insn)
2945 /* If no previous instruction (should not happen), return true. */
2946 if (PREV_INSN (insn) == NULL)
2949 seq = NEXT_INSN (PREV_INSN (insn));
2950 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2956 /* Return nonzero if TRIAL can go into the call delay slot. */
2959 tls_call_delay (rtx trial)
2964 call __tls_get_addr, %tgd_call (foo)
2965 add %l7, %o0, %o0, %tgd_add (foo)
2966 while Sun as/ld does not. */
2967 if (TARGET_GNU_TLS || !TARGET_TLS)
2970 pat = PATTERN (trial);
2972 /* We must reject tgd_add{32|64}, i.e.
2973 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2974 and tldm_add{32|64}, i.e.
2975 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2977 if (GET_CODE (pat) == SET
2978 && GET_CODE (SET_SRC (pat)) == PLUS)
2980 rtx unspec = XEXP (SET_SRC (pat), 1);
2982 if (GET_CODE (unspec) == UNSPEC
2983 && (XINT (unspec, 1) == UNSPEC_TLSGD
2984 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2991 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2992 instruction. RETURN_P is true if the v9 variant 'return' is to be
2993 considered in the test too.
2995 TRIAL must be a SET whose destination is a REG appropriate for the
2996 'restore' instruction or, if RETURN_P is true, for the 'return'
3000 eligible_for_restore_insn (rtx trial, bool return_p)
3002 rtx pat = PATTERN (trial);
3003 rtx src = SET_SRC (pat);
3004 bool src_is_freg = false;
3007 /* Since we now can do moves between float and integer registers when
3008 VIS3 is enabled, we have to catch this case. We can allow such
3009 moves when doing a 'return' however. */
3011 if (GET_CODE (src_reg) == SUBREG)
3012 src_reg = SUBREG_REG (src_reg);
3013 if (GET_CODE (src_reg) == REG
3014 && SPARC_FP_REG_P (REGNO (src_reg)))
3017 /* The 'restore src,%g0,dest' pattern for word mode and below. */
3018 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3019 && arith_operand (src, GET_MODE (src))
3023 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3025 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
3028 /* The 'restore src,%g0,dest' pattern for double-word mode. */
3029 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3030 && arith_double_operand (src, GET_MODE (src))
3032 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3034 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
3035 else if (! TARGET_FPU && register_operand (src, SFmode))
3038 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
3039 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
3042 /* If we have the 'return' instruction, anything that does not use
3043 local or output registers and can go into a delay slot wins. */
3046 && !epilogue_renumber (&pat, 1)
3047 && get_attr_in_uncond_branch_delay (trial)
3048 == IN_UNCOND_BRANCH_DELAY_TRUE)
3051 /* The 'restore src1,src2,dest' pattern for SImode. */
3052 else if (GET_CODE (src) == PLUS
3053 && register_operand (XEXP (src, 0), SImode)
3054 && arith_operand (XEXP (src, 1), SImode))
3057 /* The 'restore src1,src2,dest' pattern for DImode. */
3058 else if (GET_CODE (src) == PLUS
3059 && register_operand (XEXP (src, 0), DImode)
3060 && arith_double_operand (XEXP (src, 1), DImode))
3063 /* The 'restore src1,%lo(src2),dest' pattern. */
3064 else if (GET_CODE (src) == LO_SUM
3065 && ! TARGET_CM_MEDMID
3066 && ((register_operand (XEXP (src, 0), SImode)
3067 && immediate_operand (XEXP (src, 1), SImode))
3069 && register_operand (XEXP (src, 0), DImode)
3070 && immediate_operand (XEXP (src, 1), DImode))))
3073 /* The 'restore src,src,dest' pattern. */
3074 else if (GET_CODE (src) == ASHIFT
3075 && (register_operand (XEXP (src, 0), SImode)
3076 || register_operand (XEXP (src, 0), DImode))
3077 && XEXP (src, 1) == const1_rtx)
3083 /* Return nonzero if TRIAL can go into the function return's delay slot. */
3086 eligible_for_return_delay (rtx trial)
3091 if (GET_CODE (trial) != INSN)
3094 if (get_attr_length (trial) != 1)
3097 /* If the function uses __builtin_eh_return, the eh_return machinery
3098 occupies the delay slot. */
3099 if (crtl->calls_eh_return)
3102 /* In the case of a leaf or flat function, anything can go into the slot. */
3103 if (sparc_leaf_function_p || TARGET_FLAT)
3105 get_attr_in_uncond_branch_delay (trial) == IN_UNCOND_BRANCH_DELAY_TRUE;
3107 pat = PATTERN (trial);
3108 if (GET_CODE (pat) == PARALLEL)
3114 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
3116 rtx expr = XVECEXP (pat, 0, i);
3117 if (GET_CODE (expr) != SET)
3119 if (GET_CODE (SET_DEST (expr)) != REG)
3121 regno = REGNO (SET_DEST (expr));
3122 if (regno >= 8 && regno < 24)
3125 return !epilogue_renumber (&pat, 1)
3126 && (get_attr_in_uncond_branch_delay (trial)
3127 == IN_UNCOND_BRANCH_DELAY_TRUE);
3130 if (GET_CODE (pat) != SET)
3133 if (GET_CODE (SET_DEST (pat)) != REG)
3136 regno = REGNO (SET_DEST (pat));
3138 /* Otherwise, only operations which can be done in tandem with
3139 a `restore' or `return' insn can go into the delay slot. */
3140 if (regno >= 8 && regno < 24)
3143 /* If this instruction sets up a floating-point register and we have a return
3144 instruction, it can probably go in. But restore will not work
3146 if (! SPARC_INT_REG_P (regno))
3148 && !epilogue_renumber (&pat, 1)
3149 && get_attr_in_uncond_branch_delay (trial)
3150 == IN_UNCOND_BRANCH_DELAY_TRUE);
3152 return eligible_for_restore_insn (trial, true);
3155 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
3158 eligible_for_sibcall_delay (rtx trial)
3162 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
3165 if (get_attr_length (trial) != 1)
3168 pat = PATTERN (trial);
3170 if (sparc_leaf_function_p || TARGET_FLAT)
3172 /* If the tail call is done using the call instruction,
3173 we have to restore %o7 in the delay slot. */
3174 if (LEAF_SIBCALL_SLOT_RESERVED_P)
3177 /* %g1 is used to build the function address */
3178 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
3184 /* Otherwise, only operations which can be done in tandem with
3185 a `restore' insn can go into the delay slot. */
3186 if (GET_CODE (SET_DEST (pat)) != REG
3187 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
3188 || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat))))
3191 /* If it mentions %o7, it can't go in, because sibcall will clobber it
3193 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
3196 return eligible_for_restore_insn (trial, false);
3199 /* Determine if it's legal to put X into the constant pool. This
3200 is not possible if X contains the address of a symbol that is
3201 not constant (TLS) or not known at final link time (PIC). */
3204 sparc_cannot_force_const_mem (enum machine_mode mode, rtx x)
3206 switch (GET_CODE (x))
3211 /* Accept all non-symbolic constants. */
3215 /* Labels are OK iff we are non-PIC. */
3216 return flag_pic != 0;
3219 /* 'Naked' TLS symbol references are never OK,
3220 non-TLS symbols are OK iff we are non-PIC. */
3221 if (SYMBOL_REF_TLS_MODEL (x))
3224 return flag_pic != 0;
3227 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
3230 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
3231 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
3239 /* Global Offset Table support. */
3240 static GTY(()) rtx got_helper_rtx = NULL_RTX;
3241 static GTY(()) rtx global_offset_table_rtx = NULL_RTX;
3243 /* Return the SYMBOL_REF for the Global Offset Table. */
3245 static GTY(()) rtx sparc_got_symbol = NULL_RTX;
3250 if (!sparc_got_symbol)
3251 sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3253 return sparc_got_symbol;
3256 /* Ensure that we are not using patterns that are not OK with PIC. */
3266 op = recog_data.operand[i];
3267 gcc_assert (GET_CODE (op) != SYMBOL_REF
3268 && (GET_CODE (op) != CONST
3269 || (GET_CODE (XEXP (op, 0)) == MINUS
3270 && XEXP (XEXP (op, 0), 0) == sparc_got ()
3271 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
3278 /* Return true if X is an address which needs a temporary register when
3279 reloaded while generating PIC code. */
3282 pic_address_needs_scratch (rtx x)
3284 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
3285 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
3286 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
3287 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3288 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
3294 /* Determine if a given RTX is a valid constant. We already know this
3295 satisfies CONSTANT_P. */
3298 sparc_legitimate_constant_p (enum machine_mode mode, rtx x)
3300 switch (GET_CODE (x))
3304 if (sparc_tls_referenced_p (x))
3309 if (GET_MODE (x) == VOIDmode)
3312 /* Floating point constants are generally not ok.
3313 The only exceptions are 0.0 and all-ones in VIS. */
3315 && SCALAR_FLOAT_MODE_P (mode)
3316 && (const_zero_operand (x, mode)
3317 || const_all_ones_operand (x, mode)))
3323 /* Vector constants are generally not ok.
3324 The only exception is 0 or -1 in VIS. */
3326 && (const_zero_operand (x, mode)
3327 || const_all_ones_operand (x, mode)))
3339 /* Determine if a given RTX is a valid constant address. */
3342 constant_address_p (rtx x)
3344 switch (GET_CODE (x))
3352 if (flag_pic && pic_address_needs_scratch (x))
3354 return sparc_legitimate_constant_p (Pmode, x);
3357 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
3364 /* Nonzero if the constant value X is a legitimate general operand
3365 when generating PIC code. It is given that flag_pic is on and
3366 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3369 legitimate_pic_operand_p (rtx x)
3371 if (pic_address_needs_scratch (x))
3373 if (sparc_tls_referenced_p (x))
3378 #define RTX_OK_FOR_OFFSET_P(X, MODE) \
3380 && INTVAL (X) >= -0x1000 \
3381 && INTVAL (X) < (0x1000 - GET_MODE_SIZE (MODE)))
3383 #define RTX_OK_FOR_OLO10_P(X, MODE) \
3385 && INTVAL (X) >= -0x1000 \
3386 && INTVAL (X) < (0xc00 - GET_MODE_SIZE (MODE)))
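/* Both macros keep the offset within the 13-bit signed immediate field of a
   load or store.  The OLO10 variant stops at 0xc00 so that a %lo() value of
   up to 0x3ff can still be added without overflowing that field.  */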
3388 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
3390 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
3391 ordinarily. This changes a bit when generating PIC. */
3394 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3396 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3398 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3400 else if (GET_CODE (addr) == PLUS)
3402 rs1 = XEXP (addr, 0);
3403 rs2 = XEXP (addr, 1);
3405 /* Canonicalize. REG comes first; if there are no regs,
3406 LO_SUM comes first. */
3408 && GET_CODE (rs1) != SUBREG
3410 || GET_CODE (rs2) == SUBREG
3411 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3413 rs1 = XEXP (addr, 1);
3414 rs2 = XEXP (addr, 0);
3418 && rs1 == pic_offset_table_rtx
3420 && GET_CODE (rs2) != SUBREG
3421 && GET_CODE (rs2) != LO_SUM
3422 && GET_CODE (rs2) != MEM
3423 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
3424 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3425 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3427 || GET_CODE (rs1) == SUBREG)
3428 && RTX_OK_FOR_OFFSET_P (rs2, mode)))
3433 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3434 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3436 /* We prohibit REG + REG for TFmode when there are no quad move insns
3437 and we consequently need to split. We do this because REG+REG
3438 is not an offsettable address. If we get the situation in reload
3439 where source and destination of a movtf pattern are both MEMs with
3440 REG+REG address, then only one of them gets converted to an
3441 offsettable address. */
3443 && ! (TARGET_ARCH64 && TARGET_HARD_QUAD))
3446 /* We prohibit REG + REG on ARCH32 if not optimizing for
3447 DFmode/DImode because then mem_min_alignment is likely to be zero
3448 after reload and the forced split would lack a matching splitter
3450 if (TARGET_ARCH32 && !optimize
3451 && (mode == DFmode || mode == DImode))
3454 else if (USE_AS_OFFSETABLE_LO10
3455 && GET_CODE (rs1) == LO_SUM
3457 && ! TARGET_CM_MEDMID
3458 && RTX_OK_FOR_OLO10_P (rs2, mode))
3461 imm1 = XEXP (rs1, 1);
3462 rs1 = XEXP (rs1, 0);
3463 if (!CONSTANT_P (imm1)
3464 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3468 else if (GET_CODE (addr) == LO_SUM)
3470 rs1 = XEXP (addr, 0);
3471 imm1 = XEXP (addr, 1);
3473 if (!CONSTANT_P (imm1)
3474 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3477 /* We can't allow TFmode in 32-bit mode, because an offset greater
3478 than the alignment (8) may cause the LO_SUM to overflow. */
3479 if (mode == TFmode && TARGET_ARCH32)
3482 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3487 if (GET_CODE (rs1) == SUBREG)
3488 rs1 = SUBREG_REG (rs1);
3494 if (GET_CODE (rs2) == SUBREG)
3495 rs2 = SUBREG_REG (rs2);
3502 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3503 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3508 if ((! SPARC_INT_REG_P (REGNO (rs1))
3509 && REGNO (rs1) != FRAME_POINTER_REGNUM
3510 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3512 && (! SPARC_INT_REG_P (REGNO (rs2))
3513 && REGNO (rs2) != FRAME_POINTER_REGNUM
3514 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3520 /* Return the SYMBOL_REF for the tls_get_addr function. */
3522 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
3525 sparc_tls_get_addr (void)
3527 if (!sparc_tls_symbol)
3528 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3530 return sparc_tls_symbol;
3533 /* Return the Global Offset Table to be used in TLS mode. */
3536 sparc_tls_got (void)
3538 /* In PIC mode, this is just the PIC offset table. */
3541 crtl->uses_pic_offset_table = 1;
3542 return pic_offset_table_rtx;
3545 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
3546 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
3547 if (TARGET_SUN_TLS && TARGET_ARCH32)
3549 load_got_register ();
3550 return global_offset_table_rtx;
3553 /* In all other cases, we load a new pseudo with the GOT symbol. */
3554 return copy_to_reg (sparc_got ());
3557 /* Return true if X contains a thread-local symbol. */
3560 sparc_tls_referenced_p (rtx x)
3562 if (!TARGET_HAVE_TLS)
3565 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3566 x = XEXP (XEXP (x, 0), 0);
3568 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3571 /* That's all we handle in sparc_legitimize_tls_address for now. */
3575 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3576 this (thread-local) address. */
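/* Roughly speaking, the local-exec model below builds the offset with a
   %tle_hix22/%tle_lox10 pair and adds it to the thread pointer %g7, the
   initial-exec model loads the offset from the GOT, and the two dynamic
   models call __tls_get_addr.  */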
3579 sparc_legitimize_tls_address (rtx addr)
3581 rtx temp1, temp2, temp3, ret, o0, got, insn;
3583 gcc_assert (can_create_pseudo_p ());
3585 if (GET_CODE (addr) == SYMBOL_REF)
3586 switch (SYMBOL_REF_TLS_MODEL (addr))
3588 case TLS_MODEL_GLOBAL_DYNAMIC:
3590 temp1 = gen_reg_rtx (SImode);
3591 temp2 = gen_reg_rtx (SImode);
3592 ret = gen_reg_rtx (Pmode);
3593 o0 = gen_rtx_REG (Pmode, 8);
3594 got = sparc_tls_got ();
3595 emit_insn (gen_tgd_hi22 (temp1, addr));
3596 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3599 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3600 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3605 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3606 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3609 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3610 insn = get_insns ();
3612 emit_libcall_block (insn, ret, o0, addr);
3615 case TLS_MODEL_LOCAL_DYNAMIC:
3617 temp1 = gen_reg_rtx (SImode);
3618 temp2 = gen_reg_rtx (SImode);
3619 temp3 = gen_reg_rtx (Pmode);
3620 ret = gen_reg_rtx (Pmode);
3621 o0 = gen_rtx_REG (Pmode, 8);
3622 got = sparc_tls_got ();
3623 emit_insn (gen_tldm_hi22 (temp1));
3624 emit_insn (gen_tldm_lo10 (temp2, temp1));
3627 emit_insn (gen_tldm_add32 (o0, got, temp2));
3628 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3633 emit_insn (gen_tldm_add64 (o0, got, temp2));
3634 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3637 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3638 insn = get_insns ();
3640 emit_libcall_block (insn, temp3, o0,
3641 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3642 UNSPEC_TLSLD_BASE));
3643 temp1 = gen_reg_rtx (SImode);
3644 temp2 = gen_reg_rtx (SImode);
3645 emit_insn (gen_tldo_hix22 (temp1, addr));
3646 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3648 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3650 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3653 case TLS_MODEL_INITIAL_EXEC:
3654 temp1 = gen_reg_rtx (SImode);
3655 temp2 = gen_reg_rtx (SImode);
3656 temp3 = gen_reg_rtx (Pmode);
3657 got = sparc_tls_got ();
3658 emit_insn (gen_tie_hi22 (temp1, addr));
3659 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3661 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3663 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3666 ret = gen_reg_rtx (Pmode);
3668 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3671 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3675 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3678 case TLS_MODEL_LOCAL_EXEC:
3679 temp1 = gen_reg_rtx (Pmode);
3680 temp2 = gen_reg_rtx (Pmode);
3683 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3684 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3688 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3689 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3691 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3698 else if (GET_CODE (addr) == CONST)
3702 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
3704 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
3705 offset = XEXP (XEXP (addr, 0), 1);
3707 base = force_operand (base, NULL_RTX);
3708 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
3709 offset = force_reg (Pmode, offset);
3710 ret = gen_rtx_PLUS (Pmode, base, offset);
3714 gcc_unreachable (); /* for now ... */
3719 /* Legitimize PIC addresses. If the address is already position-independent,
3720 we return ORIG. Newly generated position-independent addresses go into a
3721 reg. This is REG if nonzero, otherwise we allocate register(s) as
3725 sparc_legitimize_pic_address (rtx orig, rtx reg)
3727 bool gotdata_op = false;
3729 if (GET_CODE (orig) == SYMBOL_REF
3730 /* See the comment in sparc_expand_move. */
3731 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
3733 rtx pic_ref, address;
3738 gcc_assert (can_create_pseudo_p ());
3739 reg = gen_reg_rtx (Pmode);
3744 /* If not during reload, allocate another temp reg here for loading
3745 in the address, so that these instructions can be optimized
3747 rtx temp_reg = (! can_create_pseudo_p ()
3748 ? reg : gen_reg_rtx (Pmode));
3750 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3751 won't get confused into thinking that these two instructions
3752 are loading in the true address of the symbol. If in the
3753 future a PIC rtx exists, that should be used instead. */
3756 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3757 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3761 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3762 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
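/* TEMP_REG now holds the GOT offset of the symbol, built with a high/lo_sum
   pair wrapped in an UNSPEC; the code below then fetches the final address
   via the GOT, relative to the PIC register.  */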
3770 crtl->uses_pic_offset_table = 1;
3774 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
3775 pic_offset_table_rtx,
3778 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
3779 pic_offset_table_rtx,
3785 = gen_const_mem (Pmode,
3786 gen_rtx_PLUS (Pmode,
3787 pic_offset_table_rtx, address));
3788 insn = emit_move_insn (reg, pic_ref);
3791 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3793 set_unique_reg_note (insn, REG_EQUAL, orig);
3796 else if (GET_CODE (orig) == CONST)
3800 if (GET_CODE (XEXP (orig, 0)) == PLUS
3801 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3806 gcc_assert (can_create_pseudo_p ());
3807 reg = gen_reg_rtx (Pmode);
3810 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3811 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
3812 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
3813 base == reg ? NULL_RTX : reg);
3815 if (GET_CODE (offset) == CONST_INT)
3817 if (SMALL_INT (offset))
3818 return plus_constant (base, INTVAL (offset));
3819 else if (can_create_pseudo_p ())
3820 offset = force_reg (Pmode, offset);
3822 /* If we reach here, then something is seriously wrong. */
3825 return gen_rtx_PLUS (Pmode, base, offset);
3827 else if (GET_CODE (orig) == LABEL_REF)
3828 /* ??? We ought to be checking that the register is live instead, in case
3829 it is eliminated. */
3830 crtl->uses_pic_offset_table = 1;
3835 /* Try machine-dependent ways of modifying an illegitimate address X
3836 to be legitimate. If we find one, return the new, valid address.
3838 OLDX is the address as it was before break_out_memory_refs was called.
3839 In some cases it is useful to look at this to decide what needs to be done.
3841 MODE is the mode of the operand pointed to by X.
3843 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
3846 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3847 enum machine_mode mode)
3851 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3852 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3853 force_operand (XEXP (x, 0), NULL_RTX));
3854 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3855 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3856 force_operand (XEXP (x, 1), NULL_RTX));
3857 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3858 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3860 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3861 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3862 force_operand (XEXP (x, 1), NULL_RTX));
3864 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3867 if (sparc_tls_referenced_p (x))
3868 x = sparc_legitimize_tls_address (x);
3870 x = sparc_legitimize_pic_address (x, NULL_RTX);
3871 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3872 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3873 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3874 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3875 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3876 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3877 else if (GET_CODE (x) == SYMBOL_REF
3878 || GET_CODE (x) == CONST
3879 || GET_CODE (x) == LABEL_REF)
3880 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3885 /* Delegitimize an address that was legitimized by the above function. */
3888 sparc_delegitimize_address (rtx x)
3890 x = delegitimize_mem_from_attrs (x);
3892 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 1)) == UNSPEC)
3893 switch (XINT (XEXP (x, 1), 1))
3895 case UNSPEC_MOVE_PIC:
3897 x = XVECEXP (XEXP (x, 1), 0, 0);
3898 gcc_assert (GET_CODE (x) == SYMBOL_REF);
3904 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
3905 if (GET_CODE (x) == MINUS
3906 && REG_P (XEXP (x, 0))
3907 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
3908 && GET_CODE (XEXP (x, 1)) == LO_SUM
3909 && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
3910 && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
3912 x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
3913 gcc_assert (GET_CODE (x) == LABEL_REF);
3919 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
3920 replace the input X, or the original X if no replacement is called for.
3921 The output parameter *WIN is 1 if the calling macro should goto WIN,
3924 For SPARC, we wish to handle addresses by splitting them into
3925 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
3926 This cuts the number of extra insns by one.
3928 Do nothing when generating PIC code and the address is a symbolic
3929 operand or requires a scratch register. */
3932 sparc_legitimize_reload_address (rtx x, enum machine_mode mode,
3933 int opnum, int type,
3934 int ind_levels ATTRIBUTE_UNUSED, int *win)
3936 /* Decompose SImode constants into HIGH+LO_SUM. */
3938 && (mode != TFmode || TARGET_ARCH64)
3939 && GET_MODE (x) == SImode
3940 && GET_CODE (x) != LO_SUM
3941 && GET_CODE (x) != HIGH
3942 && sparc_cmodel <= CM_MEDLOW
3944 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
3946 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
3947 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3948 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3949 opnum, (enum reload_type)type);
3954 /* We have to recognize what we have already generated above. */
3955 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
3957 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3958 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3959 opnum, (enum reload_type)type);
3968 /* Return true if ADDR (a legitimate address expression)
3969 has an effect that depends on the machine mode it is used for.
3975 is not equivalent to
3977 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
3979 because [%l7+a+1] is interpreted as the address of (a+1). */
3983 sparc_mode_dependent_address_p (const_rtx addr)
3985 if (flag_pic && GET_CODE (addr) == PLUS)
3987 rtx op0 = XEXP (addr, 0);
3988 rtx op1 = XEXP (addr, 1);
3989 if (op0 == pic_offset_table_rtx
3990 && symbolic_operand (op1, VOIDmode))
3997 #ifdef HAVE_GAS_HIDDEN
3998 # define USE_HIDDEN_LINKONCE 1
4000 # define USE_HIDDEN_LINKONCE 0
4004 get_pc_thunk_name (char name[32], unsigned int regno)
4006 const char *reg_name = reg_names[regno];
4008 /* Skip the leading '%' as that cannot be used in a
4012 if (USE_HIDDEN_LINKONCE)
4013 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
4015 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
4018 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
4021 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2, rtx op3)
4023 int orig_flag_pic = flag_pic;
4026 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
4029 insn = gen_load_pcrel_symdi (op0, op1, op2, op3);
4031 insn = gen_load_pcrel_symsi (op0, op1, op2, op3);
4032 flag_pic = orig_flag_pic;
4037 /* Emit code to load the GOT register. */
4040 load_got_register (void)
4042 /* In PIC mode, this will retrieve pic_offset_table_rtx. */
4043 if (!global_offset_table_rtx)
4044 global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
4046 if (TARGET_VXWORKS_RTP)
4047 emit_insn (gen_vxworks_load_got ());
4050 /* The GOT symbol is subject to a PC-relative relocation so we need a
4051 helper function to add the PC value and thus get the final value. */
4052 if (!got_helper_rtx)
4055 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
4056 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4059 emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
4061 GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM)));
4064 /* Need to emit this whether or not we obey regdecls,
4065 since setjmp/longjmp can cause life info to screw up.
4066 ??? In the case where we don't obey regdecls, this is not sufficient
4067 since we may not fall out the bottom. */
4068 emit_use (global_offset_table_rtx);
4071 /* Emit a call instruction with the pattern given by PAT. ADDR is the
4072 address of the call target. */
4075 sparc_emit_call_insn (rtx pat, rtx addr)
4079 insn = emit_call_insn (pat);
4081 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
4082 if (TARGET_VXWORKS_RTP
4084 && GET_CODE (addr) == SYMBOL_REF
4085 && (SYMBOL_REF_DECL (addr)
4086 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
4087 : !SYMBOL_REF_LOCAL_P (addr)))
4089 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
4090 crtl->uses_pic_offset_table = 1;
4094 /* Return 1 if RTX is a MEM which is known to be aligned to at
4095 least a DESIRED byte boundary. */
4098 mem_min_alignment (rtx mem, int desired)
4100 rtx addr, base, offset;
4102 /* If it's not a MEM we can't accept it. */
4103 if (GET_CODE (mem) != MEM)
4107 if (!TARGET_UNALIGNED_DOUBLES
4108 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
4111 /* ??? The rest of the function predates MEM_ALIGN so
4112 there is probably a bit of redundancy. */
4113 addr = XEXP (mem, 0);
4114 base = offset = NULL_RTX;
4115 if (GET_CODE (addr) == PLUS)
4117 if (GET_CODE (XEXP (addr, 0)) == REG)
4119 base = XEXP (addr, 0);
4121 /* What we are saying here is that if the base
4122 REG is aligned properly, the compiler will make
4123 sure any REG based index upon it will be so
4125 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
4126 offset = XEXP (addr, 1);
4128 offset = const0_rtx;
4131 else if (GET_CODE (addr) == REG)
4134 offset = const0_rtx;
4137 if (base != NULL_RTX)
4139 int regno = REGNO (base);
4141 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
4143 /* Check if the compiler has recorded some information
4144 about the alignment of the base REG. If reload has
4145 completed, we already matched with proper alignments.
4146 If not running global_alloc, reload might give us
4147 unaligned pointer to local stack though. */
4149 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
4150 || (optimize && reload_completed))
4151 && (INTVAL (offset) & (desired - 1)) == 0)
4156 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
4160 else if (! TARGET_UNALIGNED_DOUBLES
4161 || CONSTANT_P (addr)
4162 || GET_CODE (addr) == LO_SUM)
4164 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
4165 is true, in which case we can only assume that an access is aligned if
4166 it is to a constant address, or the address involves a LO_SUM. */
4170 /* An obviously unaligned address. */
4175 /* Vectors to keep interesting information about registers where it can easily
4176 be looked up. We used to use the actual mode value as the bit number, but there
4177 are more than 32 modes now. Instead we use two tables: one indexed by
4178 hard register number, and one indexed by mode. */
4180 /* The purpose of sparc_mode_class is to shrink the range of modes so that
4181 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
4182 mapped into one sparc_mode_class mode. */
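/* The net effect is that checking whether a hard register can hold a mode
   reduces to a bitwise AND of hard_regno_mode_classes[REGNO] (a mask of
   acceptable classes per register) with sparc_mode_class[MODE] (a single
   class bit per mode).  */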
4184 enum sparc_mode_class {
4185 S_MODE, D_MODE, T_MODE, O_MODE,
4186 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
4190 /* Modes for single-word and smaller quantities. */
4191 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
4193 /* Modes for double-word and smaller quantities. */
4194 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
4196 /* Modes for quad-word and smaller quantities. */
4197 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
4199 /* Modes for 8-word and smaller quantities. */
4200 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
4202 /* Modes for single-float quantities. We must allow any single word or
4203 smaller quantity. This is because the fix/float conversion instructions
4204 take integer inputs/outputs from the float registers. */
4205 #define SF_MODES (S_MODES)
4207 /* Modes for double-float and smaller quantities. */
4208 #define DF_MODES (D_MODES)
4210 /* Modes for quad-float and smaller quantities. */
4211 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
4213 /* Modes for quad-float pairs and smaller quantities. */
4214 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
4216 /* Modes for double-float only quantities. */
4217 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
4219 /* Modes for quad-float and double-float only quantities. */
4220 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
4222 /* Modes for quad-float pairs and double-float only quantities. */
4223 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
4225 /* Modes for condition codes. */
4226 #define CC_MODES (1 << (int) CC_MODE)
4227 #define CCFP_MODES (1 << (int) CCFP_MODE)
4229 /* Value is 1 if register/mode pair is acceptable on sparc.
4230 The funny mixture of D and T modes is because integer operations
4231 do not specially operate on tetra quantities, so non-quad-aligned
4232 registers can hold quadword quantities (except %o4 and %i4 because
4233 they cross fixed registers). */
4235 /* This points to either the 32 bit or the 64 bit version. */
4236 const int *hard_regno_mode_classes;
4238 static const int hard_32bit_mode_classes[] = {
4239 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4240 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4241 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4242 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4244 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4245 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4246 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4247 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4249 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4250 and none can hold SFmode/SImode values. */
4251 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4252 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4253 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4254 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4257 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4259 /* %icc, %sfp, %gsr */
4260 CC_MODES, 0, D_MODES
4263 static const int hard_64bit_mode_classes[] = {
4264 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4265 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4266 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4267 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4269 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4270 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4271 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4272 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4274 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4275 and none can hold SFmode/SImode values. */
4276 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4277 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4278 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4279 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4282 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4284 /* %icc, %sfp, %gsr */
4285 CC_MODES, 0, D_MODES
4288 int sparc_mode_class [NUM_MACHINE_MODES];
4290 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
4293 sparc_init_modes (void)
4297 for (i = 0; i < NUM_MACHINE_MODES; i++)
4299 switch (GET_MODE_CLASS (i))
4302 case MODE_PARTIAL_INT:
4303 case MODE_COMPLEX_INT:
4304 if (GET_MODE_SIZE (i) <= 4)
4305 sparc_mode_class[i] = 1 << (int) S_MODE;
4306 else if (GET_MODE_SIZE (i) == 8)
4307 sparc_mode_class[i] = 1 << (int) D_MODE;
4308 else if (GET_MODE_SIZE (i) == 16)
4309 sparc_mode_class[i] = 1 << (int) T_MODE;
4310 else if (GET_MODE_SIZE (i) == 32)
4311 sparc_mode_class[i] = 1 << (int) O_MODE;
4313 sparc_mode_class[i] = 0;
4315 case MODE_VECTOR_INT:
4316 if (GET_MODE_SIZE (i) <= 4)
4317 sparc_mode_class[i] = 1 << (int)SF_MODE;
4318 else if (GET_MODE_SIZE (i) == 8)
4319 sparc_mode_class[i] = 1 << (int)DF_MODE;
4322 case MODE_COMPLEX_FLOAT:
4323 if (GET_MODE_SIZE (i) <= 4)
4324 sparc_mode_class[i] = 1 << (int) SF_MODE;
4325 else if (GET_MODE_SIZE (i) == 8)
4326 sparc_mode_class[i] = 1 << (int) DF_MODE;
4327 else if (GET_MODE_SIZE (i) == 16)
4328 sparc_mode_class[i] = 1 << (int) TF_MODE;
4329 else if (GET_MODE_SIZE (i) == 32)
4330 sparc_mode_class[i] = 1 << (int) OF_MODE;
4332 sparc_mode_class[i] = 0;
4335 if (i == (int) CCFPmode || i == (int) CCFPEmode)
4336 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
4338 sparc_mode_class[i] = 1 << (int) CC_MODE;
4341 sparc_mode_class[i] = 0;
4347 hard_regno_mode_classes = hard_64bit_mode_classes;
4349 hard_regno_mode_classes = hard_32bit_mode_classes;
4351 /* Initialize the array used by REGNO_REG_CLASS. */
4352 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4354 if (i < 16 && TARGET_V8PLUS)
4355 sparc_regno_reg_class[i] = I64_REGS;
4356 else if (i < 32 || i == FRAME_POINTER_REGNUM)
4357 sparc_regno_reg_class[i] = GENERAL_REGS;
4359 sparc_regno_reg_class[i] = FP_REGS;
4361 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
4363 sparc_regno_reg_class[i] = FPCC_REGS;
4365 sparc_regno_reg_class[i] = NO_REGS;
4369 /* Return whether REGNO, a global or FP register, must be saved/restored. */
4372 save_global_or_fp_reg_p (unsigned int regno,
4373 int leaf_function ATTRIBUTE_UNUSED)
4375 return !call_used_regs[regno] && df_regs_ever_live_p (regno);
4378 /* Return whether the return address register (%i7) is needed. */
4381 return_addr_reg_needed_p (int leaf_function)
4383 /* If it is live, for example because of __builtin_return_address (0). */
4384 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
4387 /* Otherwise, it is needed as save register if %o7 is clobbered. */
4389 /* Loading the GOT register clobbers %o7. */
4390 || crtl->uses_pic_offset_table
4391 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
4397 /* Return whether REGNO, a local or in register, must be saved/restored. */
4400 save_local_or_in_reg_p (unsigned int regno, int leaf_function)
4402 /* General case: call-saved registers live at some point. */
4403 if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
4406 /* Frame pointer register (%fp) if needed. */
4407 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
4410 /* Return address register (%i7) if needed. */
4411 if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
4414 /* GOT register (%l7) if needed. */
4415 if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
4418 /* If the function accesses prior frames, the frame pointer and the return
4419 address of the previous frame must be saved on the stack. */
4420 if (crtl->accesses_prior_frames
4421 && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
4427 /* Compute the frame size required by the function. This function is called
4428 during the reload pass and also by sparc_expand_prologue. */
4431 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
4433 HOST_WIDE_INT frame_size, apparent_frame_size;
4434 int args_size, n_global_fp_regs = 0;
4435 bool save_local_in_regs_p = false;
4438 /* If the function allocates dynamic stack space, the dynamic offset is
4439 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
4440 if (leaf_function && !cfun->calls_alloca)
4443 args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
4445 /* Calculate space needed for global registers. */
4447 for (i = 0; i < 8; i++)
4448 if (save_global_or_fp_reg_p (i, 0))
4449 n_global_fp_regs += 2;
4451 for (i = 0; i < 8; i += 2)
4452 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
4453 n_global_fp_regs += 2;
4455 /* In the flat window model, find out which local and in registers need to
4456 be saved. We don't reserve space in the current frame for them as they
4457 will be spilled into the register window save area of the caller's frame.
4458 However, as soon as we use this register window save area, we must create
4459 that of the current frame to make it the live one. */
4461 for (i = 16; i < 32; i++)
4462 if (save_local_or_in_reg_p (i, leaf_function))
4464 save_local_in_regs_p = true;
4468 /* Calculate space needed for FP registers. */
4469 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
4470 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
4471 n_global_fp_regs += 2;
4474 && n_global_fp_regs == 0
4476 && !save_local_in_regs_p)
4477 frame_size = apparent_frame_size = 0;
4480 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
4481 apparent_frame_size = (size - STARTING_FRAME_OFFSET + 7) & -8;
4482 apparent_frame_size += n_global_fp_regs * 4;
4484 /* We need to add the size of the outgoing argument area. */
4485 frame_size = apparent_frame_size + ((args_size + 7) & -8);
4487 /* And that of the register window save area. */
4488 frame_size += FIRST_PARM_OFFSET (cfun->decl);
4490 /* Finally, bump to the appropriate alignment. */
4491 frame_size = SPARC_STACK_ALIGN (frame_size);
4494 /* Set up values for use in prologue and epilogue. */
4495 sparc_frame_size = frame_size;
4496 sparc_apparent_frame_size = apparent_frame_size;
4497 sparc_n_global_fp_regs = n_global_fp_regs;
4498 sparc_save_local_in_regs_p = save_local_in_regs_p;
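/* Worked example (added for clarity; the numbers are illustrative, not from
   the original sources): for a 32-bit non-leaf function with 40 bytes of
   locals, one call-saved global register live (n_global_fp_regs == 2), no
   outgoing arguments beyond the reserved area, and ignoring
   STARTING_FRAME_OFFSET, the computation above gives

     apparent_frame_size = ((40 + 7) & -8) + 2 * 4 = 48
     frame_size = SPARC_STACK_ALIGN (48 + args_size
				     + FIRST_PARM_OFFSET (cfun->decl))

   i.e. locals, spilled global/FP registers, the outgoing argument area and
   the register window save area, rounded up to the stack alignment.  */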
4503 /* Output any necessary .register pseudo-ops. */
4506 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
4508 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
4514 /* Check if %g[2367] were used without
4515 .register being printed for them already. */
4516 for (i = 2; i < 8; i++)
4518 if (df_regs_ever_live_p (i)
4519 && ! sparc_hard_reg_printed [i])
4521 sparc_hard_reg_printed [i] = 1;
4522 /* %g7 is used as TLS base register, use #ignore
4523 for it instead of #scratch. */
4524 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
4525 i == 7 ? "ignore" : "scratch");
4532 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
4534 #if PROBE_INTERVAL > 4096
4535 #error Cannot use indexed addressing mode for stack probing
4538 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
4539 inclusive. These are offsets from the current stack pointer.
4541 Note that we don't use the REG+REG addressing mode for the probes because
4542 of the stack bias in 64-bit mode. And it doesn't really buy us anything,
4543 so the advantage of having a single code path wins here. */
4546 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
4548 rtx g1 = gen_rtx_REG (Pmode, 1);
4550 /* See if we have a constant small number of probes to generate. If so,
4551 that's the easy case. */
4552 if (size <= PROBE_INTERVAL)
4554 emit_move_insn (g1, GEN_INT (first));
4555 emit_insn (gen_rtx_SET (VOIDmode, g1,
4556 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4557 emit_stack_probe (plus_constant (g1, -size));
4560 /* The run-time loop is made up of 10 insns in the generic case while the
4561 compile-time loop is made up of 4+2*(n-2) insns for n # of intervals. */
4562 else if (size <= 5 * PROBE_INTERVAL)
4566 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
4567 emit_insn (gen_rtx_SET (VOIDmode, g1,
4568 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4569 emit_stack_probe (g1);
4571 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
4572 it exceeds SIZE. If only two probes are needed, this will not
4573 generate any code. Then probe at FIRST + SIZE. */
4574 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
4576 emit_insn (gen_rtx_SET (VOIDmode, g1,
4577 plus_constant (g1, -PROBE_INTERVAL)));
4578 emit_stack_probe (g1);
4581 emit_stack_probe (plus_constant (g1, (i - PROBE_INTERVAL) - size));
4584 /* Otherwise, do the same as above, but in a loop. Note that we must be
4585 extra careful with variables wrapping around because we might be at
4586 the very top (or the very bottom) of the address space and we have
4587 to be able to handle this case properly; in particular, we use an
4588 equality test for the loop condition. */
4591 HOST_WIDE_INT rounded_size;
4592 rtx g4 = gen_rtx_REG (Pmode, 4);
4594 emit_move_insn (g1, GEN_INT (first));
4597 /* Step 1: round SIZE to the previous multiple of the interval. */
4599 rounded_size = size & -PROBE_INTERVAL;
4600 emit_move_insn (g4, GEN_INT (rounded_size));
4603 /* Step 2: compute initial and final value of the loop counter. */
4605 /* TEST_ADDR = SP + FIRST. */
4606 emit_insn (gen_rtx_SET (VOIDmode, g1,
4607 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4609 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
4610 emit_insn (gen_rtx_SET (VOIDmode, g4, gen_rtx_MINUS (Pmode, g1, g4)));
4615 while (TEST_ADDR != LAST_ADDR)
4617 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
4621 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
4622 until it is equal to ROUNDED_SIZE. */
4625 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
4627 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
4630 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
4631 that SIZE is equal to ROUNDED_SIZE. */
4633 if (size != rounded_size)
4634 emit_stack_probe (plus_constant (g4, rounded_size - size));
4637 /* Make sure nothing is scheduled before we are done. */
4638 emit_insn (gen_blockage ());
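/* A minimal sketch of the simplest case above (illustrative only, assuming
   32-bit mode, FIRST == 16 and SIZE == 4096): the emitted sequence is
   roughly

	mov	16, %g1
	sub	%sp, %g1, %g1
	st	%g0, [%g1-4096]

   i.e. a single store of %g0 probing the far end of the region, SP - 4112.  */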
4641 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
4642 absolute addresses. */
4645 output_probe_stack_range (rtx reg1, rtx reg2)
4647 static int labelno = 0;
4648 char loop_lab[32], end_lab[32];
4651 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
4652 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
4654 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
4656 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
4659 output_asm_insn ("cmp\t%0, %1", xops);
4661 fputs ("\tbe,pn\t%xcc,", asm_out_file);
4663 fputs ("\tbe\t", asm_out_file);
4664 assemble_name_raw (asm_out_file, end_lab);
4665 fputc ('\n', asm_out_file);
4667 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
4668 xops[1] = GEN_INT (-PROBE_INTERVAL);
4669 output_asm_insn (" add\t%0, %1, %0", xops);
4671 /* Probe at TEST_ADDR and branch. */
4673 fputs ("\tba,pt\t%xcc,", asm_out_file);
4675 fputs ("\tba\t", asm_out_file);
4676 assemble_name_raw (asm_out_file, loop_lab);
4677 fputc ('\n', asm_out_file);
4678 xops[1] = GEN_INT (SPARC_STACK_BIAS);
4679 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
4681 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
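/* For reference (sketch only, 32-bit mode, with the probe registers in %g1
   and %g4 as set up by sparc_emit_probe_stack_range), the loop printed above
   looks like

	.LPSRL0:
		cmp	%g1, %g4
		be	.LPSRE0
		 add	%g1, -4096, %g1
		ba	.LPSRL0
		 st	%g0, [%g1+0]
	.LPSRE0:

   with the decrement and the probing store sitting in the delay slots of
   the two branches.  */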
4686 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
4687 needed. LOW is supposed to be double-word aligned for 32-bit registers.
4688 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
4689 is the action to be performed if SAVE_P returns true and ACTION_FALSE
4690 the action to be performed if it returns false. Return the new offset. */
4692 typedef bool (*sorr_pred_t) (unsigned int, int);
4693 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
4696 emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
4697 int offset, int leaf_function, sorr_pred_t save_p,
4698 sorr_act_t action_true, sorr_act_t action_false)
4703 if (TARGET_ARCH64 && high <= 32)
4707 for (i = low; i < high; i++)
4709 if (save_p (i, leaf_function))
4711 mem = gen_frame_mem (DImode, plus_constant (base, offset));
4712 if (action_true == SORR_SAVE)
4714 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4715 RTX_FRAME_RELATED_P (insn) = 1;
4717 else /* action_true == SORR_RESTORE */
4719 /* The frame pointer must be restored last since its old
4720 value may be used as base address for the frame. This
4721 is problematic in 64-bit mode only because of the lack
4722 of double-word load instruction. */
4723 if (i == HARD_FRAME_POINTER_REGNUM)
4726 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4730 else if (action_false == SORR_ADVANCE)
4736 mem = gen_frame_mem (DImode, plus_constant (base, fp_offset));
4737 emit_move_insn (hard_frame_pointer_rtx, mem);
4742 for (i = low; i < high; i += 2)
4744 bool reg0 = save_p (i, leaf_function);
4745 bool reg1 = save_p (i + 1, leaf_function);
4746 enum machine_mode mode;
4751 mode = SPARC_INT_REG_P (i) ? DImode : DFmode;
4756 mode = SPARC_INT_REG_P (i) ? SImode : SFmode;
4761 mode = SPARC_INT_REG_P (i) ? SImode : SFmode;
4767 if (action_false == SORR_ADVANCE)
4772 mem = gen_frame_mem (mode, plus_constant (base, offset));
4773 if (action_true == SORR_SAVE)
4775 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4776 RTX_FRAME_RELATED_P (insn) = 1;
4780 mem = gen_frame_mem (SImode, plus_constant (base, offset));
4781 set1 = gen_rtx_SET (VOIDmode, mem,
4782 gen_rtx_REG (SImode, regno));
4783 RTX_FRAME_RELATED_P (set1) = 1;
4785 = gen_frame_mem (SImode, plus_constant (base, offset + 4));
4786 set2 = gen_rtx_SET (VOIDmode, mem,
4787 gen_rtx_REG (SImode, regno + 1));
4788 RTX_FRAME_RELATED_P (set2) = 1;
4789 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4790 gen_rtx_PARALLEL (VOIDmode,
4791 gen_rtvec (2, set1, set2)));
4794 else /* action_true == SORR_RESTORE */
4795 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4797 /* Always preserve double-word alignment. */
4798 offset = (offset + 8) & -8;
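/* Note added for clarity: in the pairwise loop above, when both registers of
   an even/odd pair must be handled, a single double-word (DImode/DFmode)
   access is used where possible, so the assembler can emit one std/ldd
   instead of two word-sized moves; otherwise each live register of the pair
   is moved individually in SImode/SFmode.  */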
4805 /* Emit code to adjust BASE to OFFSET. Return the new base. */
4808 emit_adjust_base_to_offset (rtx base, int offset)
4810 /* ??? This might be optimized a little as %g1 might already have a
4811 value close enough that a single add insn will do. */
4812 /* ??? Although, all of this is probably only a temporary fix because
4813 if %g1 can hold a function result, then sparc_expand_epilogue will
4814 lose (the result will be clobbered). */
4815 rtx new_base = gen_rtx_REG (Pmode, 1);
4816 emit_move_insn (new_base, GEN_INT (offset));
4817 emit_insn (gen_rtx_SET (VOIDmode,
4818 new_base, gen_rtx_PLUS (Pmode, base, new_base)));
4822 /* Emit code to save/restore call-saved global and FP registers. */
4825 emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
4827 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
4829 base = emit_adjust_base_to_offset (base, offset);
4834 = emit_save_or_restore_regs (0, 8, base, offset, 0,
4835 save_global_or_fp_reg_p, action, SORR_NONE);
4836 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
4837 save_global_or_fp_reg_p, action, SORR_NONE);
4840 /* Emit code to save/restore call-saved local and in registers. */
4843 emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
4845 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
4847 base = emit_adjust_base_to_offset (base, offset);
4851 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
4852 save_local_or_in_reg_p, action, SORR_ADVANCE);
4855 /* Emit a window_save insn. */
4858 emit_window_save (rtx increment)
4860 rtx insn = emit_insn (gen_window_save (increment));
4861 RTX_FRAME_RELATED_P (insn) = 1;
4863 /* The incoming return address (%o7) is saved in %i7. */
4864 add_reg_note (insn, REG_CFA_REGISTER,
4865 gen_rtx_SET (VOIDmode,
4866 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
4868 INCOMING_RETURN_ADDR_REGNUM)));
4870 /* The window save event. */
4871 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
4873 /* The CFA is %fp, the hard frame pointer. */
4874 add_reg_note (insn, REG_CFA_DEF_CFA,
4875 plus_constant (hard_frame_pointer_rtx,
4876 INCOMING_FRAME_SP_OFFSET));
4881 /* Generate an increment for the stack pointer. */
4884 gen_stack_pointer_inc (rtx increment)
4886 return gen_rtx_SET (VOIDmode,
4888 gen_rtx_PLUS (Pmode,
4893 /* Generate a decrement for the stack pointer. */
4896 gen_stack_pointer_dec (rtx decrement)
4898 return gen_rtx_SET (VOIDmode,
4900 gen_rtx_MINUS (Pmode,
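/* For instance (illustrative), gen_stack_pointer_inc (GEN_INT (-96)) builds
   the RTL

     (set (reg %sp) (plus (reg %sp) (const_int -96)))

   which is emitted as "add %sp, -96, %sp"; gen_stack_pointer_dec builds the
   mirror-image MINUS form used when tearing the frame down.  */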
4905 /* Expand the function prologue. The prologue is responsible for reserving
4906 storage for the frame, saving the call-saved registers and loading the
4907 GOT register if needed. */
4910 sparc_expand_prologue (void)
4915 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4916 on the final value of the flag means deferring the prologue/epilogue
4917 expansion until just before the second scheduling pass, which is too
4918 late to emit multiple epilogues or return insns.
4920 Of course we are making the assumption that the value of the flag
4921 will not change between now and its final value. Of the three parts
4922 of the formula, only the last one can reasonably vary. Let's take a
4923 closer look, after assuming that the first two are set to true
4924 (otherwise the last value is effectively silenced).
4926 If only_leaf_regs_used returns false, the global predicate will also
4927 be false so the actual frame size calculated below will be positive.
4928 As a consequence, the save_register_window insn will be emitted in
4929 the instruction stream; now this insn explicitly references %fp
4930 which is not a leaf register so only_leaf_regs_used will always
4931 return false subsequently.
4933 If only_leaf_regs_used returns true, we hope that the subsequent
4934 optimization passes won't cause non-leaf registers to pop up. For
4935 example, the regrename pass has special provisions to not rename to
4936 non-leaf registers in a leaf function. */
4937 sparc_leaf_function_p
4938 = optimize > 0 && current_function_is_leaf && only_leaf_regs_used ();
4940 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4942 if (flag_stack_usage_info)
4943 current_function_static_stack_size = size;
4945 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
4946 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
4950 else if (sparc_leaf_function_p)
4952 rtx size_int_rtx = GEN_INT (-size);
4955 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
4956 else if (size <= 8192)
4958 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4959 /* %sp is still the CFA register. */
4960 RTX_FRAME_RELATED_P (insn) = 1;
4961 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
4965 rtx size_rtx = gen_rtx_REG (Pmode, 1);
4966 emit_move_insn (size_rtx, size_int_rtx);
4967 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
4968 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4969 gen_stack_pointer_inc (size_int_rtx));
4972 RTX_FRAME_RELATED_P (insn) = 1;
4976 rtx size_int_rtx = GEN_INT (-size);
4979 emit_window_save (size_int_rtx);
4980 else if (size <= 8192)
4982 emit_window_save (GEN_INT (-4096));
4983 /* %sp is not the CFA register anymore. */
4984 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
4988 rtx size_rtx = gen_rtx_REG (Pmode, 1);
4989 emit_move_insn (size_rtx, size_int_rtx);
4990 emit_window_save (size_rtx);
4994 if (sparc_leaf_function_p)
4996 sparc_frame_base_reg = stack_pointer_rtx;
4997 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
5001 sparc_frame_base_reg = hard_frame_pointer_rtx;
5002 sparc_frame_base_offset = SPARC_STACK_BIAS;
5005 if (sparc_n_global_fp_regs > 0)
5006 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5007 sparc_frame_base_offset
5008 - sparc_apparent_frame_size,
5011 /* Load the GOT register if needed. */
5012 if (crtl->uses_pic_offset_table)
5013 load_got_register ();
5015 /* Advertise that the data calculated just above are now valid. */
5016 sparc_prologue_data_valid_p = true;
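/* Putting the pieces together (illustrative sketch, not from the original
   comments): for a typical non-flat, non-leaf 32-bit function whose frame
   fits in 4096 bytes the prologue expanded above is a single

	save	%sp, -96, %sp

   whereas a leaf function with the same frame size only needs

	add	%sp, -96, %sp

   Frames between 4097 and 8192 bytes are allocated in two steps, and larger
   frames go through the scratch register %g1.  */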
5019 /* Expand the function prologue. The prologue is responsible for reserving
5020 storage for the frame, saving the call-saved registers and loading the
5021 GOT register if needed. */
5024 sparc_flat_expand_prologue (void)
5029 sparc_leaf_function_p = optimize > 0 && current_function_is_leaf;
5031 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
5033 if (flag_stack_usage_info)
5034 current_function_static_stack_size = size;
5036 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
5037 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
5039 if (sparc_save_local_in_regs_p)
5040 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
5047 rtx size_int_rtx, size_rtx;
5049 size_rtx = size_int_rtx = GEN_INT (-size);
5051 /* We establish the frame (i.e. decrement the stack pointer) first, even
5052 if we use a frame pointer, because we cannot clobber any call-saved
5053 registers, including the frame pointer, if we haven't created a new
5054 register save area, for the sake of compatibility with the ABI. */
5056 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
5057 else if (size <= 8192 && !frame_pointer_needed)
5059 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
5060 RTX_FRAME_RELATED_P (insn) = 1;
5061 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5065 size_rtx = gen_rtx_REG (Pmode, 1);
5066 emit_move_insn (size_rtx, size_int_rtx);
5067 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
5068 add_reg_note (insn, REG_CFA_ADJUST_CFA,
5069 gen_stack_pointer_inc (size_int_rtx));
5071 RTX_FRAME_RELATED_P (insn) = 1;
5073 /* Ensure nothing is scheduled until after the frame is established. */
5074 emit_insn (gen_blockage ());
5076 if (frame_pointer_needed)
5078 insn = emit_insn (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
5079 gen_rtx_MINUS (Pmode,
5082 RTX_FRAME_RELATED_P (insn) = 1;
5084 add_reg_note (insn, REG_CFA_ADJUST_CFA,
5085 gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
5086 plus_constant (stack_pointer_rtx,
5090 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5092 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
5093 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
5095 insn = emit_move_insn (i7, o7);
5096 RTX_FRAME_RELATED_P (insn) = 1;
5098 add_reg_note (insn, REG_CFA_REGISTER,
5099 gen_rtx_SET (VOIDmode, i7, o7));
5101 /* Prevent this instruction from ever being considered dead,
5102 even if this function has no epilogue. */
5103 emit_insn (gen_rtx_USE (VOIDmode, i7));
5107 if (frame_pointer_needed)
5109 sparc_frame_base_reg = hard_frame_pointer_rtx;
5110 sparc_frame_base_offset = SPARC_STACK_BIAS;
5114 sparc_frame_base_reg = stack_pointer_rtx;
5115 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
5118 if (sparc_n_global_fp_regs > 0)
5119 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5120 sparc_frame_base_offset
5121 - sparc_apparent_frame_size,
5124 /* Load the GOT register if needed. */
5125 if (crtl->uses_pic_offset_table)
5126 load_got_register ();
5128 /* Advertise that the data calculated just above are now valid. */
5129 sparc_prologue_data_valid_p = true;
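/* Note added for clarity: in the flat model there is no register window to
   rotate, so the sequence above replaces the single "save" of the standard
   prologue.  The stack pointer is adjusted explicitly, %fp (if needed) is
   computed from the new %sp, and the return address is copied from %o7 to
   %i7 by hand, each step carrying the CFA notes the unwinder expects.  */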
5132 /* This function generates the assembly code for function entry, which boils
5133 down to emitting the necessary .register directives. */
5136 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5138 /* Check that the assumption we made in sparc_expand_prologue is valid. */
5140 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
5142 sparc_output_scratch_registers (file);
5145 /* Expand the function epilogue, either normal or part of a sibcall.
5146 We emit all the instructions except the return or the call. */
5149 sparc_expand_epilogue (bool for_eh)
5151 HOST_WIDE_INT size = sparc_frame_size;
5153 if (sparc_n_global_fp_regs > 0)
5154 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5155 sparc_frame_base_offset
5156 - sparc_apparent_frame_size,
5159 if (size == 0 || for_eh)
5161 else if (sparc_leaf_function_p)
5164 emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
5165 else if (size <= 8192)
5167 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
5168 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
5172 rtx reg = gen_rtx_REG (Pmode, 1);
5173 emit_move_insn (reg, GEN_INT (-size));
5174 emit_insn (gen_stack_pointer_dec (reg));
5179 /* Expand the function epilogue, either normal or part of a sibcall.
5180 We emit all the instructions except the return or the call. */
5183 sparc_flat_expand_epilogue (bool for_eh)
5185 HOST_WIDE_INT size = sparc_frame_size;
5187 if (sparc_n_global_fp_regs > 0)
5188 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5189 sparc_frame_base_offset
5190 - sparc_apparent_frame_size,
5193 /* If we have a frame pointer, we'll need both to restore it before the
5194 frame is destroyed and use its current value in destroying the frame.
5195 Since we don't have an atomic way to do that in the flat window model,
5196 we save the current value into a temporary register (%g1). */
5197 if (frame_pointer_needed && !for_eh)
5198 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
5200 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5201 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
5202 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
5204 if (sparc_save_local_in_regs_p)
5205 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
5206 sparc_frame_base_offset,
5209 if (size == 0 || for_eh)
5211 else if (frame_pointer_needed)
5213 /* Make sure the frame is destroyed after everything else is done. */
5214 emit_insn (gen_blockage ());
5216 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
5221 emit_insn (gen_blockage ());
5224 emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
5225 else if (size <= 8192)
5227 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
5228 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
5232 rtx reg = gen_rtx_REG (Pmode, 1);
5233 emit_move_insn (reg, GEN_INT (-size));
5234 emit_insn (gen_stack_pointer_dec (reg));
5239 /* Return true if it is appropriate to emit `return' instructions in the
5240 body of a function. */
5243 sparc_can_use_return_insn_p (void)
5245 return sparc_prologue_data_valid_p
5246 && sparc_n_global_fp_regs == 0
5248 ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
5249 : (sparc_frame_size == 0 || !sparc_leaf_function_p);
5252 /* This function generates the assembly code for function exit. */
5255 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5257 /* If the last two instructions of a function are "call foo; dslot;"
5258 the return address might point to the first instruction in the next
5259 function and we have to output a dummy nop for the sake of sane
5260 backtraces in such cases. This is pointless for sibling calls since
5261 the return address is explicitly adjusted. */
5263 rtx insn, last_real_insn;
5265 insn = get_last_insn ();
5267 last_real_insn = prev_real_insn (insn);
5269 && GET_CODE (last_real_insn) == INSN
5270 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
5271 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
5274 && CALL_P (last_real_insn)
5275 && !SIBLING_CALL_P (last_real_insn))
5276 fputs("\tnop\n", file);
5278 sparc_output_deferred_case_vectors ();
5281 /* Output a 'restore' instruction. */
5284 output_restore (rtx pat)
5290 fputs ("\t restore\n", asm_out_file);
5294 gcc_assert (GET_CODE (pat) == SET);
5296 operands[0] = SET_DEST (pat);
5297 pat = SET_SRC (pat);
5299 switch (GET_CODE (pat))
5302 operands[1] = XEXP (pat, 0);
5303 operands[2] = XEXP (pat, 1);
5304 output_asm_insn (" restore %r1, %2, %Y0", operands);
5307 operands[1] = XEXP (pat, 0);
5308 operands[2] = XEXP (pat, 1);
5309 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
5312 operands[1] = XEXP (pat, 0);
5313 gcc_assert (XEXP (pat, 1) == const1_rtx);
5314 output_asm_insn (" restore %r1, %r1, %Y0", operands);
5318 output_asm_insn (" restore %%g0, %1, %Y0", operands);
5323 /* Output a return. */
5326 output_return (rtx insn)
5328 if (crtl->calls_eh_return)
5330 /* If the function uses __builtin_eh_return, the eh_return
5331 machinery occupies the delay slot. */
5332 gcc_assert (!final_sequence);
5334 if (flag_delayed_branch)
5336 if (!TARGET_FLAT && TARGET_V9)
5337 fputs ("\treturn\t%i7+8\n", asm_out_file);
5341 fputs ("\trestore\n", asm_out_file);
5343 fputs ("\tjmp\t%o7+8\n", asm_out_file);
5346 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
5351 fputs ("\trestore\n", asm_out_file);
5353 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
5354 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
5357 else if (sparc_leaf_function_p || TARGET_FLAT)
5359 /* This is a leaf or flat function so we don't have to bother restoring
5360 the register window, which frees us from dealing with the convoluted
5361 semantics of restore/return. We simply output the jump to the
5362 return address and the insn in the delay slot (if any). */
5364 return "jmp\t%%o7+%)%#";
5368 /* This is a regular function so we have to restore the register window.
5369 We may have a pending insn for the delay slot, which will be either
5370 combined with the 'restore' instruction or put in the delay slot of
5371 the 'return' instruction. */
5377 delay = NEXT_INSN (insn);
5380 pat = PATTERN (delay);
5382 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
5384 epilogue_renumber (&pat, 0);
5385 return "return\t%%i7+%)%#";
5389 output_asm_insn ("jmp\t%%i7+%)", NULL);
5390 output_restore (pat);
5391 PATTERN (delay) = gen_blockage ();
5392 INSN_CODE (delay) = -1;
5397 /* The delay slot is empty. */
5399 return "return\t%%i7+%)\n\t nop";
5400 else if (flag_delayed_branch)
5401 return "jmp\t%%i7+%)\n\t restore";
5403 return "restore\n\tjmp\t%%o7+%)\n\t nop";
5410 /* Output a sibling call. */
5413 output_sibcall (rtx insn, rtx call_operand)
5417 gcc_assert (flag_delayed_branch);
5419 operands[0] = call_operand;
5421 if (sparc_leaf_function_p || TARGET_FLAT)
5423 /* This is a leaf or flat function so we don't have to bother restoring
5424 the register window. We simply output the jump to the function and
5425 the insn in the delay slot (if any). */
5427 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
5430 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
5433 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
5434 it into a branch if possible. */
5435 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
5440 /* This is a regular function so we have to restore the register window.
5441 We may have a pending insn for the delay slot, which will be combined
5442 with the 'restore' instruction. */
5444 output_asm_insn ("call\t%a0, 0", operands);
5448 rtx delay = NEXT_INSN (insn);
5451 output_restore (PATTERN (delay));
5453 PATTERN (delay) = gen_blockage ();
5454 INSN_CODE (delay) = -1;
5457 output_restore (NULL_RTX);
5463 /* Functions for handling argument passing.
5465 For 32-bit, the first 6 args are normally in registers and the rest are
5466 pushed. Any arg that starts within the first 6 words is at least
5467 partially passed in a register unless its data type forbids.
5469 For 64-bit, the argument registers are laid out as an array of 16 elements
5470 and arguments are added sequentially. The first 6 int args and up to the
5471 first 16 fp args (depending on size) are passed in regs.
5473 Slot Stack Integral Float Float in structure Double Long Double
5474 ---- ----- -------- ----- ------------------ ------ -----------
5475 15 [SP+248] %f31 %f30,%f31 %d30
5476 14 [SP+240] %f29 %f28,%f29 %d28 %q28
5477 13 [SP+232] %f27 %f26,%f27 %d26
5478 12 [SP+224] %f25 %f24,%f25 %d24 %q24
5479 11 [SP+216] %f23 %f22,%f23 %d22
5480 10 [SP+208] %f21 %f20,%f21 %d20 %q20
5481 9 [SP+200] %f19 %f18,%f19 %d18
5482 8 [SP+192] %f17 %f16,%f17 %d16 %q16
5483 7 [SP+184] %f15 %f14,%f15 %d14
5484 6 [SP+176] %f13 %f12,%f13 %d12 %q12
5485 5 [SP+168] %o5 %f11 %f10,%f11 %d10
5486 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
5487 3 [SP+152] %o3 %f7 %f6,%f7 %d6
5488 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
5489 1 [SP+136] %o1 %f3 %f2,%f3 %d2
5490 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
5492 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
5494 Integral arguments are always passed as 64-bit quantities appropriately extended.
5497 Passing of floating point values is handled as follows.
5498 If a prototype is in scope:
5499 If the value is in a named argument (i.e. not a stdarg function or a
5500 value not part of the `...') then the value is passed in the appropriate fp reg.
5502 If the value is part of the `...' and is passed in one of the first 6
5503 slots then the value is passed in the appropriate int reg.
5504 If the value is part of the `...' and is not passed in one of the first 6
5505 slots then the value is passed in memory.
5506 If a prototype is not in scope:
5507 If the value is one of the first 6 arguments the value is passed in the
5508 appropriate integer reg and the appropriate fp reg.
5509 If the value is not one of the first 6 arguments the value is passed in
5510 the appropriate fp reg and in memory.
5513 Summary of the calling conventions implemented by GCC on the SPARC:
    32-bit ABI:

 5516 size argument return value
5518 small integer <4 int. reg. int. reg.
5519 word 4 int. reg. int. reg.
5520 double word 8 int. reg. int. reg.
5522 _Complex small integer <8 int. reg. int. reg.
5523 _Complex word 8 int. reg. int. reg.
5524 _Complex double word 16 memory int. reg.
5526 vector integer <=8 int. reg. FP reg.
5527 vector integer >8 memory memory
5529 float 4 int. reg. FP reg.
5530 double 8 int. reg. FP reg.
5531 long double 16 memory memory
5533 _Complex float 8 memory FP reg.
5534 _Complex double 16 memory FP reg.
5535 _Complex long double 32 memory FP reg.
5537 vector float any memory memory
5539 aggregate any memory memory
    64-bit ABI:

 5546 size argument return value
5546 small integer <8 int. reg. int. reg.
5547 word 8 int. reg. int. reg.
5548 double word 16 int. reg. int. reg.
5550 _Complex small integer <16 int. reg. int. reg.
5551 _Complex word 16 int. reg. int. reg.
5552 _Complex double word 32 memory int. reg.
5554 vector integer <=16 FP reg. FP reg.
5555 vector integer 16<s<=32 memory FP reg.
5556 vector integer >32 memory memory
5558 float 4 FP reg. FP reg.
5559 double 8 FP reg. FP reg.
5560 long double 16 FP reg. FP reg.
5562 _Complex float 8 FP reg. FP reg.
5563 _Complex double 16 FP reg. FP reg.
5564 _Complex long double 32 memory FP reg.
5566 vector float <=16 FP reg. FP reg.
5567 vector float 16<s<=32 memory FP reg.
5568 vector float >32 memory memory
5570 aggregate <=16 reg. reg.
5571 aggregate 16<s<=32 memory reg.
5572 aggregate >32 memory memory
5576 Note #1: complex floating-point types follow the extended SPARC ABIs as
5577 implemented by the Sun compiler.
5579 Note #2: integral vector types follow the scalar floating-point types
5580 conventions to match what is implemented by the Sun VIS SDK.
5582 Note #3: floating-point vector types follow the aggregate types conventions.
5586 /* Maximum number of int regs for args. */
5587 #define SPARC_INT_ARG_MAX 6
5588 /* Maximum number of fp regs for args. */
5589 #define SPARC_FP_ARG_MAX 16
5591 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
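/* E.g. ROUND_ADVANCE (10) is 3 on a 32-bit target (UNITS_PER_WORD == 4) and
   2 on a 64-bit target (UNITS_PER_WORD == 8): sizes are rounded up to whole
   argument words.  */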
5593 /* Handle the INIT_CUMULATIVE_ARGS macro.
5594 Initialize a variable CUM of type CUMULATIVE_ARGS
5595 for a call to a function whose data type is FNTYPE.
5596 For a library call, FNTYPE is 0. */
5599 init_cumulative_args (struct sparc_args *cum, tree fntype,
5600 rtx libname ATTRIBUTE_UNUSED,
5601 tree fndecl ATTRIBUTE_UNUSED)
5604 cum->prototype_p = fntype && prototype_p (fntype);
5605 cum->libcall_p = fntype == 0;
5608 /* Handle promotion of pointer and integer arguments. */
5610 static enum machine_mode
5611 sparc_promote_function_mode (const_tree type,
5612 enum machine_mode mode,
5614 const_tree fntype ATTRIBUTE_UNUSED,
5615 int for_return ATTRIBUTE_UNUSED)
5617 if (type != NULL_TREE && POINTER_TYPE_P (type))
5619 *punsignedp = POINTERS_EXTEND_UNSIGNED;
5623 /* Integral arguments are passed as full words, as per the ABI. */
5624 if (GET_MODE_CLASS (mode) == MODE_INT
5625 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5631 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
5634 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
5636 return TARGET_ARCH64 ? true : false;
5639 /* Scan the record type TYPE and return the following predicates:
5640 - INTREGS_P: the record contains at least one field or sub-field
5641 that is eligible for promotion in integer registers.
5642 - FP_REGS_P: the record contains at least one field or sub-field
5643 that is eligible for promotion in floating-point registers.
5644 - PACKED_P: the record contains at least one field that is packed.
5646 Sub-fields are not taken into account for the PACKED_P predicate. */
5649 scan_record_type (const_tree type, int *intregs_p, int *fpregs_p,
5654 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5656 if (TREE_CODE (field) == FIELD_DECL)
5658 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5659 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
5660 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5661 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5667 if (packed_p && DECL_PACKED (field))
5673 /* Compute the slot number to pass an argument in.
5674 Return the slot number or -1 if passing on the stack.
5676 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5677 the preceding args and about the function being called.
5678 MODE is the argument's machine mode.
5679 TYPE is the data type of the argument (as a tree).
5680 This is null for libcalls where that information may not be available.
5682 NAMED is nonzero if this argument is a named parameter
5683 (otherwise it is an extra parameter matching an ellipsis).
5684 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
5685 *PREGNO records the register number to use if scalar type.
5686 *PPADDING records the amount of padding needed in words. */
5689 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
5690 const_tree type, bool named, bool incoming_p,
5691 int *pregno, int *ppadding)
5693 int regbase = (incoming_p
5694 ? SPARC_INCOMING_INT_ARG_FIRST
5695 : SPARC_OUTGOING_INT_ARG_FIRST);
5696 int slotno = cum->words;
5697 enum mode_class mclass;
5702 if (type && TREE_ADDRESSABLE (type))
5708 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
5711 /* For SPARC64, objects requiring 16-byte alignment get it. */
5713 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
5714 && (slotno & 1) != 0)
5715 slotno++, *ppadding = 1;
5717 mclass = GET_MODE_CLASS (mode);
5718 if (type && TREE_CODE (type) == VECTOR_TYPE)
5720 /* Vector types deserve special treatment because they are
5721 polymorphic wrt their mode, depending upon whether VIS
5722 instructions are enabled. */
5723 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5725 /* The SPARC port defines no floating-point vector modes. */
5726 gcc_assert (mode == BLKmode);
5730 /* Integral vector types should either have a vector
5731 mode or an integral mode, because we are guaranteed
5732 by pass_by_reference that their size is not greater
5733 than 16 bytes and TImode is 16-byte wide. */
5734 gcc_assert (mode != BLKmode);
5736 /* Vector integers are handled like floats according to the Sun VIS SDK. */
5738 mclass = MODE_FLOAT;
5745 case MODE_COMPLEX_FLOAT:
5746 case MODE_VECTOR_INT:
5747 if (TARGET_ARCH64 && TARGET_FPU && named)
5749 if (slotno >= SPARC_FP_ARG_MAX)
5751 regno = SPARC_FP_ARG_FIRST + slotno * 2;
5752 /* Arguments filling only one single FP register are
5753 right-justified in the outer double FP register. */
5754 if (GET_MODE_SIZE (mode) <= 4)
5761 case MODE_COMPLEX_INT:
5762 if (slotno >= SPARC_INT_ARG_MAX)
5764 regno = regbase + slotno;
5768 if (mode == VOIDmode)
5769 /* MODE is VOIDmode when generating the actual call. */
5772 gcc_assert (mode == BLKmode);
5776 || (TREE_CODE (type) != VECTOR_TYPE
5777 && TREE_CODE (type) != RECORD_TYPE))
5779 if (slotno >= SPARC_INT_ARG_MAX)
5781 regno = regbase + slotno;
5783 else /* TARGET_ARCH64 && type */
5785 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
5787 /* First see what kinds of registers we would need. */
5788 if (TREE_CODE (type) == VECTOR_TYPE)
5791 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
5793 /* The ABI obviously doesn't specify how packed structures
5794 are passed. These are defined to be passed in int regs
5795 if possible, otherwise memory. */
5796 if (packed_p || !named)
5797 fpregs_p = 0, intregs_p = 1;
5799 /* If all arg slots are filled, then must pass on stack. */
5800 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
5803 /* If there are only int args and all int arg slots are filled,
5804 then must pass on stack. */
5805 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
5808 /* Note that even if all int arg slots are filled, fp members may
5809 still be passed in regs if such regs are available.
5810 *PREGNO isn't set because there may be more than one, it's up
5811 to the caller to compute them. */
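/* Example of the slot numbering (illustrative, 64-bit, prototyped call):
   for f (int a, double b, int c), A occupies slot 0 and is passed in %o0,
   B occupies slot 1 and is passed in the FP register pair of that slot
   (%d2), and C occupies slot 2 and is passed in %o2; the integer register
   corresponding to B's slot (%o1) is skipped rather than reused.  */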
5824 /* Handle recursive register counting for structure field layout. */
5826 struct function_arg_record_value_parms
5828 rtx ret; /* return expression being built. */
5829 int slotno; /* slot number of the argument. */
5830 int named; /* whether the argument is named. */
5831 int regbase; /* regno of the base register. */
5832 int stack; /* 1 if part of the argument is on the stack. */
5833 int intoffset; /* offset of the first pending integer field. */
5834 unsigned int nregs; /* number of words passed in registers. */
5837 static void function_arg_record_value_3
5838 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
5839 static void function_arg_record_value_2
5840 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5841 static void function_arg_record_value_1
5842 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5843 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
5844 static rtx function_arg_union_value (int, enum machine_mode, int, int);
5846 /* A subroutine of function_arg_record_value. Traverse the structure
5847 recursively and determine how many registers will be required. */
5850 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
5851 struct function_arg_record_value_parms *parms,
5856 /* We need to compute how many registers are needed so we can
5857 allocate the PARALLEL but before we can do that we need to know
5858 whether there are any packed fields. The ABI obviously doesn't
5859 specify how structures are passed in this case, so they are
5860 defined to be passed in int regs if possible, otherwise memory,
5861 regardless of whether there are fp values present. */
5864 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5866 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5873 /* Compute how many registers we need. */
5874 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5876 if (TREE_CODE (field) == FIELD_DECL)
5878 HOST_WIDE_INT bitpos = startbitpos;
5880 if (DECL_SIZE (field) != 0)
5882 if (integer_zerop (DECL_SIZE (field)))
5885 if (host_integerp (bit_position (field), 1))
5886 bitpos += int_bit_position (field);
5889 /* ??? FIXME: else assume zero offset. */
5891 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5892 function_arg_record_value_1 (TREE_TYPE (field),
5896 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5897 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5902 if (parms->intoffset != -1)
5904 unsigned int startbit, endbit;
5905 int intslots, this_slotno;
5907 startbit = parms->intoffset & -BITS_PER_WORD;
5908 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5910 intslots = (endbit - startbit) / BITS_PER_WORD;
5911 this_slotno = parms->slotno + parms->intoffset
5914 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5916 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5917 /* We need to pass this field on the stack. */
5921 parms->nregs += intslots;
5922 parms->intoffset = -1;
5925 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
5926 If it wasn't true we wouldn't be here. */
5927 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5928 && DECL_MODE (field) == BLKmode)
5929 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5930 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5937 if (parms->intoffset == -1)
5938 parms->intoffset = bitpos;
5944 /* A subroutine of function_arg_record_value. Assign the bits of the
5945 structure between parms->intoffset and bitpos to integer registers. */
5948 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
5949 struct function_arg_record_value_parms *parms)
5951 enum machine_mode mode;
5953 unsigned int startbit, endbit;
5954 int this_slotno, intslots, intoffset;
5957 if (parms->intoffset == -1)
5960 intoffset = parms->intoffset;
5961 parms->intoffset = -1;
5963 startbit = intoffset & -BITS_PER_WORD;
5964 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5965 intslots = (endbit - startbit) / BITS_PER_WORD;
5966 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5968 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5972 /* If this is the trailing part of a word, only load that much into
5973 the register. Otherwise load the whole register. Note that in
5974 the latter case we may pick up unwanted bits. It's not a problem
5975 at the moment, but we may wish to revisit this. */
5977 if (intoffset % BITS_PER_WORD != 0)
5978 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5983 intoffset /= BITS_PER_UNIT;
5986 regno = parms->regbase + this_slotno;
5987 reg = gen_rtx_REG (mode, regno);
5988 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5989 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5992 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5997 while (intslots > 0);
6000 /* A subroutine of function_arg_record_value. Traverse the structure
6001 recursively and assign bits to floating point registers. Track which
6002 bits in between need integer registers; invoke function_arg_record_value_3
6003 to make that happen. */
6006 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
6007 struct function_arg_record_value_parms *parms,
6013 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6015 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
6022 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6024 if (TREE_CODE (field) == FIELD_DECL)
6026 HOST_WIDE_INT bitpos = startbitpos;
6028 if (DECL_SIZE (field) != 0)
6030 if (integer_zerop (DECL_SIZE (field)))
6033 if (host_integerp (bit_position (field), 1))
6034 bitpos += int_bit_position (field);
6037 /* ??? FIXME: else assume zero offset. */
6039 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
6040 function_arg_record_value_2 (TREE_TYPE (field),
6044 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
6045 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
6050 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
6051 int regno, nregs, pos;
6052 enum machine_mode mode = DECL_MODE (field);
6055 function_arg_record_value_3 (bitpos, parms);
6057 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
6060 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6061 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
6063 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
6065 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6071 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
6072 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
6074 reg = gen_rtx_REG (mode, regno);
6075 pos = bitpos / BITS_PER_UNIT;
6076 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
6077 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
6081 regno += GET_MODE_SIZE (mode) / 4;
6082 reg = gen_rtx_REG (mode, regno);
6083 pos += GET_MODE_SIZE (mode);
6084 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
6085 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
6091 if (parms->intoffset == -1)
6092 parms->intoffset = bitpos;
6098 /* Used by function_arg and sparc_function_value_1 to implement the complex
6099 conventions of the 64-bit ABI for passing and returning structures.
6100 Return an expression valid as a return value for the FUNCTION_ARG
6101 and TARGET_FUNCTION_VALUE.
6103 TYPE is the data type of the argument (as a tree).
6104 This is null for libcalls where that information may not be available.
6106 MODE is the argument's machine mode.
6107 SLOTNO is the index number of the argument's slot in the parameter array.
6108 NAMED is nonzero if this argument is a named parameter
6109 (otherwise it is an extra parameter matching an ellipsis).
6110 REGBASE is the regno of the base register for the parameter array. */
6113 function_arg_record_value (const_tree type, enum machine_mode mode,
6114 int slotno, int named, int regbase)
6116 HOST_WIDE_INT typesize = int_size_in_bytes (type);
6117 struct function_arg_record_value_parms parms;
6120 parms.ret = NULL_RTX;
6121 parms.slotno = slotno;
6122 parms.named = named;
6123 parms.regbase = regbase;
6126 /* Compute how many registers we need. */
6128 parms.intoffset = 0;
6129 function_arg_record_value_1 (type, 0, &parms, false);
6131 /* Take into account pending integer fields. */
6132 if (parms.intoffset != -1)
6134 unsigned int startbit, endbit;
6135 int intslots, this_slotno;
6137 startbit = parms.intoffset & -BITS_PER_WORD;
6138 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
6139 intslots = (endbit - startbit) / BITS_PER_WORD;
6140 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
6142 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
6144 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
6145 /* We need to pass this field on the stack. */
6149 parms.nregs += intslots;
6151 nregs = parms.nregs;
6153 /* Allocate the vector and handle some annoying special cases. */
6156 /* ??? Empty structure has no value? Duh? */
6159 /* Though there's nothing really to store, return a word register
6160 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
6161 leads to breakage because there are zero bytes to store. */
6163 return gen_rtx_REG (mode, regbase);
6167 /* ??? C++ has structures with no fields, and yet a size. Give up
6168 for now and pass everything back in integer registers. */
6169 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6171 if (nregs + slotno > SPARC_INT_ARG_MAX)
6172 nregs = SPARC_INT_ARG_MAX - slotno;
6174 gcc_assert (nregs != 0);
6176 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
6178 /* If at least one field must be passed on the stack, generate
6179 (parallel [(expr_list (nil) ...) ...]) so that all fields will
6180 also be passed on the stack. We can't do much better because the
6181 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
6182 of structures for which the fields passed exclusively in registers
6183 are not at the beginning of the structure. */
6185 XVECEXP (parms.ret, 0, 0)
6186 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6188 /* Fill in the entries. */
6190 parms.intoffset = 0;
6191 function_arg_record_value_2 (type, 0, &parms, false);
6192 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
6194 gcc_assert (parms.nregs == nregs);
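/* For instance (illustrative, 64-bit): a 16-byte struct { int i; double d; }
   passed as the first argument comes back as a PARALLEL roughly of the form

     (parallel [(expr_list (reg:DI %o0) (const_int 0))
		(expr_list (reg:DF %f2) (const_int 8))])

   i.e. the integer field travels in a full integer register (see the
   "unwanted bits" note above) and the double in the FP registers of the
   second slot.  */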
6199 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6200 of the 64-bit ABI for passing and returning unions.
6201 Return an expression valid as a return value for the FUNCTION_ARG
6202 and TARGET_FUNCTION_VALUE.
6204 SIZE is the size in bytes of the union.
6205 MODE is the argument's machine mode.
6206 REGNO is the hard register the union will be passed in. */
6209 function_arg_union_value (int size, enum machine_mode mode, int slotno,
6212 int nwords = ROUND_ADVANCE (size), i;
6215 /* See comment in previous function for empty structures. */
6217 return gen_rtx_REG (mode, regno);
6219 if (slotno == SPARC_INT_ARG_MAX - 1)
6222 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
6224 for (i = 0; i < nwords; i++)
6226 /* Unions are passed left-justified. */
6227 XVECEXP (regs, 0, i)
6228 = gen_rtx_EXPR_LIST (VOIDmode,
6229 gen_rtx_REG (word_mode, regno),
6230 GEN_INT (UNITS_PER_WORD * i));
6237 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6238 for passing and returning large (BLKmode) vectors.
6239 Return an expression valid as a return value for the FUNCTION_ARG
6240 and TARGET_FUNCTION_VALUE.
6242 SIZE is the size in bytes of the vector (at least 8 bytes).
6243 REGNO is the FP hard register the vector will be passed in. */
6246 function_arg_vector_value (int size, int regno)
6248 int i, nregs = size / 8;
6251 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
6253 for (i = 0; i < nregs; i++)
6255 XVECEXP (regs, 0, i)
6256 = gen_rtx_EXPR_LIST (VOIDmode,
6257 gen_rtx_REG (DImode, regno + 2*i),
6264 /* Determine where to put an argument to a function.
6265 Value is zero to push the argument on the stack,
6266 or a hard register in which to store the argument.
6268 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6269 the preceding args and about the function being called.
6270 MODE is the argument's machine mode.
6271 TYPE is the data type of the argument (as a tree).
6272 This is null for libcalls where that information may not be available.
6274 NAMED is true if this argument is a named parameter
6275 (otherwise it is an extra parameter matching an ellipsis).
6276 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
6277 TARGET_FUNCTION_INCOMING_ARG. */
6280 sparc_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
6281 const_tree type, bool named, bool incoming_p)
6283 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6285 int regbase = (incoming_p
6286 ? SPARC_INCOMING_INT_ARG_FIRST
6287 : SPARC_OUTGOING_INT_ARG_FIRST);
6288 int slotno, regno, padding;
6289 enum mode_class mclass = GET_MODE_CLASS (mode);
6291 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
6296 /* Vector types deserve special treatment because they are polymorphic wrt
6297 their mode, depending upon whether VIS instructions are enabled. */
6298 if (type && TREE_CODE (type) == VECTOR_TYPE)
6300 HOST_WIDE_INT size = int_size_in_bytes (type);
6301 gcc_assert ((TARGET_ARCH32 && size <= 8)
6302 || (TARGET_ARCH64 && size <= 16));
6304 if (mode == BLKmode)
6305 return function_arg_vector_value (size,
6306 SPARC_FP_ARG_FIRST + 2*slotno);
6308 mclass = MODE_FLOAT;
6312 return gen_rtx_REG (mode, regno);
6314 /* Structures up to 16 bytes in size are passed in arg slots on the stack
6315 and are promoted to registers if possible. */
6316 if (type && TREE_CODE (type) == RECORD_TYPE)
6318 HOST_WIDE_INT size = int_size_in_bytes (type);
6319 gcc_assert (size <= 16);
6321 return function_arg_record_value (type, mode, slotno, named, regbase);
6324 /* Unions up to 16 bytes in size are passed in integer registers. */
6325 else if (type && TREE_CODE (type) == UNION_TYPE)
6327 HOST_WIDE_INT size = int_size_in_bytes (type);
6328 gcc_assert (size <= 16);
6330 return function_arg_union_value (size, mode, slotno, regno);
6333 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
6334 but also have the slot allocated for them.
6335 If no prototype is in scope fp values in register slots get passed
6336 in two places, either fp regs and int regs or fp regs and memory. */
6337 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6338 && SPARC_FP_REG_P (regno))
6340 rtx reg = gen_rtx_REG (mode, regno);
6341 if (cum->prototype_p || cum->libcall_p)
6343 /* "* 2" because fp reg numbers are recorded in 4 byte
6346 /* ??? This will cause the value to be passed in the fp reg and
6347 in the stack. When a prototype exists we want to pass the
6348 value in the reg but reserve space on the stack. That's an
6349 optimization, and is deferred [for a bit]. */
6350 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
6351 return gen_rtx_PARALLEL (mode,
6353 gen_rtx_EXPR_LIST (VOIDmode,
6354 NULL_RTX, const0_rtx),
6355 gen_rtx_EXPR_LIST (VOIDmode,
6359 /* ??? It seems that passing back a register even when past
6360 the area declared by REG_PARM_STACK_SPACE will allocate
6361 space appropriately, and will not copy the data onto the
6362 stack, exactly as we desire.
6364 This is due to locate_and_pad_parm being called in
6365 expand_call whenever reg_parm_stack_space > 0, which
6366 while beneficial to our example here, would seem to be
6367 in error from what had been intended. Ho hum... -- r~ */
6375 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
6379 /* On incoming, we don't need to know that the value
6380 is passed in %f0 and %i0, and it confuses other parts
6381 causing needless spillage even on the simplest cases. */
6385 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
6386 + (regno - SPARC_FP_ARG_FIRST) / 2);
6388 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6389 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
6391 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6395 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6396 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6397 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6402 /* All other aggregate types are passed in an integer register in a mode
6403 corresponding to the size of the type. */
6404 else if (type && AGGREGATE_TYPE_P (type))
6406 HOST_WIDE_INT size = int_size_in_bytes (type);
6407 gcc_assert (size <= 16);
6409 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6412 return gen_rtx_REG (mode, regno);
6415 /* Handle the TARGET_FUNCTION_ARG target hook. */
6418 sparc_function_arg (cumulative_args_t cum, enum machine_mode mode,
6419 const_tree type, bool named)
6421 return sparc_function_arg_1 (cum, mode, type, named, false);
6424 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
6427 sparc_function_incoming_arg (cumulative_args_t cum, enum machine_mode mode,
6428 const_tree type, bool named)
6430 return sparc_function_arg_1 (cum, mode, type, named, true);
6433 /* For sparc64, objects requiring 16 byte alignment are passed that way. */
6436 sparc_function_arg_boundary (enum machine_mode mode, const_tree type)
6438 return ((TARGET_ARCH64
6439 && (GET_MODE_ALIGNMENT (mode) == 128
6440 || (type && TYPE_ALIGN (type) == 128)))
6445 /* For an arg passed partly in registers and partly in memory,
6446 this is the number of bytes of registers used.
6447 For args passed entirely in registers or entirely in memory, zero.
6449 Any arg that starts in the first 6 regs but won't entirely fit in them
6450 needs partial registers on v8. On v9, structures with integer
6451 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
6452 values that begin in the last fp reg [where "last fp reg" varies with the
6453 mode] will be split between that reg and memory. */
6456 sparc_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
6457 tree type, bool named)
6459 int slotno, regno, padding;
6461 /* We pass false for incoming_p here; it doesn't matter. */
6462 slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
6463 false, &regno, &padding);
6470 if ((slotno + (mode == BLKmode
6471 ? ROUND_ADVANCE (int_size_in_bytes (type))
6472 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
6473 > SPARC_INT_ARG_MAX)
6474 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
6478 /* We are guaranteed by pass_by_reference that the size of the
6479 argument is not greater than 16 bytes, so we only need to return
6480 one word if the argument is partially passed in registers. */
6482 if (type && AGGREGATE_TYPE_P (type))
6484 int size = int_size_in_bytes (type);
6486 if (size > UNITS_PER_WORD
6487 && slotno == SPARC_INT_ARG_MAX - 1)
6488 return UNITS_PER_WORD;
6490 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
6491 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
6492 && ! (TARGET_FPU && named)))
6494 /* The complex types are passed as packed types. */
6495 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6496 && slotno == SPARC_INT_ARG_MAX - 1)
6497 return UNITS_PER_WORD;
6499 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6501 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
6503 return UNITS_PER_WORD;
6510 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
6511 Specify whether to pass the argument by reference. */
6514 sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
6515 enum machine_mode mode, const_tree type,
6516 bool named ATTRIBUTE_UNUSED)
6519 /* Original SPARC 32-bit ABI says that structures and unions,
6520 and quad-precision floats are passed by reference. For Pascal,
6521 also pass arrays by reference. All other base types are passed
6524 Extended ABI (as implemented by the Sun compiler) says that all
6525 complex floats are passed by reference. Pass complex integers
6526 in registers up to 8 bytes. More generally, enforce the 2-word
6527 cap for passing arguments in registers.
6529 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6530 integers are passed like floats of the same size, that is in
6531 registers up to 8 bytes. Pass all vector floats by reference
6532 like structures and unions. */
6533 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
6535 /* Catch CDImode, TFmode, DCmode and TCmode. */
6536 || GET_MODE_SIZE (mode) > 8
6538 && TREE_CODE (type) == VECTOR_TYPE
6539 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6541 /* Original SPARC 64-bit ABI says that structures and unions
6542 smaller than 16 bytes are passed in registers, as well as
6543 all other base types.
6545 Extended ABI (as implemented by the Sun compiler) says that
6546 complex floats are passed in registers up to 16 bytes. Pass
6547 all complex integers in registers up to 16 bytes. More generally,
6548 enforce the 2-word cap for passing arguments in registers.
6550 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6551 integers are passed like floats of the same size, that is in
6552 registers (up to 16 bytes). Pass all vector floats like structure
6555 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
6556 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
6557 /* Catch CTImode and TCmode. */
6558 || GET_MODE_SIZE (mode) > 16);
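/* Illustrative summary only (not an additional rule): how the checks in
   sparc_pass_by_reference play out for a few C-level argument types,
   assuming the ABIs described in the comments above.

     32-bit:  struct { int a, b; }      -> by reference (aggregate)
              long double  (TFmode)     -> by reference (mode size > 8)
              _Complex double (DCmode)  -> by reference (mode size > 8)
              _Complex int              -> in registers (8 bytes)

     64-bit:  struct { int a, b; }      -> in registers (size <= 16)
              struct { char c[24]; }    -> by reference (size > 16)
              long double  (TFmode)     -> in registers (mode size == 16)
              _Complex long double      -> by reference (TCmode, size > 16)  */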
6561 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
6562 Update the data in CUM to advance over an argument
6563 of mode MODE and data type TYPE.
6564 TYPE is null for libcalls where that information may not be available. */
6567 sparc_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
6568 const_tree type, bool named)
6570 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6573 /* We pass false for incoming_p here; it doesn't matter. */
6574 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
6576 /* If argument requires leading padding, add it. */
6577 cum->words += padding;
6581 cum->words += (mode != BLKmode
6582 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6583 : ROUND_ADVANCE (int_size_in_bytes (type)));
6587 if (type && AGGREGATE_TYPE_P (type))
6589 int size = int_size_in_bytes (type);
6593 else if (size <= 16)
6595 else /* passed by reference */
6600 cum->words += (mode != BLKmode
6601 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6602 : ROUND_ADVANCE (int_size_in_bytes (type)));
6607 /* Handle the FUNCTION_ARG_PADDING macro.
6608 For the 64 bit ABI structs are always stored left shifted in their
6612 function_arg_padding (enum machine_mode mode, const_tree type)
6614 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
6617 /* Fall back to the default. */
6618 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
6621 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
6622 Specify whether to return the return value in memory. */
6625 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6628 /* Original SPARC 32-bit ABI says that structures and unions,
6629 and quad-precision floats are returned in memory. All other
6630 base types are returned in registers.
6632 Extended ABI (as implemented by the Sun compiler) says that
6633 all complex floats are returned in registers (8 FP registers
6634 at most for '_Complex long double'). Return all complex integers
6635 in registers (4 at most for '_Complex long long').
6637 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6638 integers are returned like floats of the same size, that is in
6639 registers up to 8 bytes and in memory otherwise. Return all
6640 vector floats in memory like structures and unions; note that
6641 they always have BLKmode like the latter. */
6642 return (TYPE_MODE (type) == BLKmode
6643 || TYPE_MODE (type) == TFmode
6644 || (TREE_CODE (type) == VECTOR_TYPE
6645 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6647 /* Original SPARC 64-bit ABI says that structures and unions
6648 smaller than 32 bytes are returned in registers, as well as
6649 all other base types.
6651 Extended ABI (as implemented by the Sun compiler) says that all
6652 complex floats are returned in registers (8 FP registers at most
6653 for '_Complex long double'). Return all complex integers in
6654 registers (4 at most for '_Complex TItype').
6656 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6657 integers are returned like floats of the same size, that is in
6658 registers. Return all vector floats like structures and unions;
6659 note that they always have BLKmode like the latter. */
6660 return (TYPE_MODE (type) == BLKmode
6661 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
6664 /* Handle the TARGET_STRUCT_VALUE target hook.
6665 Return where to find the structure return value address. */
6668 sparc_struct_value_rtx (tree fndecl, int incoming)
6677 mem = gen_frame_mem (Pmode, plus_constant (frame_pointer_rtx,
6678 STRUCT_VALUE_OFFSET));
6680 mem = gen_frame_mem (Pmode, plus_constant (stack_pointer_rtx,
6681 STRUCT_VALUE_OFFSET));
6683 /* Only follow the SPARC ABI for fixed-size structure returns.
6684 Variable-size structure returns are handled per the normal
6685 procedures in GCC. This is enabled by -mstd-struct-return. */
6687 && sparc_std_struct_return
6688 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
6689 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
6691 /* We must check and adjust the return address, as it is
6692 optional as to whether the return object is really
6694 rtx ret_reg = gen_rtx_REG (Pmode, 31);
6695 rtx scratch = gen_reg_rtx (SImode);
6696 rtx endlab = gen_label_rtx ();
6698 /* Calculate the return object size */
6699 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
6700 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
6701 /* Construct a temporary return value */
6703 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
6705 /* Implement SPARC 32-bit psABI callee return struct checking:
6707 Fetch the instruction where we will return to and see if
6708 it's an unimp instruction (the most significant 10 bits
6710 emit_move_insn (scratch, gen_rtx_MEM (SImode,
6711 plus_constant (ret_reg, 8)));
6712 /* Assume the size is valid and pre-adjust */
6713 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
6714 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
6716 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
6717 /* Write the address of the memory pointed to by temp_val into
6718 the memory pointed to by mem */
6719 emit_move_insn (mem, XEXP (temp_val, 0));
6720 emit_label (endlab);
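/* A hand-written illustration, not code emitted by this file, of the psABI
   convention the check above is looking for: a 32-bit caller of a function
   returning a structure follows the call with an "unimp" instruction whose
   immediate field holds the structure size,

         call    foo
          nop
         unimp   8          ! expecting an 8-byte struct
         ...                ! a struct-returning callee resumes here

   so a callee that really returns a structure comes back to %i7+12 instead
   of the usual %i7+8.  The code above loads the word at %i7+8, compares it
   with the expected size, and keeps the pre-adjusted return address only
   when they match; otherwise it backs the adjustment out and redirects the
   return slot to a local temporary.  */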
6727 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
6728 For v9, function return values are subject to the same rules as arguments,
6729 except that up to 32 bytes may be returned in registers. */
6732 sparc_function_value_1 (const_tree type, enum machine_mode mode,
6735 /* Beware that the two values are swapped here wrt function_arg. */
6736 int regbase = (outgoing
6737 ? SPARC_INCOMING_INT_ARG_FIRST
6738 : SPARC_OUTGOING_INT_ARG_FIRST);
6739 enum mode_class mclass = GET_MODE_CLASS (mode);
6742 /* Vector types deserve special treatment because they are polymorphic wrt
6743 their mode, depending upon whether VIS instructions are enabled. */
6744 if (type && TREE_CODE (type) == VECTOR_TYPE)
6746 HOST_WIDE_INT size = int_size_in_bytes (type);
6747 gcc_assert ((TARGET_ARCH32 && size <= 8)
6748 || (TARGET_ARCH64 && size <= 32));
6750 if (mode == BLKmode)
6751 return function_arg_vector_value (size,
6752 SPARC_FP_ARG_FIRST);
6754 mclass = MODE_FLOAT;
6757 if (TARGET_ARCH64 && type)
6759 /* Structures up to 32 bytes in size are returned in registers. */
6760 if (TREE_CODE (type) == RECORD_TYPE)
6762 HOST_WIDE_INT size = int_size_in_bytes (type);
6763 gcc_assert (size <= 32);
6765 return function_arg_record_value (type, mode, 0, 1, regbase);
6768 /* Unions up to 32 bytes in size are returned in integer registers. */
6769 else if (TREE_CODE (type) == UNION_TYPE)
6771 HOST_WIDE_INT size = int_size_in_bytes (type);
6772 gcc_assert (size <= 32);
6774 return function_arg_union_value (size, mode, 0, regbase);
6777 /* Objects that require it are returned in FP registers. */
6778 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6781 /* All other aggregate types are returned in an integer register in a
6782 mode corresponding to the size of the type. */
6783 else if (AGGREGATE_TYPE_P (type))
6785 /* All other aggregate types are passed in an integer register
6786 in a mode corresponding to the size of the type. */
6787 HOST_WIDE_INT size = int_size_in_bytes (type);
6788 gcc_assert (size <= 32);
6790 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6792 /* ??? We probably should have made the same ABI change in
6793 3.4.0 as the one we made for unions. The latter was
6794 required by the SCD though, while the former is not
6795 specified, so we favored compatibility and efficiency.
6797 Now we're stuck for aggregates larger than 16 bytes,
6798 because OImode vanished in the meantime. Let's not
6799 try to be unduly clever, and simply follow the ABI
6800 for unions in that case. */
6801 if (mode == BLKmode)
6802 return function_arg_union_value (size, mode, 0, regbase);
6807 /* We should only have pointer and integer types at this point. This
6808 must match sparc_promote_function_mode. */
6809 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6813 /* We should only have pointer and integer types at this point. This must
6814 match sparc_promote_function_mode. */
6815 else if (TARGET_ARCH32
6816 && mclass == MODE_INT
6817 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6820 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
6821 regno = SPARC_FP_ARG_FIRST;
6825 return gen_rtx_REG (mode, regno);
6828 /* Handle TARGET_FUNCTION_VALUE.
6829 On the SPARC, the value is found in the first "output" register, but the
6830 called function leaves it in the first "input" register. */
6833 sparc_function_value (const_tree valtype,
6834 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
6837 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
6840 /* Handle TARGET_LIBCALL_VALUE. */
6843 sparc_libcall_value (enum machine_mode mode,
6844 const_rtx fun ATTRIBUTE_UNUSED)
6846 return sparc_function_value_1 (NULL_TREE, mode, false);
6849 /* Handle FUNCTION_VALUE_REGNO_P.
6850 On the SPARC, the first "output" reg is used for integer values, and the
6851 first floating point register is used for floating point values. */
6854 sparc_function_value_regno_p (const unsigned int regno)
6856 return (regno == 8 || regno == 32);
6859 /* Do what is necessary for `va_start'. We look at the current function
6860 to determine if stdarg or varargs is used and return the address of
6861 the first unnamed parameter. */
6864 sparc_builtin_saveregs (void)
6866 int first_reg = crtl->args.info.words;
6870 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
6871 emit_move_insn (gen_rtx_MEM (word_mode,
6872 gen_rtx_PLUS (Pmode,
6874 GEN_INT (FIRST_PARM_OFFSET (0)
6877 gen_rtx_REG (word_mode,
6878 SPARC_INCOMING_INT_ARG_FIRST + regno));
6880 address = gen_rtx_PLUS (Pmode,
6882 GEN_INT (FIRST_PARM_OFFSET (0)
6883 + UNITS_PER_WORD * first_reg));
6888 /* Implement `va_start' for stdarg. */
6891 sparc_va_start (tree valist, rtx nextarg)
6893 nextarg = expand_builtin_saveregs ();
6894 std_expand_builtin_va_start (valist, nextarg);
6897 /* Implement `va_arg' for stdarg. */
6900 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6903 HOST_WIDE_INT size, rsize, align;
6906 tree ptrtype = build_pointer_type (type);
6908 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6911 size = rsize = UNITS_PER_WORD;
6917 size = int_size_in_bytes (type);
6918 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6923 /* For SPARC64, objects requiring 16-byte alignment get it. */
6924 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
6925 align = 2 * UNITS_PER_WORD;
6927 /* SPARC-V9 ABI states that structures up to 16 bytes in size
6928 are left-justified in their slots. */
6929 if (AGGREGATE_TYPE_P (type))
6932 size = rsize = UNITS_PER_WORD;
6942 incr = fold_build_pointer_plus_hwi (incr, align - 1);
6943 incr = fold_convert (sizetype, incr);
6944 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
6946 incr = fold_convert (ptr_type_node, incr);
6949 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
6952 if (BYTES_BIG_ENDIAN && size < rsize)
6953 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
6957 addr = fold_convert (build_pointer_type (ptrtype), addr);
6958 addr = build_va_arg_indirect_ref (addr);
6961 /* If the address isn't aligned properly for the type, we need a temporary.
6962 FIXME: This is inefficient, usually we can do this in registers. */
6963 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
6965 tree tmp = create_tmp_var (type, "va_arg_tmp");
6966 tree dest_addr = build_fold_addr_expr (tmp);
6967 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
6968 3, dest_addr, addr, size_int (rsize));
6969 TREE_ADDRESSABLE (tmp) = 1;
6970 gimplify_and_add (copy, pre_p);
6975 addr = fold_convert (ptrtype, addr);
6977 incr = fold_build_pointer_plus_hwi (incr, rsize);
6978 gimplify_assign (valist, incr, post_p);
6980 return build_va_arg_indirect_ref (addr);
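/* A standalone sketch (plain C, hypothetical helper name, not GCC internals)
   of the rounding used for RSIZE in sparc_gimplify_va_arg above: round SIZE
   up to the next multiple of the word size.  With 8-byte words, sizes 1..8
   map to 8 and sizes 9..16 map to 16.  */
#if 0
static long
round_up_to_word (long size, long units_per_word)
{
  /* units_per_word must be a power of two for the mask trick to work.  */
  return (size + units_per_word - 1) & -units_per_word;
}
#endif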
6983 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
6984 Specify whether the vector mode is supported by the hardware. */
6987 sparc_vector_mode_supported_p (enum machine_mode mode)
6989 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
6992 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
6994 static enum machine_mode
6995 sparc_preferred_simd_mode (enum machine_mode mode)
7013 /* Return the string to output an unconditional branch to LABEL, which is
7014 the operand number of the label.
7016 DEST is the destination insn (i.e. the label), INSN is the source. */
7019 output_ubranch (rtx dest, int label, rtx insn)
7021 static char string[64];
7022 bool v9_form = false;
7025 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
7027 int delta = (INSN_ADDRESSES (INSN_UID (dest))
7028 - INSN_ADDRESSES (INSN_UID (insn)));
7029 /* Leave some instructions for "slop". */
7030 if (delta >= -260000 && delta < 260000)
7035 strcpy (string, "ba%*,pt\t%%xcc, ");
7037 strcpy (string, "b%*\t");
7039 p = strchr (string, '\0');
7050 /* Return the string to output a conditional branch to LABEL, which is
7051 the operand number of the label. OP is the conditional expression.
7052 XEXP (OP, 0) is assumed to be a condition code register (integer or
7053 floating point) and its mode specifies what kind of comparison we made.
7055 DEST is the destination insn (i.e. the label), INSN is the source.
7057 REVERSED is nonzero if we should reverse the sense of the comparison.
7059 ANNUL is nonzero if we should generate an annulling branch. */
7062 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
7065 static char string[64];
7066 enum rtx_code code = GET_CODE (op);
7067 rtx cc_reg = XEXP (op, 0);
7068 enum machine_mode mode = GET_MODE (cc_reg);
7069 const char *labelno, *branch;
7070 int spaces = 8, far;
7073 /* v9 branches are limited to +-1MB. If it is too far away,
7086 fbne,a,pn %fcc2, .LC29
7094 far = TARGET_V9 && (get_attr_length (insn) >= 3);
7097 /* Reversing an FP compare requires care -- an ordered compare
7098 becomes an unordered compare and vice versa. */
7099 if (mode == CCFPmode || mode == CCFPEmode)
7100 code = reverse_condition_maybe_unordered (code);
7102 code = reverse_condition (code);
7105 /* Start by writing the branch condition. */
7106 if (mode == CCFPmode || mode == CCFPEmode)
7157 /* ??? !v9: FP branches cannot be preceded by another floating point
7158 insn. Because there is currently no concept of pre-delay slots,
7159 we can fix this only by always emitting a nop before a floating
7164 strcpy (string, "nop\n\t");
7165 strcat (string, branch);
7178 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
7190 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
7211 strcpy (string, branch);
7213 spaces -= strlen (branch);
7214 p = strchr (string, '\0');
7216 /* Now add the annulling, the label, and a possible noop. */
7229 if (! far && insn && INSN_ADDRESSES_SET_P ())
7231 int delta = (INSN_ADDRESSES (INSN_UID (dest))
7232 - INSN_ADDRESSES (INSN_UID (insn)));
7233 /* Leave some instructions for "slop". */
7234 if (delta < -260000 || delta >= 260000)
7238 if (mode == CCFPmode || mode == CCFPEmode)
7240 static char v9_fcc_labelno[] = "%%fccX, ";
7241 /* Set the char indicating the number of the fcc reg to use. */
7242 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
7243 labelno = v9_fcc_labelno;
7246 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
7250 else if (mode == CCXmode || mode == CCX_NOOVmode)
7252 labelno = "%%xcc, ";
7257 labelno = "%%icc, ";
7262 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7265 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7278 strcpy (p, labelno);
7279 p = strchr (p, '\0');
7282 strcpy (p, ".+12\n\t nop\n\tb\t");
7283 /* Skip the next insn if requested or
7284 if we know that it will be a nop. */
7285 if (annul || ! final_sequence)
7299 /* Emit a library call comparison between floating point X and Y.
7300 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
7301 Return the new operator to be used in the comparison sequence.
7303 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
7304 values as arguments instead of the TFmode registers themselves,
7305 that's why we cannot call emit_float_lib_cmp. */
7308 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
7311 rtx slot0, slot1, result, tem, tem2, libfunc;
7312 enum machine_mode mode;
7313 enum rtx_code new_comparison;
7318 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
7322 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
7326 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
7330 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
7334 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
7338 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
7349 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
7362 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
7363 emit_move_insn (slot0, x);
7370 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
7371 emit_move_insn (slot1, y);
7374 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7375 emit_library_call (libfunc, LCT_NORMAL,
7377 XEXP (slot0, 0), Pmode,
7378 XEXP (slot1, 0), Pmode);
7383 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7384 emit_library_call (libfunc, LCT_NORMAL,
7386 x, TFmode, y, TFmode);
7391 /* Immediately move the result of the libcall into a pseudo
7392 register so reload doesn't clobber the value if it needs
7393 the return register for a spill reg. */
7394 result = gen_reg_rtx (mode);
7395 emit_move_insn (result, hard_libcall_value (mode, libfunc));
7400 return gen_rtx_NE (VOIDmode, result, const0_rtx);
7403 new_comparison = (comparison == UNORDERED ? EQ : NE);
7404 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
7407 new_comparison = (comparison == UNGT ? GT : NE);
7408 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
7410 return gen_rtx_NE (VOIDmode, result, const2_rtx);
7412 tem = gen_reg_rtx (mode);
7414 emit_insn (gen_andsi3 (tem, result, const1_rtx));
7416 emit_insn (gen_anddi3 (tem, result, const1_rtx));
7417 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
7420 tem = gen_reg_rtx (mode);
7422 emit_insn (gen_addsi3 (tem, result, const1_rtx));
7424 emit_insn (gen_adddi3 (tem, result, const1_rtx));
7425 tem2 = gen_reg_rtx (mode);
7427 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
7429 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
7430 new_comparison = (comparison == UNEQ ? EQ : NE);
7431 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
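/* A reading of the tests above (inferred from the constants used here, not
   quoted from the library documentation): the _Q_cmp/_Qp_cmp routines appear
   to encode their result as

       0  operands compare equal
       1  first operand is less
       2  first operand is greater
       3  operands are unordered

   which is why "result & 1" selects less-or-unordered, "result != 2" selects
   not-greater, and "(result + 1) & 2" selects ordered-and-unequal.  */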
7437 /* Generate an unsigned DImode to FP conversion. This is the same code
7438 optabs would emit if we didn't have TFmode patterns. */
7441 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
7443 rtx neglab, donelab, i0, i1, f0, in, out;
7446 in = force_reg (DImode, operands[1]);
7447 neglab = gen_label_rtx ();
7448 donelab = gen_label_rtx ();
7449 i0 = gen_reg_rtx (DImode);
7450 i1 = gen_reg_rtx (DImode);
7451 f0 = gen_reg_rtx (mode);
7453 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
7455 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
7456 emit_jump_insn (gen_jump (donelab));
7459 emit_label (neglab);
7461 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
7462 emit_insn (gen_anddi3 (i1, in, const1_rtx));
7463 emit_insn (gen_iordi3 (i0, i0, i1));
7464 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
7465 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
7467 emit_label (donelab);
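/* A standalone sketch (plain C, hypothetical helper name, not GCC code) of
   the trick implemented in RTL above: when the sign bit is set, halve the
   value -- folding the dropped low bit back in so rounding still sees it --
   convert with the signed instruction, then double the result.  */
#if 0
double
u64_to_double (unsigned long long x)
{
  if ((long long) x >= 0)
    return (double) (long long) x;              /* fits a signed convert */

  unsigned long long half = (x >> 1) | (x & 1); /* halve, keep sticky bit */
  double d = (double) (long long) half;
  return d + d;                                 /* undo the halving */
}
#endif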
7470 /* Generate an FP to unsigned DImode conversion. This is the same code
7471 optabs would emit if we didn't have TFmode patterns. */
7474 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
7476 rtx neglab, donelab, i0, i1, f0, in, out, limit;
7479 in = force_reg (mode, operands[1]);
7480 neglab = gen_label_rtx ();
7481 donelab = gen_label_rtx ();
7482 i0 = gen_reg_rtx (DImode);
7483 i1 = gen_reg_rtx (DImode);
7484 limit = gen_reg_rtx (mode);
7485 f0 = gen_reg_rtx (mode);
7487 emit_move_insn (limit,
7488 CONST_DOUBLE_FROM_REAL_VALUE (
7489 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
7490 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
7492 emit_insn (gen_rtx_SET (VOIDmode,
7494 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
7495 emit_jump_insn (gen_jump (donelab));
7498 emit_label (neglab);
7500 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
7501 emit_insn (gen_rtx_SET (VOIDmode,
7503 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
7504 emit_insn (gen_movdi (i1, const1_rtx));
7505 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
7506 emit_insn (gen_xordi3 (out, i0, i1));
7508 emit_label (donelab);
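/* The same idea in the opposite direction (standalone sketch, hypothetical
   helper name, not GCC code): values below 2^63 use the signed convert
   directly; larger ones are rebased by 2^63 before converting and the top
   bit is put back afterwards, mirroring the steps emitted above.  */
#if 0
unsigned long long
double_to_u64 (double x)
{
  const double two63 = 9223372036854775808.0;   /* 2^63 */

  if (x < two63)
    return (unsigned long long) (long long) x;

  unsigned long long i = (unsigned long long) (long long) (x - two63);
  return i ^ (1ULL << 63);                      /* restore the high bit */
}
#endif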
7511 /* Return the string to output a conditional branch to LABEL, testing
7512 register REG. LABEL is the operand number of the label; REG is the
7513 operand number of the reg. OP is the conditional expression. The mode
7514 of REG says what kind of comparison we made.
7516 DEST is the destination insn (i.e. the label), INSN is the source.
7518 REVERSED is nonzero if we should reverse the sense of the comparison.
7520 ANNUL is nonzero if we should generate an annulling branch. */
7523 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
7524 int annul, rtx insn)
7526 static char string[64];
7527 enum rtx_code code = GET_CODE (op);
7528 enum machine_mode mode = GET_MODE (XEXP (op, 0));
7533 /* Branches on a register are limited to +-128KB. If it is too far away,
7546 brgez,a,pn %o1, .LC29
7552 ba,pt %xcc, .LC29 */
7554 far = get_attr_length (insn) >= 3;
7556 /* If not floating-point or if EQ or NE, we can just reverse the code. */
7558 code = reverse_condition (code);
7560 /* Only 64 bit versions of these instructions exist. */
7561 gcc_assert (mode == DImode);
7563 /* Start by writing the branch condition. */
7568 strcpy (string, "brnz");
7572 strcpy (string, "brz");
7576 strcpy (string, "brgez");
7580 strcpy (string, "brlz");
7584 strcpy (string, "brlez");
7588 strcpy (string, "brgz");
7595 p = strchr (string, '\0');
7597 /* Now add the annulling, reg, label, and nop. */
7604 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7607 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7612 *p = p < string + 8 ? '\t' : ' ';
7620 int veryfar = 1, delta;
7622 if (INSN_ADDRESSES_SET_P ())
7624 delta = (INSN_ADDRESSES (INSN_UID (dest))
7625 - INSN_ADDRESSES (INSN_UID (insn)));
7626 /* Leave some instructions for "slop". */
7627 if (delta >= -260000 && delta < 260000)
7631 strcpy (p, ".+12\n\t nop\n\t");
7632 /* Skip the next insn if requested or
7633 if we know that it will be a nop. */
7634 if (annul || ! final_sequence)
7644 strcpy (p, "ba,pt\t%%xcc, ");
7658 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
7659 Such instructions cannot be used in the delay slot of a return insn on v9.
7660 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
7664 epilogue_renumber (register rtx *where, int test)
7666 register const char *fmt;
7668 register enum rtx_code code;
7673 code = GET_CODE (*where);
7678 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
7680 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
7681 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
7689 /* Do not replace the frame pointer with the stack pointer because
7690 it can cause the delayed instruction to load below the stack.
7691 This occurs when instructions like:
7693 (set (reg/i:SI 24 %i0)
7694 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
7695 (const_int -20 [0xffffffec])) 0))
7697 are in the return delayed slot. */
7699 if (GET_CODE (XEXP (*where, 0)) == REG
7700 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
7701 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
7702 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
7707 if (SPARC_STACK_BIAS
7708 && GET_CODE (XEXP (*where, 0)) == REG
7709 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
7717 fmt = GET_RTX_FORMAT (code);
7719 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7724 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
7725 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
7728 else if (fmt[i] == 'e'
7729 && epilogue_renumber (&(XEXP (*where, i)), test))
7735 /* Leaf functions and non-leaf functions have different needs. */
7738 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
7741 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
7743 static const int *const reg_alloc_orders[] = {
7744 reg_leaf_alloc_order,
7745 reg_nonleaf_alloc_order};
7748 order_regs_for_local_alloc (void)
7750 static int last_order_nonleaf = 1;
7752 if (df_regs_ever_live_p (15) != last_order_nonleaf)
7754 last_order_nonleaf = !last_order_nonleaf;
7755 memcpy ((char *) reg_alloc_order,
7756 (const char *) reg_alloc_orders[last_order_nonleaf],
7757 FIRST_PSEUDO_REGISTER * sizeof (int));
7761 /* Return 1 if REG and MEM are legitimate enough to allow the various
7762 mem<-->reg splits to be run. */
7765 sparc_splitdi_legitimate (rtx reg, rtx mem)
7767 /* Punt if we are here by mistake. */
7768 gcc_assert (reload_completed);
7770 /* We must have an offsettable memory reference. */
7771 if (! offsettable_memref_p (mem))
7774 /* If we have legitimate args for ldd/std, we do not want
7775 the split to happen. */
7776 if ((REGNO (reg) % 2) == 0
7777 && mem_min_alignment (mem, 8))
7784 /* Like sparc_splitdi_legitimate but for REG <--> REG moves. */
7787 sparc_split_regreg_legitimate (rtx reg1, rtx reg2)
7791 if (GET_CODE (reg1) == SUBREG)
7792 reg1 = SUBREG_REG (reg1);
7793 if (GET_CODE (reg1) != REG)
7795 regno1 = REGNO (reg1);
7797 if (GET_CODE (reg2) == SUBREG)
7798 reg2 = SUBREG_REG (reg2);
7799 if (GET_CODE (reg2) != REG)
7801 regno2 = REGNO (reg2);
7803 if (SPARC_INT_REG_P (regno1) && SPARC_INT_REG_P (regno2))
7808 if ((SPARC_INT_REG_P (regno1) && SPARC_FP_REG_P (regno2))
7809 || (SPARC_FP_REG_P (regno1) && SPARC_INT_REG_P (regno2)))
7816 /* Return 1 if x and y are some kind of REG and they refer to
7817 different hard registers. This test is guaranteed to be
7818 run after reload. */
7821 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
7823 if (GET_CODE (x) != REG)
7825 if (GET_CODE (y) != REG)
7827 if (REGNO (x) == REGNO (y))
7832 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
7833 This makes them candidates for using ldd and std insns.
7835 Note reg1 and reg2 *must* be hard registers. */
7838 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
7840 /* We might have been passed a SUBREG. */
7841 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
7844 if (REGNO (reg1) % 2 != 0)
7847 /* Integer ldd is deprecated in SPARC V9. */
7848 if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
7851 return (REGNO (reg1) == REGNO (reg2) - 1);
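/* Worked examples of the conditions above (illustrative, not an extra rule):
   (%o0, %o1) and (%f2, %f3) qualify -- even-numbered first register with the
   second register immediately following -- while (%o1, %o2) fails the
   even-register test and (%o0, %o2) is not a consecutive pair; on V9 an
   integer pair such as (%o0, %o1) is rejected anyway, since integer ldd is
   deprecated there.  */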
7854 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
7857 This can only happen when addr1 and addr2, the addresses in mem1
7858 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
7859 addr1 must also be aligned on a 64-bit boundary.
7861 Also iff dependent_reg_rtx is not null it should not be used to
7862 compute the address for mem1, i.e. we cannot optimize a sequence
7874 But, note that the transformation from:
7879 is perfectly fine. Thus, the peephole2 patterns always pass us
7880 the destination register of the first load, never the second one.
7882 For stores we don't have a similar problem, so dependent_reg_rtx is
7886 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
7890 HOST_WIDE_INT offset1;
7892 /* The mems cannot be volatile. */
7893 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
7896 /* MEM1 should be aligned on a 64-bit boundary. */
7897 if (MEM_ALIGN (mem1) < 64)
7900 addr1 = XEXP (mem1, 0);
7901 addr2 = XEXP (mem2, 0);
7903 /* Extract a register number and offset (if used) from the first addr. */
7904 if (GET_CODE (addr1) == PLUS)
7906 /* If not a REG, return zero. */
7907 if (GET_CODE (XEXP (addr1, 0)) != REG)
7911 reg1 = REGNO (XEXP (addr1, 0));
7912 /* The offset must be constant! */
7913 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
7915 offset1 = INTVAL (XEXP (addr1, 1));
7918 else if (GET_CODE (addr1) != REG)
7922 reg1 = REGNO (addr1);
7923 /* This was a simple (mem (reg)) expression. Offset is 0. */
7927 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
7928 if (GET_CODE (addr2) != PLUS)
7931 if (GET_CODE (XEXP (addr2, 0)) != REG
7932 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
7935 if (reg1 != REGNO (XEXP (addr2, 0)))
7938 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
7941 /* The first offset must be evenly divisible by 8 to ensure the
7942 address is 64 bit aligned. */
7943 if (offset1 % 8 != 0)
7946 /* The offset for the second addr must be 4 more than the first addr. */
7947 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
7950 /* All the tests passed. addr1 and addr2 are valid for ldd and std
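/* Worked examples of the address tests above (illustrative, assuming a
   sufficiently aligned base): [%o0+16] / [%o0+20] is acceptable -- same base
   register, first offset a multiple of 8, second offset exactly 4 more --
   whereas [%o0+4] / [%o0+8] fails the 8-byte alignment of the first offset,
   and [%o0] / [%o1+4] fails because the base registers differ.  */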
7955 /* Return 1 if reg is a pseudo, or is the first register in
7956 a hard register pair. This makes it suitable for use in
7957 ldd and std insns. */
7960 register_ok_for_ldd (rtx reg)
7962 /* We might have been passed a SUBREG. */
7966 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
7967 return (REGNO (reg) % 2 == 0);
7972 /* Return 1 if OP is a memory whose address is known to be
7973 aligned to 8-byte boundary, or a pseudo during reload.
7974 This makes it suitable for use in ldd and std insns. */
7977 memory_ok_for_ldd (rtx op)
7981 /* In 64-bit mode, we assume that the address is word-aligned. */
7982 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
7985 if (! can_create_pseudo_p ()
7986 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
7989 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
7991 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
8000 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
8003 sparc_print_operand_punct_valid_p (unsigned char code)
8016 /* Implement TARGET_PRINT_OPERAND.
8017 Print operand X (an rtx) in assembler syntax to file FILE.
8018 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
8019 For `%' followed by punctuation, CODE is the punctuation and X is null. */
8022 sparc_print_operand (FILE *file, rtx x, int code)
8027 /* Output an insn in a delay slot. */
8029 sparc_indent_opcode = 1;
8031 fputs ("\n\t nop", file);
8034 /* Output an annul flag if there's nothing for the delay slot and we
8035 are optimizing. This is always used with '(' below.
8036 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
8037 this is a dbx bug. So, we only do this when optimizing.
8038 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
8039 Always emit a nop in case the next instruction is a branch. */
8040 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
8044 /* Output a 'nop' if there's nothing for the delay slot and we are
8045 not optimizing. This is always used with '*' above. */
8046 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
8047 fputs ("\n\t nop", file);
8048 else if (final_sequence)
8049 sparc_indent_opcode = 1;
8052 /* Output the right displacement from the saved PC on function return.
8053 The caller may have placed an "unimp" insn immediately after the call
8054 so we have to account for it. This insn is used in the 32-bit ABI
8055 when calling a function that returns a non-zero-sized structure. The
8056 64-bit ABI doesn't have it. Be careful to have this test be the same
8057 as that for the call. The exception is when sparc_std_struct_return
8058 is enabled, the psABI is followed exactly and the adjustment is made
8059 by the code in sparc_struct_value_rtx. The call emitted is the same
8060 when sparc_std_struct_return is enabled. */
8062 && cfun->returns_struct
8063 && !sparc_std_struct_return
8064 && DECL_SIZE (DECL_RESULT (current_function_decl))
8065 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
8067 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
8073 /* Output the Embedded Medium/Anywhere code model base register. */
8074 fputs (EMBMEDANY_BASE_REG, file);
8077 /* Print some local dynamic TLS name. */
8078 assemble_name (file, get_some_local_dynamic_name ());
8082 /* Adjust the operand to take into account a RESTORE operation. */
8083 if (GET_CODE (x) == CONST_INT)
8085 else if (GET_CODE (x) != REG)
8086 output_operand_lossage ("invalid %%Y operand");
8087 else if (REGNO (x) < 8)
8088 fputs (reg_names[REGNO (x)], file);
8089 else if (REGNO (x) >= 24 && REGNO (x) < 32)
8090 fputs (reg_names[REGNO (x)-16], file);
8092 output_operand_lossage ("invalid %%Y operand");
8095 /* Print out the low order register name of a register pair. */
8096 if (WORDS_BIG_ENDIAN)
8097 fputs (reg_names[REGNO (x)+1], file);
8099 fputs (reg_names[REGNO (x)], file);
8102 /* Print out the high order register name of a register pair. */
8103 if (WORDS_BIG_ENDIAN)
8104 fputs (reg_names[REGNO (x)], file);
8106 fputs (reg_names[REGNO (x)+1], file);
8109 /* Print out the second register name of a register pair or quad.
8110 I.e., R (%o0) => %o1. */
8111 fputs (reg_names[REGNO (x)+1], file);
8114 /* Print out the third register name of a register quad.
8115 I.e., S (%o0) => %o2. */
8116 fputs (reg_names[REGNO (x)+2], file);
8119 /* Print out the fourth register name of a register quad.
8120 I.e., T (%o0) => %o3. */
8121 fputs (reg_names[REGNO (x)+3], file);
8124 /* Print a condition code register. */
8125 if (REGNO (x) == SPARC_ICC_REG)
8127 /* We don't handle CC[X]_NOOVmode because they're not supposed
8129 if (GET_MODE (x) == CCmode)
8130 fputs ("%icc", file);
8131 else if (GET_MODE (x) == CCXmode)
8132 fputs ("%xcc", file);
8137 /* %fccN register */
8138 fputs (reg_names[REGNO (x)], file);
8141 /* Print the operand's address only. */
8142 output_address (XEXP (x, 0));
8145 /* In this case we need a register. Use %g0 if the
8146 operand is const0_rtx. */
8148 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
8150 fputs ("%g0", file);
8157 switch (GET_CODE (x))
8159 case IOR: fputs ("or", file); break;
8160 case AND: fputs ("and", file); break;
8161 case XOR: fputs ("xor", file); break;
8162 default: output_operand_lossage ("invalid %%A operand");
8167 switch (GET_CODE (x))
8169 case IOR: fputs ("orn", file); break;
8170 case AND: fputs ("andn", file); break;
8171 case XOR: fputs ("xnor", file); break;
8172 default: output_operand_lossage ("invalid %%B operand");
8176 /* This is used by the conditional move instructions. */
8179 enum rtx_code rc = GET_CODE (x);
8183 case NE: fputs ("ne", file); break;
8184 case EQ: fputs ("e", file); break;
8185 case GE: fputs ("ge", file); break;
8186 case GT: fputs ("g", file); break;
8187 case LE: fputs ("le", file); break;
8188 case LT: fputs ("l", file); break;
8189 case GEU: fputs ("geu", file); break;
8190 case GTU: fputs ("gu", file); break;
8191 case LEU: fputs ("leu", file); break;
8192 case LTU: fputs ("lu", file); break;
8193 case LTGT: fputs ("lg", file); break;
8194 case UNORDERED: fputs ("u", file); break;
8195 case ORDERED: fputs ("o", file); break;
8196 case UNLT: fputs ("ul", file); break;
8197 case UNLE: fputs ("ule", file); break;
8198 case UNGT: fputs ("ug", file); break;
8199 case UNGE: fputs ("uge", file); break;
8200 case UNEQ: fputs ("ue", file); break;
8201 default: output_operand_lossage ("invalid %%C operand");
8206 /* These are used by the movr instruction pattern. */
8209 enum rtx_code rc = GET_CODE (x);
8212 case NE: fputs ("ne", file); break;
8213 case EQ: fputs ("e", file); break;
8214 case GE: fputs ("gez", file); break;
8215 case LT: fputs ("lz", file); break;
8216 case LE: fputs ("lez", file); break;
8217 case GT: fputs ("gz", file); break;
8218 default: output_operand_lossage ("invalid %%D operand");
8225 /* Print a sign-extended character. */
8226 int i = trunc_int_for_mode (INTVAL (x), QImode);
8227 fprintf (file, "%d", i);
8232 /* Operand must be a MEM; write its address. */
8233 if (GET_CODE (x) != MEM)
8234 output_operand_lossage ("invalid %%f operand");
8235 output_address (XEXP (x, 0));
8240 /* Print a sign-extended 32-bit value. */
8242 if (GET_CODE(x) == CONST_INT)
8244 else if (GET_CODE(x) == CONST_DOUBLE)
8245 i = CONST_DOUBLE_LOW (x);
8248 output_operand_lossage ("invalid %%s operand");
8251 i = trunc_int_for_mode (i, SImode);
8252 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
8257 /* Do nothing special. */
8261 /* Undocumented flag. */
8262 output_operand_lossage ("invalid operand output code");
8265 if (GET_CODE (x) == REG)
8266 fputs (reg_names[REGNO (x)], file);
8267 else if (GET_CODE (x) == MEM)
8270 /* Poor Sun assembler doesn't understand absolute addressing. */
8271 if (CONSTANT_P (XEXP (x, 0)))
8272 fputs ("%g0+", file);
8273 output_address (XEXP (x, 0));
8276 else if (GET_CODE (x) == HIGH)
8278 fputs ("%hi(", file);
8279 output_addr_const (file, XEXP (x, 0));
8282 else if (GET_CODE (x) == LO_SUM)
8284 sparc_print_operand (file, XEXP (x, 0), 0);
8285 if (TARGET_CM_MEDMID)
8286 fputs ("+%l44(", file);
8288 fputs ("+%lo(", file);
8289 output_addr_const (file, XEXP (x, 1));
8292 else if (GET_CODE (x) == CONST_DOUBLE
8293 && (GET_MODE (x) == VOIDmode
8294 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
8296 if (CONST_DOUBLE_HIGH (x) == 0)
8297 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
8298 else if (CONST_DOUBLE_HIGH (x) == -1
8299 && CONST_DOUBLE_LOW (x) < 0)
8300 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
8302 output_operand_lossage ("long long constant not a valid immediate operand");
8304 else if (GET_CODE (x) == CONST_DOUBLE)
8305 output_operand_lossage ("floating point constant not a valid immediate operand");
8306 else { output_addr_const (file, x); }
8309 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
8312 sparc_print_operand_address (FILE *file, rtx x)
8314 register rtx base, index = 0;
8316 register rtx addr = x;
8319 fputs (reg_names[REGNO (addr)], file);
8320 else if (GET_CODE (addr) == PLUS)
8322 if (CONST_INT_P (XEXP (addr, 0)))
8323 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
8324 else if (CONST_INT_P (XEXP (addr, 1)))
8325 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
8327 base = XEXP (addr, 0), index = XEXP (addr, 1);
8328 if (GET_CODE (base) == LO_SUM)
8330 gcc_assert (USE_AS_OFFSETABLE_LO10
8332 && ! TARGET_CM_MEDMID);
8333 output_operand (XEXP (base, 0), 0);
8334 fputs ("+%lo(", file);
8335 output_address (XEXP (base, 1));
8336 fprintf (file, ")+%d", offset);
8340 fputs (reg_names[REGNO (base)], file);
8342 fprintf (file, "%+d", offset);
8343 else if (REG_P (index))
8344 fprintf (file, "+%s", reg_names[REGNO (index)]);
8345 else if (GET_CODE (index) == SYMBOL_REF
8346 || GET_CODE (index) == LABEL_REF
8347 || GET_CODE (index) == CONST)
8348 fputc ('+', file), output_addr_const (file, index);
8349 else gcc_unreachable ();
8352 else if (GET_CODE (addr) == MINUS
8353 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
8355 output_addr_const (file, XEXP (addr, 0));
8357 output_addr_const (file, XEXP (addr, 1));
8358 fputs ("-.)", file);
8360 else if (GET_CODE (addr) == LO_SUM)
8362 output_operand (XEXP (addr, 0), 0);
8363 if (TARGET_CM_MEDMID)
8364 fputs ("+%l44(", file);
8366 fputs ("+%lo(", file);
8367 output_address (XEXP (addr, 1));
8371 && GET_CODE (addr) == CONST
8372 && GET_CODE (XEXP (addr, 0)) == MINUS
8373 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
8374 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
8375 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
8377 addr = XEXP (addr, 0);
8378 output_addr_const (file, XEXP (addr, 0));
8379 /* Group the args of the second CONST in parenthesis. */
8381 /* Skip past the second CONST--it does nothing for us. */
8382 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
8383 /* Close the parenthesis. */
8388 output_addr_const (file, addr);
8392 /* Target hook for assembling integer objects. The sparc version has
8393 special handling for aligned DI-mode objects. */
8396 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
8398 /* ??? We only output .xword's for symbols and only then in environments
8399 where the assembler can handle them. */
8400 if (aligned_p && size == 8
8401 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
8405 assemble_integer_with_op ("\t.xword\t", x);
8410 assemble_aligned_integer (4, const0_rtx);
8411 assemble_aligned_integer (4, x);
8415 return default_assemble_integer (x, size, aligned_p);
8418 /* Return the value of a code used in the .proc pseudo-op that says
8419 what kind of result this function returns. For non-C types, we pick
8420 the closest C type. */
8422 #ifndef SHORT_TYPE_SIZE
8423 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
8426 #ifndef INT_TYPE_SIZE
8427 #define INT_TYPE_SIZE BITS_PER_WORD
8430 #ifndef LONG_TYPE_SIZE
8431 #define LONG_TYPE_SIZE BITS_PER_WORD
8434 #ifndef LONG_LONG_TYPE_SIZE
8435 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
8438 #ifndef FLOAT_TYPE_SIZE
8439 #define FLOAT_TYPE_SIZE BITS_PER_WORD
8442 #ifndef DOUBLE_TYPE_SIZE
8443 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8446 #ifndef LONG_DOUBLE_TYPE_SIZE
8447 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8451 sparc_type_code (register tree type)
8453 register unsigned long qualifiers = 0;
8454 register unsigned shift;
8456 /* Only the first 30 bits of the qualifier are valid. We must refrain from
8457 setting more, since some assemblers will give an error for this. Also,
8458 we must be careful to avoid shifts of 32 bits or more to avoid getting
8459 unpredictable results. */
8461 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
8463 switch (TREE_CODE (type))
8469 qualifiers |= (3 << shift);
8474 qualifiers |= (2 << shift);
8478 case REFERENCE_TYPE:
8480 qualifiers |= (1 << shift);
8484 return (qualifiers | 8);
8487 case QUAL_UNION_TYPE:
8488 return (qualifiers | 9);
8491 return (qualifiers | 10);
8494 return (qualifiers | 16);
8497 /* If this is a range type, consider it to be the underlying
8499 if (TREE_TYPE (type) != 0)
8502 /* Carefully distinguish all the standard types of C,
8503 without messing up if the language is not C. We do this by
8504 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
8505 look at both the names and the above fields, but that's redundant.
8506 Any type whose size is between two C types will be considered
8507 to be the wider of the two types. Also, we do not have a
8508 special code to use for "long long", so anything wider than
8509 long is treated the same. Note that we can't distinguish
8510 between "int" and "long" in this code if they are the same
8511 size, but that's fine, since neither can the assembler. */
8513 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
8514 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
8516 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
8517 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
8519 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
8520 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
8523 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
8526 /* If this is a range type, consider it to be the underlying
8528 if (TREE_TYPE (type) != 0)
8531 /* Carefully distinguish all the standard types of C,
8532 without messing up if the language is not C. */
8534 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
8535 return (qualifiers | 6);
8538 return (qualifiers | 7);
8540 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
8541 /* ??? We need to distinguish between double and float complex types,
8542 but I don't know how yet because I can't reach this code from
8543 existing front-ends. */
8544 return (qualifiers | 7); /* Who knows? */
8547 case BOOLEAN_TYPE: /* Boolean truth value type. */
8553 gcc_unreachable (); /* Not a type! */
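/* A worked example of the encoding built above (an inference from the code,
   not from any assembler documentation): for "int *" the first loop
   iteration records the pointer level as (1 << 6), and the next iteration
   reaches the plain-int case, so the returned code is (1 << 6) | 4 == 0x44;
   "int **" likewise yields (1 << 8) | (1 << 6) | 4.  */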
8560 /* Nested function support. */
8562 /* Emit RTL insns to initialize the variable parts of a trampoline.
8563 FNADDR is an RTX for the address of the function's pure code.
8564 CXT is an RTX for the static chain value for the function.
8566 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
8567 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
8568 (to store insns). This is a bit excessive. Perhaps a different
8569 mechanism would be better here.
8571 Emit enough FLUSH insns to synchronize the data and instruction caches. */
8574 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8576 /* SPARC 32-bit trampoline:
8579 sethi %hi(static), %g2
8581 or %g2, %lo(static), %g2
8583 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
8584 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
8588 (adjust_address (m_tramp, SImode, 0),
8589 expand_binop (SImode, ior_optab,
8590 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
8591 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
8592 NULL_RTX, 1, OPTAB_DIRECT));
8595 (adjust_address (m_tramp, SImode, 4),
8596 expand_binop (SImode, ior_optab,
8597 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
8598 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
8599 NULL_RTX, 1, OPTAB_DIRECT));
8602 (adjust_address (m_tramp, SImode, 8),
8603 expand_binop (SImode, ior_optab,
8604 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
8605 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
8606 NULL_RTX, 1, OPTAB_DIRECT));
8609 (adjust_address (m_tramp, SImode, 12),
8610 expand_binop (SImode, ior_optab,
8611 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
8612 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
8613 NULL_RTX, 1, OPTAB_DIRECT));
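/* Decoding the instruction words above by hand (offered as an illustration;
   the SETHI and JMPL bit layouts are given in the comment at the top of this
   function):

     0x03000000   sethi  %hi(0), %g1     -- fnaddr high bits OR'ed in
     0x05000000   sethi  %hi(0), %g2     -- static chain high bits OR'ed in
     0x81c06000   jmp    %g1 + 0         -- low 10 bits of fnaddr OR'ed in
     0x8410a000   or     %g2, 0, %g2     -- low 10 bits of the chain OR'ed in  */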
8615 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
8616 aligned on a 16 byte boundary so one flush clears it all. */
8617 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
8618 if (sparc_cpu != PROCESSOR_ULTRASPARC
8619 && sparc_cpu != PROCESSOR_ULTRASPARC3
8620 && sparc_cpu != PROCESSOR_NIAGARA
8621 && sparc_cpu != PROCESSOR_NIAGARA2
8622 && sparc_cpu != PROCESSOR_NIAGARA3
8623 && sparc_cpu != PROCESSOR_NIAGARA4)
8624 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
8626 /* Call __enable_execute_stack after writing onto the stack to make sure
8627 the stack address is accessible. */
8628 #ifdef HAVE_ENABLE_EXECUTE_STACK
8629 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8630 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8635 /* The 64-bit version is simpler because it makes more sense to load the
8636 values as "immediate" data out of the trampoline. It's also easier since
8637 we can read the PC without clobbering a register. */
8640 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8642 /* SPARC 64-bit trampoline:
8651 emit_move_insn (adjust_address (m_tramp, SImode, 0),
8652 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
8653 emit_move_insn (adjust_address (m_tramp, SImode, 4),
8654 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
8655 emit_move_insn (adjust_address (m_tramp, SImode, 8),
8656 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
8657 emit_move_insn (adjust_address (m_tramp, SImode, 12),
8658 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
8659 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
8660 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
8661 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
8663 if (sparc_cpu != PROCESSOR_ULTRASPARC
8664 && sparc_cpu != PROCESSOR_ULTRASPARC3
8665 && sparc_cpu != PROCESSOR_NIAGARA
8666 && sparc_cpu != PROCESSOR_NIAGARA2
8667 && sparc_cpu != PROCESSOR_NIAGARA3
8668 && sparc_cpu != PROCESSOR_NIAGARA4)
8669 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
8671 /* Call __enable_execute_stack after writing onto the stack to make sure
8672 the stack address is accessible. */
8673 #ifdef HAVE_ENABLE_EXECUTE_STACK
8674 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8675 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8679 /* Worker for TARGET_TRAMPOLINE_INIT. */
8682 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
8684 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
8685 cxt = force_reg (Pmode, cxt);
8687 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
8689 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
8692 /* Adjust the cost of a scheduling dependency. Return the new cost of
8693 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
8696 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8698 enum attr_type insn_type;
8700 if (! recog_memoized (insn))
8703 insn_type = get_attr_type (insn);
8705 if (REG_NOTE_KIND (link) == 0)
8707 /* Data dependency; DEP_INSN writes a register that INSN reads some
8710 /* if a load, then the dependence must be on the memory address;
8711 add an extra "cycle". Note that the cost could be two cycles
8712 if the reg was written late in an instruction group; we cannot tell
8714 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
8717 /* Get the delay only if the address of the store is the dependence. */
8718 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
8720 rtx pat = PATTERN(insn);
8721 rtx dep_pat = PATTERN (dep_insn);
8723 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8724 return cost; /* This should not happen! */
8726 /* The dependency between the two instructions was on the data that
8727 is being stored. Assume that this implies that the address of the
8728 store is not dependent. */
8729 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8732 return cost + 3; /* An approximation. */
8735 /* A shift instruction cannot receive its data from an instruction
8736 in the same cycle; add a one cycle penalty. */
8737 if (insn_type == TYPE_SHIFT)
8738 return cost + 3; /* Split before cascade into shift. */
8742 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
8743 INSN writes some cycles later. */
8745 /* These are only significant for the fpu unit; writing a fp reg before
8746 the fpu has finished with it stalls the processor. */
8748 /* Reusing an integer register causes no problems. */
8749 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8757 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8759 enum attr_type insn_type, dep_type;
8760 rtx pat = PATTERN(insn);
8761 rtx dep_pat = PATTERN (dep_insn);
8763 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
8766 insn_type = get_attr_type (insn);
8767 dep_type = get_attr_type (dep_insn);
8769 switch (REG_NOTE_KIND (link))
8772 /* Data dependency; DEP_INSN writes a register that INSN reads some
8779 /* Get the delay iff the address of the store is the dependence. */
8780 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8783 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8790 /* If a load, then the dependence must be on the memory address. If
8791 the addresses aren't equal, then it might be a false dependency. */
8792 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
8794 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
8795 || GET_CODE (SET_DEST (dep_pat)) != MEM
8796 || GET_CODE (SET_SRC (pat)) != MEM
8797 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
8798 XEXP (SET_SRC (pat), 0)))
8806 /* Compare to branch latency is 0. There is no benefit from
8807 separating compare and branch. */
8808 if (dep_type == TYPE_COMPARE)
8810 /* Floating point compare to branch latency is less than
8811 compare to conditional move. */
8812 if (dep_type == TYPE_FPCMP)
8821 /* Anti-dependencies only penalize the fpu unit. */
8822 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8834 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
8838 case PROCESSOR_SUPERSPARC:
8839 cost = supersparc_adjust_cost (insn, link, dep, cost);
8841 case PROCESSOR_HYPERSPARC:
8842 case PROCESSOR_SPARCLITE86X:
8843 cost = hypersparc_adjust_cost (insn, link, dep, cost);
8852 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
8853 int sched_verbose ATTRIBUTE_UNUSED,
8854 int max_ready ATTRIBUTE_UNUSED)
8858 sparc_use_sched_lookahead (void)
8860 if (sparc_cpu == PROCESSOR_NIAGARA
8861 || sparc_cpu == PROCESSOR_NIAGARA2
8862 || sparc_cpu == PROCESSOR_NIAGARA3
8863 || sparc_cpu == PROCESSOR_NIAGARA4)
8865 if (sparc_cpu == PROCESSOR_ULTRASPARC
8866 || sparc_cpu == PROCESSOR_ULTRASPARC3)
8868 if ((1 << sparc_cpu) &
8869 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
8870 (1 << PROCESSOR_SPARCLITE86X)))
8876 sparc_issue_rate (void)
8880 case PROCESSOR_NIAGARA:
8881 case PROCESSOR_NIAGARA2:
8882 case PROCESSOR_NIAGARA3:
8883 case PROCESSOR_NIAGARA4:
8887 /* Assume V9 processors are capable of at least dual-issue. */
8889 case PROCESSOR_SUPERSPARC:
8891 case PROCESSOR_HYPERSPARC:
8892 case PROCESSOR_SPARCLITE86X:
8894 case PROCESSOR_ULTRASPARC:
8895 case PROCESSOR_ULTRASPARC3:
8901 set_extends (rtx insn)
8903 register rtx pat = PATTERN (insn);
8905 switch (GET_CODE (SET_SRC (pat)))
8907 /* Load and some shift instructions zero extend. */
8910 /* sethi clears the high bits */
8912 /* LO_SUM is used with sethi. sethi cleared the high
8913 bits and the values used with lo_sum are positive */
8915 /* Store flag stores 0 or 1 */
8925 rtx op0 = XEXP (SET_SRC (pat), 0);
8926 rtx op1 = XEXP (SET_SRC (pat), 1);
8927 if (GET_CODE (op1) == CONST_INT)
8928 return INTVAL (op1) >= 0;
8929 if (GET_CODE (op0) != REG)
8931 if (sparc_check_64 (op0, insn) == 1)
8933 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8938 rtx op0 = XEXP (SET_SRC (pat), 0);
8939 rtx op1 = XEXP (SET_SRC (pat), 1);
8940 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
8942 if (GET_CODE (op1) == CONST_INT)
8943 return INTVAL (op1) >= 0;
8944 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8947 return GET_MODE (SET_SRC (pat)) == SImode;
8948 /* Positive integers leave the high bits zero. */
8950 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
8952 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
8955 return - (GET_MODE (SET_SRC (pat)) == SImode);
8957 return sparc_check_64 (SET_SRC (pat), insn);
8963 /* We _ought_ to have only one kind per function, but... */
8964 static GTY(()) rtx sparc_addr_diff_list;
8965 static GTY(()) rtx sparc_addr_list;
8968 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
8970 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
8972 sparc_addr_diff_list
8973 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
8975 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
8979 sparc_output_addr_vec (rtx vec)
8981 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8982 int idx, vlen = XVECLEN (body, 0);
8984 #ifdef ASM_OUTPUT_ADDR_VEC_START
8985 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8988 #ifdef ASM_OUTPUT_CASE_LABEL
8989 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8992 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8995 for (idx = 0; idx < vlen; idx++)
8997 ASM_OUTPUT_ADDR_VEC_ELT
8998 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
9001 #ifdef ASM_OUTPUT_ADDR_VEC_END
9002 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
9007 sparc_output_addr_diff_vec (rtx vec)
9009 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
9010 rtx base = XEXP (XEXP (body, 0), 0);
9011 int idx, vlen = XVECLEN (body, 1);
9013 #ifdef ASM_OUTPUT_ADDR_VEC_START
9014 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
9017 #ifdef ASM_OUTPUT_CASE_LABEL
9018 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
9021 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
9024 for (idx = 0; idx < vlen; idx++)
9026 ASM_OUTPUT_ADDR_DIFF_ELT
9029 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
9030 CODE_LABEL_NUMBER (base));
9033 #ifdef ASM_OUTPUT_ADDR_VEC_END
9034 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
9039 sparc_output_deferred_case_vectors (void)
9044 if (sparc_addr_list == NULL_RTX
9045 && sparc_addr_diff_list == NULL_RTX)
9048 /* Align to cache line in the function's code section. */
9049 switch_to_section (current_function_section ());
9051 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9053 ASM_OUTPUT_ALIGN (asm_out_file, align);
9055 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
9056 sparc_output_addr_vec (XEXP (t, 0));
9057 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
9058 sparc_output_addr_diff_vec (XEXP (t, 0));
9060 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
9063 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
9064 unknown. Return 1 if the high bits are zero, -1 if the register is sign extended. */
9067 sparc_check_64 (rtx x, rtx insn)
9069 /* If a register is set only once it is safe to ignore insns this
9070 code does not know how to handle. The loop will either recognize
9071 the single set and return the correct value or fail to recognize it and return 0. */
9076 gcc_assert (GET_CODE (x) == REG);
9078 if (GET_MODE (x) == DImode)
9079 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
9081 if (flag_expensive_optimizations
9082 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
9088 insn = get_last_insn_anywhere ();
9093 while ((insn = PREV_INSN (insn)))
9095 switch (GET_CODE (insn))
9108 rtx pat = PATTERN (insn);
9109 if (GET_CODE (pat) != SET)
9111 if (rtx_equal_p (x, SET_DEST (pat)))
9112 return set_extends (insn);
9113 if (y && rtx_equal_p (y, SET_DEST (pat)))
9114 return set_extends (insn);
9115 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
9123 /* Returns assembly code to perform a DImode shift using
9124 a 64-bit global or out register on SPARC-V8+. */
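/* Note (added for clarity): the strategy below is to assemble the 64-bit
   source value in a single 64-bit register (the scratch %3, or the
   destination itself), perform the shift there, and then split the result
   back into the %H0/%L0 register pair with srlx (plus a mov for the
   scratch alternative).  */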
9126 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
9128 static char asm_code[60];
9130 /* The scratch register is only required when the destination
9131 register is not a 64-bit global or out register. */
9132 if (which_alternative != 2)
9133 operands[3] = operands[0];
9135 /* We can only shift by constants <= 63. */
9136 if (GET_CODE (operands[2]) == CONST_INT)
9137 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
9139 if (GET_CODE (operands[1]) == CONST_INT)
9141 output_asm_insn ("mov\t%1, %3", operands);
9145 output_asm_insn ("sllx\t%H1, 32, %3", operands);
9146 if (sparc_check_64 (operands[1], insn) <= 0)
9147 output_asm_insn ("srl\t%L1, 0, %L1", operands);
9148 output_asm_insn ("or\t%L1, %3, %3", operands);
9151 strcpy(asm_code, opcode);
9153 if (which_alternative != 2)
9154 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
9156 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
9159 /* Output rtl to increment the profiler label LABELNO
9160 for profiling a function entry. */
9163 sparc_profile_hook (int labelno)
9168 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
9169 if (NO_PROFILE_COUNTERS)
9171 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
9175 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9176 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
9177 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
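/* With profile counters enabled, the address of the per-call-site label
   LPn generated above is passed to the mcount routine as its single
   argument, presumably so that it can maintain a counter for this site.  */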
9181 #ifdef TARGET_SOLARIS
9182 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
9185 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
9186 tree decl ATTRIBUTE_UNUSED)
9188 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
9190 solaris_elf_asm_comdat_section (name, flags, decl);
9194 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
9196 if (!(flags & SECTION_DEBUG))
9197 fputs (",#alloc", asm_out_file);
9198 if (flags & SECTION_WRITE)
9199 fputs (",#write", asm_out_file);
9200 if (flags & SECTION_TLS)
9201 fputs (",#tls", asm_out_file);
9202 if (flags & SECTION_CODE)
9203 fputs (",#execinstr", asm_out_file);
9205 /* ??? Handle SECTION_BSS. */
9207 fputc ('\n', asm_out_file);
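/* As an illustration, a writable (non-debug) data section named ".mydata"
   (name hypothetical) comes out as:
     .section	".mydata",#alloc,#write
   with ",#tls" and ",#execinstr" appended for TLS and code sections
   respectively.  */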
9209 #endif /* TARGET_SOLARIS */
9211 /* We do not allow indirect calls to be optimized into sibling calls.
9213 We cannot use sibling calls when delayed branches are disabled
9214 because they will likely require the call delay slot to be filled.
9216 Also, on SPARC 32-bit we cannot emit a sibling call when the
9217 current function returns a structure. This is because the "unimp
9218 after call" convention would cause the callee to return to the
9219 wrong place. The generic code already disallows cases where the
9220 function being called returns a structure.
9222 It may seem strange how this last case could occur. Usually there
9223 is code after the call which jumps to epilogue code which dumps the
9224 return value into the struct return area. That ought to invalidate
9225 the sibling call right? Well, in the C++ case we can end up passing
9226 the pointer to the struct return area to a constructor (which returns
9227 void) and then nothing else happens. Such a sibling call would look
9228 valid without the added check here.
9230 VxWorks PIC PLT entries require the global pointer to be initialized
9231 on entry. We therefore can't emit sibling calls to them. */
9233 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
9236 && flag_delayed_branch
9237 && (TARGET_ARCH64 || ! cfun->returns_struct)
9238 && !(TARGET_VXWORKS_RTP
9240 && !targetm.binds_local_p (decl)));
9243 /* libfunc renaming. */
9246 sparc_init_libfuncs (void)
9250 /* Use the subroutines that Sun's library provides for integer
9251 multiply and divide. The `*' prevents an underscore from
9252 being prepended by the compiler. .umul is a little faster than .mul. */
9254 set_optab_libfunc (smul_optab, SImode, "*.umul");
9255 set_optab_libfunc (sdiv_optab, SImode, "*.div");
9256 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
9257 set_optab_libfunc (smod_optab, SImode, "*.rem");
9258 set_optab_libfunc (umod_optab, SImode, "*.urem");
9260 /* TFmode arithmetic. These names are part of the SPARC 32bit ABI. */
9261 set_optab_libfunc (add_optab, TFmode, "_Q_add");
9262 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
9263 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
9264 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
9265 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
9267 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
9268 is because with soft-float, the SFmode and DFmode sqrt
9269 instructions will be absent, and the compiler will notice and
9270 try to use the TFmode sqrt instruction for calls to the
9271 builtin function sqrt, but this fails. */
9273 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
9275 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
9276 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
9277 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
9278 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
9279 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
9280 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
9282 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
9283 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
9284 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
9285 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
9287 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
9288 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
9289 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
9290 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
9292 if (DITF_CONVERSION_LIBFUNCS)
9294 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
9295 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
9296 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
9297 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
9300 if (SUN_CONVERSION_LIBFUNCS)
9302 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
9303 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
9304 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
9305 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
9310 /* In the SPARC 64bit ABI, SImode multiply and divide functions
9311 do not exist in the library. Make sure the compiler does not
9312 emit calls to them by accident. (It should always use the
9313 hardware instructions.) */
9314 set_optab_libfunc (smul_optab, SImode, 0);
9315 set_optab_libfunc (sdiv_optab, SImode, 0);
9316 set_optab_libfunc (udiv_optab, SImode, 0);
9317 set_optab_libfunc (smod_optab, SImode, 0);
9318 set_optab_libfunc (umod_optab, SImode, 0);
9320 if (SUN_INTEGER_MULTIPLY_64)
9322 set_optab_libfunc (smul_optab, DImode, "__mul64");
9323 set_optab_libfunc (sdiv_optab, DImode, "__div64");
9324 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
9325 set_optab_libfunc (smod_optab, DImode, "__rem64");
9326 set_optab_libfunc (umod_optab, DImode, "__urem64");
9329 if (SUN_CONVERSION_LIBFUNCS)
9331 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
9332 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
9333 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
9334 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
9339 static tree def_builtin(const char *name, int code, tree type)
9341 return add_builtin_function(name, type, code, BUILT_IN_MD, NULL,
9345 static tree def_builtin_const(const char *name, int code, tree type)
9347 tree t = def_builtin(name, code, type);
9350 TREE_READONLY (t) = 1;
9355 /* Implement the TARGET_INIT_BUILTINS target hook.
9356 Create builtin functions for special SPARC instructions. */
9359 sparc_init_builtins (void)
9362 sparc_vis_init_builtins ();
9365 /* Create builtin functions for VIS 1.0 instructions. */
9368 sparc_vis_init_builtins (void)
9370 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
9371 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
9372 tree v4hi = build_vector_type (intHI_type_node, 4);
9373 tree v2hi = build_vector_type (intHI_type_node, 2);
9374 tree v2si = build_vector_type (intSI_type_node, 2);
9375 tree v1si = build_vector_type (intSI_type_node, 1);
9377 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
9378 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
9379 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
9380 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
9381 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
9382 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
9383 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
9384 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
9385 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
9386 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
9387 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
9388 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
9389 tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
9390 tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
9391 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
9393 intDI_type_node, 0);
9394 tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
9396 tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
9398 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
9400 intDI_type_node, 0);
9401 tree si_ftype_si_si = build_function_type_list (intSI_type_node,
9403 intSI_type_node, 0);
9404 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
9406 intSI_type_node, 0);
9407 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
9409 intDI_type_node, 0);
9410 tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
9413 tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
9416 tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
9418 tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
9420 tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
9422 tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
9424 tree void_ftype_di = build_function_type_list (void_type_node,
9425 intDI_type_node, 0);
9426 tree di_ftype_void = build_function_type_list (intDI_type_node,
9428 tree void_ftype_si = build_function_type_list (void_type_node,
9429 intSI_type_node, 0);
9430 tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
9432 float_type_node, 0);
9433 tree df_ftype_df_df = build_function_type_list (double_type_node,
9435 double_type_node, 0);
9437 /* Packing and expanding vectors. */
9438 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
9440 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
9441 v8qi_ftype_v2si_v8qi);
9442 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
9444 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
9446 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
9447 v8qi_ftype_v4qi_v4qi);
9449 /* Multiplications. */
9450 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
9451 v4hi_ftype_v4qi_v4hi);
9452 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
9453 v4hi_ftype_v4qi_v2hi);
9454 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
9455 v4hi_ftype_v4qi_v2hi);
9456 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
9457 v4hi_ftype_v8qi_v4hi);
9458 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
9459 v4hi_ftype_v8qi_v4hi);
9460 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
9461 v2si_ftype_v4qi_v2hi);
9462 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
9463 v2si_ftype_v4qi_v2hi);
9465 /* Data aligning. */
9466 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
9467 v4hi_ftype_v4hi_v4hi);
9468 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
9469 v8qi_ftype_v8qi_v8qi);
9470 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
9471 v2si_ftype_v2si_v2si);
9472 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
9475 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
9477 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
9482 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
9484 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
9489 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
9491 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
9495 /* Pixel distance. */
9496 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
9497 di_ftype_v8qi_v8qi_di);
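/* Illustrative use from C code (the vector typedef is just one way to
   declare operands of the signature above):
     typedef unsigned char vec8 __attribute__ ((vector_size (8)));
     long long acc = 0;
     acc = __builtin_vis_pdist (a, b, acc);
   which accumulates the sum of absolute byte differences of A and B.  */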
9499 /* Edge handling. */
9502 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
9504 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
9506 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
9508 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
9510 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
9512 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
9516 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
9518 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
9520 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
9522 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
9524 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
9526 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
9532 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
9534 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
9536 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
9538 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
9540 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
9542 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
9546 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
9548 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
9550 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
9552 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
9554 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
9556 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
9561 /* Pixel compare. */
9564 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
9565 di_ftype_v4hi_v4hi);
9566 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
9567 di_ftype_v2si_v2si);
9568 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
9569 di_ftype_v4hi_v4hi);
9570 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
9571 di_ftype_v2si_v2si);
9572 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
9573 di_ftype_v4hi_v4hi);
9574 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
9575 di_ftype_v2si_v2si);
9576 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
9577 di_ftype_v4hi_v4hi);
9578 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
9579 di_ftype_v2si_v2si);
9583 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
9584 si_ftype_v4hi_v4hi);
9585 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
9586 si_ftype_v2si_v2si);
9587 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
9588 si_ftype_v4hi_v4hi);
9589 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
9590 si_ftype_v2si_v2si);
9591 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
9592 si_ftype_v4hi_v4hi);
9593 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
9594 si_ftype_v2si_v2si);
9595 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
9596 si_ftype_v4hi_v4hi);
9597 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
9598 si_ftype_v2si_v2si);
9601 /* Addition and subtraction. */
9602 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
9603 v4hi_ftype_v4hi_v4hi);
9604 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
9605 v2hi_ftype_v2hi_v2hi);
9606 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
9607 v2si_ftype_v2si_v2si);
9608 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
9609 v1si_ftype_v1si_v1si);
9610 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
9611 v4hi_ftype_v4hi_v4hi);
9612 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
9613 v2hi_ftype_v2hi_v2hi);
9614 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
9615 v2si_ftype_v2si_v2si);
9616 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
9617 v1si_ftype_v1si_v1si);
9619 /* Three-dimensional array addressing. */
9622 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
9624 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
9626 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
9631 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
9633 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
9635 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
9641 /* Byte mask and shuffle */
9643 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
9646 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
9648 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
9649 v4hi_ftype_v4hi_v4hi);
9650 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
9651 v8qi_ftype_v8qi_v8qi);
9652 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
9653 v2si_ftype_v2si_v2si);
9654 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
9662 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
9664 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
9666 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
9671 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
9673 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
9675 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
9679 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
9680 v4hi_ftype_v4hi_v4hi);
9682 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
9683 v4hi_ftype_v4hi_v4hi);
9684 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
9685 v4hi_ftype_v4hi_v4hi);
9686 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
9687 v4hi_ftype_v4hi_v4hi);
9688 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
9689 v4hi_ftype_v4hi_v4hi);
9690 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
9691 v2si_ftype_v2si_v2si);
9692 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
9693 v2si_ftype_v2si_v2si);
9694 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
9695 v2si_ftype_v2si_v2si);
9696 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
9697 v2si_ftype_v2si_v2si);
9700 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
9701 di_ftype_v8qi_v8qi);
9703 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
9704 si_ftype_v8qi_v8qi);
9706 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
9707 v4hi_ftype_v4hi_v4hi);
9708 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
9710 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
9713 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
9714 v4hi_ftype_v4hi_v4hi);
9715 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
9716 v2hi_ftype_v2hi_v2hi);
9717 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
9718 v4hi_ftype_v4hi_v4hi);
9719 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
9720 v2hi_ftype_v2hi_v2hi);
9721 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
9722 v2si_ftype_v2si_v2si);
9723 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
9724 v1si_ftype_v1si_v1si);
9725 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
9726 v2si_ftype_v2si_v2si);
9727 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
9728 v1si_ftype_v1si_v1si);
9732 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
9733 di_ftype_v8qi_v8qi);
9734 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
9735 di_ftype_v8qi_v8qi);
9736 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
9737 di_ftype_v8qi_v8qi);
9738 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
9739 di_ftype_v8qi_v8qi);
9743 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
9744 si_ftype_v8qi_v8qi);
9745 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
9746 si_ftype_v8qi_v8qi);
9747 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
9748 si_ftype_v8qi_v8qi);
9749 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
9750 si_ftype_v8qi_v8qi);
9753 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
9755 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
9757 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
9759 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
9761 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
9763 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
9766 def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
9768 def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
9770 def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
9775 /* Handle TARGET_EXPAND_BUILTIN target hook.
9776 Expand builtin functions for sparc intrinsics. */
9779 sparc_expand_builtin (tree exp, rtx target,
9780 rtx subtarget ATTRIBUTE_UNUSED,
9781 enum machine_mode tmode ATTRIBUTE_UNUSED,
9782 int ignore ATTRIBUTE_UNUSED)
9785 call_expr_arg_iterator iter;
9786 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9787 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
9792 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9796 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9798 || GET_MODE (target) != tmode
9799 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9800 op[0] = gen_reg_rtx (tmode);
9804 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9806 const struct insn_operand_data *insn_op;
9809 if (arg == error_mark_node)
9813 idx = arg_count - !nonvoid;
9814 insn_op = &insn_data[icode].operand[idx];
9815 op[arg_count] = expand_normal (arg);
9817 if (insn_op->mode == V1DImode
9818 && GET_MODE (op[arg_count]) == DImode)
9819 op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
9820 else if (insn_op->mode == V1SImode
9821 && GET_MODE (op[arg_count]) == SImode)
9822 op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
9824 if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
9826 op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
9832 pat = GEN_FCN (icode) (op[0]);
9836 pat = GEN_FCN (icode) (op[0], op[1]);
9838 pat = GEN_FCN (icode) (op[1]);
9841 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
9844 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
9862 sparc_vis_mul8x16 (int e8, int e16)
9864 return (e8 * e16 + 128) / 256;
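/* For example, e8 = 200 and e16 = 300 gives (60000 + 128) / 256 = 234,
   i.e. the 8x16 product scaled by 1/256 with rounding to nearest.  */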
9867 /* Multiply the vector elements in ELTS0 to the elements in ELTS1 as specified
9868 by FNCODE. All of the elements in ELTS0 and ELTS1 lists must be integer
9869 constants. A tree list with the results of the multiplications is returned,
9870 and each element in the list is of INNER_TYPE. */
9873 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
9875 tree n_elts = NULL_TREE;
9880 case CODE_FOR_fmul8x16_vis:
9881 for (; elts0 && elts1;
9882 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
9885 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9886 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
9887 n_elts = tree_cons (NULL_TREE,
9888 build_int_cst (inner_type, val),
9893 case CODE_FOR_fmul8x16au_vis:
9894 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
9896 for (; elts0; elts0 = TREE_CHAIN (elts0))
9899 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9901 n_elts = tree_cons (NULL_TREE,
9902 build_int_cst (inner_type, val),
9907 case CODE_FOR_fmul8x16al_vis:
9908 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
9910 for (; elts0; elts0 = TREE_CHAIN (elts0))
9913 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9915 n_elts = tree_cons (NULL_TREE,
9916 build_int_cst (inner_type, val),
9925 return nreverse (n_elts);
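/* Note: for fmul8x16au the first element of ELTS1 is used as the common
   16-bit multiplier, while for fmul8x16al it is the second element, as
   selected by the TREE_CHAIN above.  */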
9928 /* Handle TARGET_FOLD_BUILTIN target hook.
9929 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
9930 result of the function call is ignored. NULL_TREE is returned if the
9931 function could not be folded. */
9934 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
9935 tree *args, bool ignore)
9937 tree arg0, arg1, arg2;
9938 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
9939 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
9943 /* Note that a switch statement instead of the sequence of tests would
9944 be incorrect as many of the CODE_FOR values could be CODE_FOR_nothing
9945 and that would yield multiple alternatives with identical values. */
9946 if (icode == CODE_FOR_alignaddrsi_vis
9947 || icode == CODE_FOR_alignaddrdi_vis
9948 || icode == CODE_FOR_wrgsr_vis
9949 || icode == CODE_FOR_bmasksi_vis
9950 || icode == CODE_FOR_bmaskdi_vis
9951 || icode == CODE_FOR_cmask8si_vis
9952 || icode == CODE_FOR_cmask8di_vis
9953 || icode == CODE_FOR_cmask16si_vis
9954 || icode == CODE_FOR_cmask16di_vis
9955 || icode == CODE_FOR_cmask32si_vis
9956 || icode == CODE_FOR_cmask32di_vis)
9959 return build_zero_cst (rtype);
9964 case CODE_FOR_fexpand_vis:
9968 if (TREE_CODE (arg0) == VECTOR_CST)
9970 tree inner_type = TREE_TYPE (rtype);
9971 tree elts = TREE_VECTOR_CST_ELTS (arg0);
9972 tree n_elts = NULL_TREE;
9974 for (; elts; elts = TREE_CHAIN (elts))
9976 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
9977 n_elts = tree_cons (NULL_TREE,
9978 build_int_cst (inner_type, val),
9981 return build_vector (rtype, nreverse (n_elts));
9985 case CODE_FOR_fmul8x16_vis:
9986 case CODE_FOR_fmul8x16au_vis:
9987 case CODE_FOR_fmul8x16al_vis:
9993 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
9995 tree inner_type = TREE_TYPE (rtype);
9996 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
9997 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
9998 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
10001 return build_vector (rtype, n_elts);
10005 case CODE_FOR_fpmerge_vis:
10011 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
10013 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
10014 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
10015 tree n_elts = NULL_TREE;
10017 for (; elts0 && elts1;
10018 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
10020 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
10021 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
10024 return build_vector (rtype, nreverse (n_elts));
10028 case CODE_FOR_pdist_vis:
10036 if (TREE_CODE (arg0) == VECTOR_CST
10037 && TREE_CODE (arg1) == VECTOR_CST
10038 && TREE_CODE (arg2) == INTEGER_CST)
10041 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
10042 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
10043 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
10044 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
10046 for (; elts0 && elts1;
10047 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
10049 unsigned HOST_WIDE_INT
10050 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
10051 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
10052 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
10053 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
10055 unsigned HOST_WIDE_INT l;
10058 overflow |= neg_double (low1, high1, &l, &h);
10059 overflow |= add_double (low0, high0, l, h, &l, &h);
10061 overflow |= neg_double (l, h, &l, &h);
10063 overflow |= add_double (low, high, l, h, &low, &high);
10066 gcc_assert (overflow == 0);
10068 return build_int_cst_wide (rtype, low, high);
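/* In effect the folding above computes the sum of absolute differences
   of the byte elements and adds it to the 64-bit accumulator ARG2,
   mirroring what the pdist instruction does at run time.  */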
10078 /* ??? This duplicates information provided to the compiler by the
10079 ??? scheduler description. Some day, teach genautomata to output
10080 ??? the latencies and then CSE will just use that. */
10083 sparc_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
10084 int *total, bool speed ATTRIBUTE_UNUSED)
10086 enum machine_mode mode = GET_MODE (x);
10087 bool float_mode_p = FLOAT_MODE_P (mode);
10092 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
10110 if (GET_MODE (x) == VOIDmode
10111 && ((CONST_DOUBLE_HIGH (x) == 0
10112 && CONST_DOUBLE_LOW (x) < 0x1000)
10113 || (CONST_DOUBLE_HIGH (x) == -1
10114 && CONST_DOUBLE_LOW (x) < 0
10115 && CONST_DOUBLE_LOW (x) >= -0x1000)))
10122 /* If outer-code was a sign or zero extension, a cost
10123 of COSTS_N_INSNS (1) was already added in. This is
10124 why we are subtracting it back out. */
10125 if (outer_code == ZERO_EXTEND)
10127 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
10129 else if (outer_code == SIGN_EXTEND)
10131 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
10133 else if (float_mode_p)
10135 *total = sparc_costs->float_load;
10139 *total = sparc_costs->int_load;
10147 *total = sparc_costs->float_plusminus;
10149 *total = COSTS_N_INSNS (1);
10156 gcc_assert (float_mode_p);
10157 *total = sparc_costs->float_mul;
10160 if (GET_CODE (sub) == NEG)
10161 sub = XEXP (sub, 0);
10162 *total += rtx_cost (sub, FMA, 0, speed);
10165 if (GET_CODE (sub) == NEG)
10166 sub = XEXP (sub, 0);
10167 *total += rtx_cost (sub, FMA, 2, speed);
10173 *total = sparc_costs->float_mul;
10174 else if (! TARGET_HARD_MUL)
10175 *total = COSTS_N_INSNS (25);
10181 if (sparc_costs->int_mul_bit_factor)
10185 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
10187 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
10188 for (nbits = 0; value != 0; value &= value - 1)
10191 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
10192 && GET_MODE (XEXP (x, 1)) == VOIDmode)
10194 rtx x1 = XEXP (x, 1);
10195 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
10196 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
10198 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
10200 for (; value2 != 0; value2 &= value2 - 1)
10208 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
10209 bit_cost = COSTS_N_INSNS (bit_cost);
10212 if (mode == DImode)
10213 *total = sparc_costs->int_mulX + bit_cost;
10215 *total = sparc_costs->int_mul + bit_cost;
10222 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
10231 if (mode == DFmode)
10232 *total = sparc_costs->float_div_df;
10234 *total = sparc_costs->float_div_sf;
10238 if (mode == DImode)
10239 *total = sparc_costs->int_divX;
10241 *total = sparc_costs->int_div;
10246 if (! float_mode_p)
10248 *total = COSTS_N_INSNS (1);
10255 case UNSIGNED_FLOAT:
10259 case FLOAT_TRUNCATE:
10260 *total = sparc_costs->float_move;
10264 if (mode == DFmode)
10265 *total = sparc_costs->float_sqrt_df;
10267 *total = sparc_costs->float_sqrt_sf;
10272 *total = sparc_costs->float_cmp;
10274 *total = COSTS_N_INSNS (1);
10279 *total = sparc_costs->float_cmove;
10281 *total = sparc_costs->int_cmove;
10285 /* Handle the NAND vector patterns. */
10286 if (sparc_vector_mode_supported_p (GET_MODE (x))
10287 && GET_CODE (XEXP (x, 0)) == NOT
10288 && GET_CODE (XEXP (x, 1)) == NOT)
10290 *total = COSTS_N_INSNS (1);
10301 /* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
10304 general_or_i64_p (reg_class_t rclass)
10306 return (rclass == GENERAL_REGS || rclass == I64_REGS);
10309 /* Implement TARGET_REGISTER_MOVE_COST. */
10312 sparc_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10313 reg_class_t from, reg_class_t to)
10315 bool need_memory = false;
10317 if (from == FPCC_REGS || to == FPCC_REGS)
10318 need_memory = true;
10319 else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
10320 || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
10324 int size = GET_MODE_SIZE (mode);
10325 if (size == 8 || size == 4)
10327 if (! TARGET_ARCH32 || size == 4)
10333 need_memory = true;
10338 if (sparc_cpu == PROCESSOR_ULTRASPARC
10339 || sparc_cpu == PROCESSOR_ULTRASPARC3
10340 || sparc_cpu == PROCESSOR_NIAGARA
10341 || sparc_cpu == PROCESSOR_NIAGARA2
10342 || sparc_cpu == PROCESSOR_NIAGARA3
10343 || sparc_cpu == PROCESSOR_NIAGARA4)
10352 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
10353 This is achieved by means of a manual dynamic stack space allocation in
10354 the current frame. We make the assumption that SEQ doesn't contain any
10355 function calls, with the possible exception of calls to the GOT helper. */
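/* In outline: the stack pointer is first decremented by SIZE, REG (and
   REG2 when given) are saved into the slot just above the register save
   area, SEQ is emitted, and finally the registers are reloaded and the
   stack pointer restored.  */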
10358 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
10360 /* We must preserve the lowest 16 words for the register save area. */
10361 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
10362 /* We really need only 2 words of fresh stack space. */
10363 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
10366 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
10367 SPARC_STACK_BIAS + offset));
10369 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
10370 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
10372 emit_insn (gen_rtx_SET (VOIDmode,
10373 adjust_address (slot, word_mode, UNITS_PER_WORD),
10377 emit_insn (gen_rtx_SET (VOIDmode,
10379 adjust_address (slot, word_mode, UNITS_PER_WORD)));
10380 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
10381 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
10384 /* Output the assembler code for a thunk function. THUNK_DECL is the
10385 declaration for the thunk function itself, FUNCTION is the decl for
10386 the target function. DELTA is an immediate constant offset to be
10387 added to THIS. If VCALL_OFFSET is nonzero, the word at address
10388 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
10391 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
10392 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10395 rtx this_rtx, insn, funexp;
10396 unsigned int int_arg_first;
10398 reload_completed = 1;
10399 epilogue_completed = 1;
10401 emit_note (NOTE_INSN_PROLOGUE_END);
10405 sparc_leaf_function_p = 1;
10407 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
10409 else if (flag_delayed_branch)
10411 /* We will emit a regular sibcall below, so we need to instruct
10412 output_sibcall that we are in a leaf function. */
10413 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
10415 /* This will cause final.c to invoke leaf_renumber_regs so we
10416 must behave as if we were in a not-yet-leafified function. */
10417 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
10421 /* We will emit the sibcall manually below, so we will need to
10422 manually spill non-leaf registers. */
10423 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
10425 /* We really are in a leaf function. */
10426 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
10429 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
10430 returns a structure, the structure return pointer is there instead. */
10432 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
10433 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
10435 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
10437 /* Add DELTA. When possible use a plain add, otherwise load it into
10438 a register first. */
10441 rtx delta_rtx = GEN_INT (delta);
10443 if (! SPARC_SIMM13_P (delta))
10445 rtx scratch = gen_rtx_REG (Pmode, 1);
10446 emit_move_insn (scratch, delta_rtx);
10447 delta_rtx = scratch;
10450 /* THIS_RTX += DELTA. */
10451 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
10454 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
10457 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10458 rtx scratch = gen_rtx_REG (Pmode, 1);
10460 gcc_assert (vcall_offset < 0);
10462 /* SCRATCH = *THIS_RTX. */
10463 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
10465 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
10466 may not have any available scratch register at this point. */
10467 if (SPARC_SIMM13_P (vcall_offset))
10469 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
10470 else if (! fixed_regs[5]
10471 /* The below sequence is made up of at least 2 insns,
10472 while the default method may need only one. */
10473 && vcall_offset < -8192)
10475 rtx scratch2 = gen_rtx_REG (Pmode, 5);
10476 emit_move_insn (scratch2, vcall_offset_rtx);
10477 vcall_offset_rtx = scratch2;
10481 rtx increment = GEN_INT (-4096);
10483 /* VCALL_OFFSET is a negative number whose typical range can be
10484 estimated as -32768..0 in 32-bit mode. In almost all cases
10485 it is therefore cheaper to emit multiple add insns than
10486 spilling and loading the constant into a register (at least 3 insns). */
10488 while (! SPARC_SIMM13_P (vcall_offset))
10490 emit_insn (gen_add2_insn (scratch, increment));
10491 vcall_offset += 4096;
10493 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
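/* For instance, VCALL_OFFSET = -20000 needs four add insns of -4096,
   leaving -3616 which fits in a 13-bit signed immediate.  */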
10496 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
10497 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
10498 gen_rtx_PLUS (Pmode,
10500 vcall_offset_rtx)));
10502 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
10503 emit_insn (gen_add2_insn (this_rtx, scratch));
10506 /* Generate a tail call to the target function. */
10507 if (! TREE_USED (function))
10509 assemble_external (function);
10510 TREE_USED (function) = 1;
10512 funexp = XEXP (DECL_RTL (function), 0);
10514 if (flag_delayed_branch)
10516 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10517 insn = emit_call_insn (gen_sibcall (funexp));
10518 SIBLING_CALL_P (insn) = 1;
10522 /* The hoops we have to jump through in order to generate a sibcall
10523 without using delay slots... */
10524 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
10528 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
10530 load_got_register (); /* clobbers %o7 */
10531 scratch = sparc_legitimize_pic_address (funexp, scratch);
10532 seq = get_insns ();
10534 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
10536 else if (TARGET_ARCH32)
10538 emit_insn (gen_rtx_SET (VOIDmode,
10540 gen_rtx_HIGH (SImode, funexp)));
10541 emit_insn (gen_rtx_SET (VOIDmode,
10543 gen_rtx_LO_SUM (SImode, scratch, funexp)));
10545 else /* TARGET_ARCH64 */
10547 switch (sparc_cmodel)
10551 /* The destination can serve as a temporary. */
10552 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
10557 /* The destination cannot serve as a temporary. */
10558 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
10560 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
10561 seq = get_insns ();
10563 emit_and_preserve (seq, spill_reg, 0);
10567 gcc_unreachable ();
10571 emit_jump_insn (gen_indirect_jump (scratch));
10576 /* Run just enough of rest_of_compilation to get the insns emitted.
10577 There's not really enough bulk here to make other passes such as
10578 instruction scheduling worth while. Note that use_thunk calls
10579 assemble_start_function and assemble_end_function. */
10580 insn = get_insns ();
10581 insn_locators_alloc ();
10582 shorten_branches (insn);
10583 final_start_function (insn, file, 1);
10584 final (insn, file, 1);
10585 final_end_function ();
10587 reload_completed = 0;
10588 epilogue_completed = 0;
10591 /* Return true if sparc_output_mi_thunk would be able to output the
10592 assembler code for the thunk function specified by the arguments
10593 it is passed, and false otherwise. */
10595 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
10596 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
10597 HOST_WIDE_INT vcall_offset,
10598 const_tree function ATTRIBUTE_UNUSED)
10600 /* Bound the loop used in the default method above. */
10601 return (vcall_offset >= -32768 || ! fixed_regs[5]);
10604 /* We use the machine specific reorg pass to enable workarounds for errata. */
10611 /* The only erratum we handle for now is that of the AT697F processor. */
10612 if (!sparc_fix_at697f)
10615 /* We need to have the (essentially) final form of the insn stream in order
10616 to properly detect the various hazards. Run delay slot scheduling. */
10617 if (optimize > 0 && flag_delayed_branch)
10618 dbr_schedule (get_insns ());
10620 /* Now look for specific patterns in the insn stream. */
10621 for (insn = get_insns (); insn; insn = next)
10623 bool insert_nop = false;
10626 /* Look for a single-word load into an odd-numbered FP register. */
10627 if (NONJUMP_INSN_P (insn)
10628 && (set = single_set (insn)) != NULL_RTX
10629 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
10630 && MEM_P (SET_SRC (set))
10631 && REG_P (SET_DEST (set))
10632 && REGNO (SET_DEST (set)) > 31
10633 && REGNO (SET_DEST (set)) % 2 != 0)
10635 /* The wrong dependency is on the enclosing double register. */
10636 unsigned int x = REGNO (SET_DEST (set)) - 1;
10637 unsigned int src1, src2, dest;
10640 /* If the insn has a delay slot, then it cannot be problematic. */
10641 next = next_active_insn (insn);
10642 if (NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE)
10646 extract_insn (next);
10647 code = INSN_CODE (next);
10652 case CODE_FOR_adddf3:
10653 case CODE_FOR_subdf3:
10654 case CODE_FOR_muldf3:
10655 case CODE_FOR_divdf3:
10656 dest = REGNO (recog_data.operand[0]);
10657 src1 = REGNO (recog_data.operand[1]);
10658 src2 = REGNO (recog_data.operand[2]);
10662 /* ld [address], %fx+1
10663 FPOPd %f{x,y}, %f{y,x}, %f{x,y} */
10664 if ((src1 == x || src2 == x)
10665 && (dest == src1 || dest == src2))
10671 /* ld [address], %fx+1
10672 FPOPd %fx, %fx, %fx */
10675 && (code == CODE_FOR_adddf3 || code == CODE_FOR_muldf3))
10680 case CODE_FOR_sqrtdf2:
10681 dest = REGNO (recog_data.operand[0]);
10682 src1 = REGNO (recog_data.operand[1]);
10684 /* ld [address], %fx+1 followed by fsqrtd %fx, %fx */
10686 if (src1 == x && dest == src1)
10695 next = NEXT_INSN (insn);
10698 emit_insn_after (gen_nop (), insn);
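/* The nop inserted above separates the single-word FP load from the
   offending FP operation, so the spurious dependency on the enclosing
   double register can no longer trigger the erratum.  */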
10702 /* How to allocate a 'struct machine_function'. */
10704 static struct machine_function *
10705 sparc_init_machine_status (void)
10707 return ggc_alloc_cleared_machine_function ();
10710 /* Locate some local-dynamic symbol still in use by this function
10711 so that we can print its name in local-dynamic base patterns. */
10713 static const char *
10714 get_some_local_dynamic_name (void)
10718 if (cfun->machine->some_ld_name)
10719 return cfun->machine->some_ld_name;
10721 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
10723 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
10724 return cfun->machine->some_ld_name;
10726 gcc_unreachable ();
10730 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
10735 && GET_CODE (x) == SYMBOL_REF
10736 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
10738 cfun->machine->some_ld_name = XSTR (x, 0);
10745 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10746 We need to emit DTP-relative relocations. */
10749 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
10754 fputs ("\t.word\t%r_tls_dtpoff32(", file);
10757 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
10760 gcc_unreachable ();
10762 output_addr_const (file, x);
10766 /* Do whatever processing is required at the end of a file. */
10769 sparc_file_end (void)
10771 /* If we need to emit the special GOT helper function, do so now. */
10772 if (got_helper_rtx)
10774 const char *name = XSTR (got_helper_rtx, 0);
10775 const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
10776 #ifdef DWARF2_UNWIND_INFO
10780 if (USE_HIDDEN_LINKONCE)
10782 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
10783 get_identifier (name),
10784 build_function_type_list (void_type_node,
10786 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
10787 NULL_TREE, void_type_node);
10788 TREE_STATIC (decl) = 1;
10789 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
10790 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
10791 DECL_VISIBILITY_SPECIFIED (decl) = 1;
10792 resolve_unique_section (decl, 0, flag_function_sections);
10793 allocate_struct_function (decl, true);
10794 cfun->is_thunk = 1;
10795 current_function_decl = decl;
10796 init_varasm_status ();
10797 assemble_start_function (decl, name);
10801 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10802 switch_to_section (text_section);
10804 ASM_OUTPUT_ALIGN (asm_out_file, align);
10805 ASM_OUTPUT_LABEL (asm_out_file, name);
10808 #ifdef DWARF2_UNWIND_INFO
10809 do_cfi = dwarf2out_do_cfi_asm ();
10811 fprintf (asm_out_file, "\t.cfi_startproc\n");
10813 if (flag_delayed_branch)
10814 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
10815 reg_name, reg_name);
10817 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
10818 reg_name, reg_name);
10819 #ifdef DWARF2_UNWIND_INFO
10821 fprintf (asm_out_file, "\t.cfi_endproc\n");
10825 if (NEED_INDICATE_EXEC_STACK)
10826 file_end_indicate_exec_stack ();
10828 #ifdef TARGET_SOLARIS
10829 solaris_file_end ();
10833 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10834 /* Implement TARGET_MANGLE_TYPE. */
10836 static const char *
10837 sparc_mangle_type (const_tree type)
10840 && TYPE_MAIN_VARIANT (type) == long_double_type_node
10841 && TARGET_LONG_DOUBLE_128)
10844 /* For all other types, use normal C++ mangling. */
10849 /* Expand code to perform an 8-bit or 16-bit compare and swap by doing 32-bit
10850 compare and swap on the word containing the byte or half-word. */
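/* The generated loop below ORs the (shifted) old and new field values
   into the bytes currently observed outside the field and retries the
   word-wide CAS as long as its failure can be attributed to a change in
   those other bytes; once they are stable and the CAS still fails, the
   mismatch is in the field itself and the loop exits.  */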
10853 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
10855 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
10856 rtx addr = gen_reg_rtx (Pmode);
10857 rtx off = gen_reg_rtx (SImode);
10858 rtx oldv = gen_reg_rtx (SImode);
10859 rtx newv = gen_reg_rtx (SImode);
10860 rtx oldvalue = gen_reg_rtx (SImode);
10861 rtx newvalue = gen_reg_rtx (SImode);
10862 rtx res = gen_reg_rtx (SImode);
10863 rtx resv = gen_reg_rtx (SImode);
10864 rtx memsi, val, mask, end_label, loop_label, cc;
10866 emit_insn (gen_rtx_SET (VOIDmode, addr,
10867 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
10869 if (Pmode != SImode)
10870 addr1 = gen_lowpart (SImode, addr1);
10871 emit_insn (gen_rtx_SET (VOIDmode, off,
10872 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
10874 memsi = gen_rtx_MEM (SImode, addr);
10875 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
10876 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
10878 val = force_reg (SImode, memsi);
10880 emit_insn (gen_rtx_SET (VOIDmode, off,
10881 gen_rtx_XOR (SImode, off,
10882 GEN_INT (GET_MODE (mem) == QImode
10885 emit_insn (gen_rtx_SET (VOIDmode, off,
10886 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
10888 if (GET_MODE (mem) == QImode)
10889 mask = force_reg (SImode, GEN_INT (0xff));
10891 mask = force_reg (SImode, GEN_INT (0xffff));
10893 emit_insn (gen_rtx_SET (VOIDmode, mask,
10894 gen_rtx_ASHIFT (SImode, mask, off)));
10896 emit_insn (gen_rtx_SET (VOIDmode, val,
10897 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
10900 oldval = gen_lowpart (SImode, oldval);
10901 emit_insn (gen_rtx_SET (VOIDmode, oldv,
10902 gen_rtx_ASHIFT (SImode, oldval, off)));
10904 newval = gen_lowpart_common (SImode, newval);
10905 emit_insn (gen_rtx_SET (VOIDmode, newv,
10906 gen_rtx_ASHIFT (SImode, newval, off)));
10908 emit_insn (gen_rtx_SET (VOIDmode, oldv,
10909 gen_rtx_AND (SImode, oldv, mask)));
10911 emit_insn (gen_rtx_SET (VOIDmode, newv,
10912 gen_rtx_AND (SImode, newv, mask)));
10914 end_label = gen_label_rtx ();
10915 loop_label = gen_label_rtx ();
10916 emit_label (loop_label);
10918 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
10919 gen_rtx_IOR (SImode, oldv, val)));
10921 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
10922 gen_rtx_IOR (SImode, newv, val)));
10924 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
10926 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
10928 emit_insn (gen_rtx_SET (VOIDmode, resv,
10929 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
10932 cc = gen_compare_reg_1 (NE, resv, val);
10933 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
10935 /* Use cbranchcc4 to separate the compare and branch! */
10936 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
10937 cc, const0_rtx, loop_label));
10939 emit_label (end_label);
10941 emit_insn (gen_rtx_SET (VOIDmode, res,
10942 gen_rtx_AND (SImode, res, mask)));
10944 emit_insn (gen_rtx_SET (VOIDmode, res,
10945 gen_rtx_LSHIFTRT (SImode, res, off)));
10947 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
10951 sparc_expand_vec_perm_bmask (enum machine_mode vmode, rtx sel)
10955 sel = gen_lowpart (DImode, sel);
10959 /* inp = xxxxxxxAxxxxxxxB */
10960 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
10961 NULL_RTX, 1, OPTAB_DIRECT);
10962 /* t_1 = ....xxxxxxxAxxx. */
10963 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
10964 GEN_INT (3), NULL_RTX, 1, OPTAB_DIRECT);
10965 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
10966 GEN_INT (0x30000), NULL_RTX, 1, OPTAB_DIRECT);
10967 /* sel = .......B */
10968 /* t_1 = ...A.... */
10969 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
10970 /* sel = ...A...B */
10971 sel = expand_mult (SImode, sel, GEN_INT (0x4444), sel, 1);
10972 /* sel = AAAABBBB * 4 */
10973 t_1 = force_reg (SImode, GEN_INT (0x01230123));
10974 /* sel = { A*4, A*4+1, A*4+2, ... } */
10978 /* inp = xxxAxxxBxxxCxxxD */
10979 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
10980 NULL_RTX, 1, OPTAB_DIRECT);
10981 t_2 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
10982 NULL_RTX, 1, OPTAB_DIRECT);
10983 t_3 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (24),
10984 NULL_RTX, 1, OPTAB_DIRECT);
10985 /* t_1 = ..xxxAxxxBxxxCxx */
10986 /* t_2 = ....xxxAxxxBxxxC */
10987 /* t_3 = ......xxxAxxxBxx */
10988 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
10990 NULL_RTX, 1, OPTAB_DIRECT);
10991 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
10993 NULL_RTX, 1, OPTAB_DIRECT);
10994 t_2 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_2),
10995 GEN_INT (0x070000),
10996 NULL_RTX, 1, OPTAB_DIRECT);
10997 t_3 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_3),
10998 GEN_INT (0x07000000),
10999 NULL_RTX, 1, OPTAB_DIRECT);
11000 /* sel = .......D */
11001 /* t_1 = .....C.. */
11002 /* t_2 = ...B.... */
11003 /* t_3 = .A...... */
11004 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
11005 t_2 = expand_simple_binop (SImode, IOR, t_2, t_3, t_2, 1, OPTAB_DIRECT);
11006 sel = expand_simple_binop (SImode, IOR, sel, t_2, sel, 1, OPTAB_DIRECT);
11007 /* sel = .A.B.C.D */
11008 sel = expand_mult (SImode, sel, GEN_INT (0x22), sel, 1);
11009 /* sel = AABBCCDD * 2 */
11010 t_1 = force_reg (SImode, GEN_INT (0x01010101));
11011 /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
11015 /* input = xAxBxCxDxExFxGxH */
11016 sel = expand_simple_binop (DImode, AND, sel,
11017 GEN_INT ((HOST_WIDE_INT)0x0f0f0f0f << 32
11019 NULL_RTX, 1, OPTAB_DIRECT);
11020 /* sel = .A.B.C.D.E.F.G.H */
11021 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (4),
11022 NULL_RTX, 1, OPTAB_DIRECT);
11023 /* t_1 = ..A.B.C.D.E.F.G. */
11024 sel = expand_simple_binop (DImode, IOR, sel, t_1,
11025 NULL_RTX, 1, OPTAB_DIRECT);
11026 /* sel = .AABBCCDDEEFFGGH */
11027 sel = expand_simple_binop (DImode, AND, sel,
11028 GEN_INT ((HOST_WIDE_INT)0xff00ff << 32
11030 NULL_RTX, 1, OPTAB_DIRECT);
11031 /* sel = ..AB..CD..EF..GH */
11032 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
11033 NULL_RTX, 1, OPTAB_DIRECT);
11034 /* t_1 = ....AB..CD..EF.. */
11035 sel = expand_simple_binop (DImode, IOR, sel, t_1,
11036 NULL_RTX, 1, OPTAB_DIRECT);
11037 /* sel = ..ABABCDCDEFEFGH */
11038 sel = expand_simple_binop (DImode, AND, sel,
11039 GEN_INT ((HOST_WIDE_INT)0xffff << 32 | 0xffff),
11040 NULL_RTX, 1, OPTAB_DIRECT);
11041 /* sel = ....ABCD....EFGH */
11042 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
11043 NULL_RTX, 1, OPTAB_DIRECT);
11044 /* t_1 = ........ABCD.... */
11045 sel = gen_lowpart (SImode, sel);
11046 t_1 = gen_lowpart (SImode, t_1);
11050 gcc_unreachable ();
11053 /* Always perform the final addition/merge within the bmask insn. */
11054 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, t_1));
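/* The byte selector computed above ends up in the GSR via the bmask
   insn (its register result is discarded); a following bshuffle then
   uses that GSR field to perform the actual permutation.  */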
/* Implement TARGET_FRAME_POINTER_REQUIRED.  */

static bool
sparc_frame_pointer_required (void)
{
  /* If the stack pointer is dynamically modified in the function, it cannot
     serve as the frame pointer.  */
  if (cfun->calls_alloca)
    return true;

  /* If the function receives nonlocal gotos, it needs to save the frame
     pointer in the nonlocal_goto_save_area object.  */
  if (cfun->has_nonlocal_label)
    return true;

  /* In flat mode, that's it.  */
  if (TARGET_FLAT)
    return false;

  /* Otherwise, the frame pointer is required if the function isn't leaf.  */
  return !(current_function_is_leaf && only_leaf_regs_used ());
}

/* The way this is structured, we can't eliminate SFP in favor of SP
   if the frame pointer is required: we want to use the SFP->HFP elimination
   in that case.  But the test in update_eliminables doesn't know we are
   assuming below that we only do the former elimination.  */

static bool
sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
}

/* Return the hard frame pointer directly to bypass the stack bias.  */

static rtx
sparc_builtin_setjmp_frame_value (void)
{
  return hard_frame_pointer_rtx;
}

/* If !TARGET_FPU, then make the FP registers and FP condition code registers
   fixed so that they won't be allocated.  */

static void
sparc_conditional_register_usage (void)
{
  if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    {
      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g5, then honor it.  */
  if (TARGET_ARCH32 && fixed_regs[5])
    fixed_regs[5] = 1;
  else if (TARGET_ARCH64 && fixed_regs[5] == 2)
    fixed_regs[5] = 0;
  if (! TARGET_V9)
    {
      int regno;
      for (regno = SPARC_FIRST_V9_FP_REG;
           regno <= SPARC_LAST_V9_FP_REG;
           regno++)
        fixed_regs[regno] = 1;
      /* %fcc0 is used by v8 and v9.  */
      for (regno = SPARC_FIRST_V9_FCC_REG + 1;
           regno <= SPARC_LAST_V9_FCC_REG;
           regno++)
        fixed_regs[regno] = 1;
    }
  if (! TARGET_FPU)
    {
      int regno;
      for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
        fixed_regs[regno] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g2, then honor it.
     Likewise with g3 and g4.  */
  if (fixed_regs[2] == 2)
    fixed_regs[2] = ! TARGET_APP_REGS;
  if (fixed_regs[3] == 2)
    fixed_regs[3] = ! TARGET_APP_REGS;
  if (TARGET_ARCH32 && fixed_regs[4] == 2)
    fixed_regs[4] = ! TARGET_APP_REGS;
  else if (TARGET_CM_EMBMEDANY)
    fixed_regs[4] = 1;
  else if (fixed_regs[4] == 2)
    fixed_regs[4] = 0;
  if (TARGET_FLAT)
    {
      int regno;
      /* Disable leaf functions.  */
      memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
        leaf_reg_remap [regno] = regno;
    }
  if (TARGET_VIS)
    global_regs[SPARC_GSR_REG] = 1;
}

/* Implement TARGET_PREFERRED_RELOAD_CLASS:

   - We can't load constants into FP registers.
   - We can't load FP constants into integer registers when soft-float,
     because there is no soft-float pattern with an r/F constraint.
   - We can't load FP constants into integer registers for TFmode unless
     it is 0.0L, because there is no movtf pattern with an r/F constraint.
   - Try to reload integer constants (symbolic or otherwise) back into
     registers directly, rather than having them dumped to memory.  */

static reg_class_t
sparc_preferred_reload_class (rtx x, reg_class_t rclass)
{
  enum machine_mode mode = GET_MODE (x);
  if (CONSTANT_P (x))
    {
      if (FP_REG_CLASS_P (rclass)
          || rclass == GENERAL_OR_FP_REGS
          || rclass == GENERAL_OR_EXTRA_FP_REGS
          || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
          || (mode == TFmode && ! const_zero_operand (x, mode)))
        return NO_REGS;

      if (GET_MODE_CLASS (mode) == MODE_INT)
        return GENERAL_REGS;

      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
        {
          if (! FP_REG_CLASS_P (rclass)
              || !(const_zero_operand (x, mode)
                   || const_all_ones_operand (x, mode)))
            return NO_REGS;
        }
    }

  if (TARGET_VIS3
      && ! TARGET_ARCH64
      && (rclass == EXTRA_FP_REGS
          || rclass == GENERAL_OR_EXTRA_FP_REGS))
    {
      int regno = true_regnum (x);

      if (SPARC_INT_REG_P (regno))
        return (rclass == EXTRA_FP_REGS
                ? FP_REGS : GENERAL_OR_FP_REGS);
    }

  return rclass;
}

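/* Output the assembler sequence for a 64-bit multiply on a 32-bit (V8+)
   target: assemble the 64-bit operands out of their 32-bit register pairs,
   issue the multiply instruction NAME, and split the 64-bit result back
   into the %H0/%L0 output pair.  The exact sequence depends on the insn
   alternative and on whether the high words are known to be zero.  */
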
const char *
output_v8plus_mult (rtx insn, rtx *operands, const char *name)
{
  char mulstr[32];

  gcc_assert (! TARGET_ARCH64);

  if (sparc_check_64 (operands[1], insn) <= 0)
    output_asm_insn ("srl\t%L1, 0, %L1", operands);
  if (which_alternative == 1)
    output_asm_insn ("sllx\t%H1, 32, %H1", operands);
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      if (which_alternative == 1)
        {
          output_asm_insn ("or\t%L1, %H1, %H1", operands);
          sprintf (mulstr, "%s\t%%H1, %%2, %%L0", name);
          output_asm_insn (mulstr, operands);
          return "srlx\t%L0, 32, %H0";
        }
      else
        {
          output_asm_insn ("sllx\t%H1, 32, %3", operands);
          output_asm_insn ("or\t%L1, %3, %3", operands);
          sprintf (mulstr, "%s\t%%3, %%2, %%3", name);
          output_asm_insn (mulstr, operands);
          output_asm_insn ("srlx\t%3, 32, %H0", operands);
          return "mov\t%3, %L0";
        }
    }
  else if (rtx_equal_p (operands[1], operands[2]))
    {
      if (which_alternative == 1)
        {
          output_asm_insn ("or\t%L1, %H1, %H1", operands);
          sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", name);
          output_asm_insn (mulstr, operands);
          return "srlx\t%L0, 32, %H0";
        }
      else
        {
          output_asm_insn ("sllx\t%H1, 32, %3", operands);
          output_asm_insn ("or\t%L1, %3, %3", operands);
          sprintf (mulstr, "%s\t%%3, %%3, %%3", name);
          output_asm_insn (mulstr, operands);
          output_asm_insn ("srlx\t%3, 32, %H0", operands);
          return "mov\t%3, %L0";
        }
    }
  if (sparc_check_64 (operands[2], insn) <= 0)
    output_asm_insn ("srl\t%L2, 0, %L2", operands);
  if (which_alternative == 1)
    {
      output_asm_insn ("or\t%L1, %H1, %H1", operands);
      output_asm_insn ("sllx\t%H2, 32, %L1", operands);
      output_asm_insn ("or\t%L2, %L1, %L1", operands);
      sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", name);
      output_asm_insn (mulstr, operands);
      return "srlx\t%L0, 32, %H0";
    }
  else
    {
      output_asm_insn ("sllx\t%H1, 32, %3", operands);
      output_asm_insn ("sllx\t%H2, 32, %4", operands);
      output_asm_insn ("or\t%L1, %3, %3", operands);
      output_asm_insn ("or\t%L2, %4, %4", operands);
      sprintf (mulstr, "%s\t%%3, %%4, %%3", name);
      output_asm_insn (mulstr, operands);
      output_asm_insn ("srlx\t%3, 32, %H0", operands);
      return "mov\t%3, %L0";
    }
}

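/* Subroutine of sparc_expand_vector_init.  Move ELT (of INNER_MODE) into the
   low word of a temporary vector register and replicate it into every element
   of the MODE vector TARGET, using a constant bmask and the VIS2 bshuffle
   instruction.  */
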
static void
vector_init_bshuffle (rtx target, rtx elt, enum machine_mode mode,
                      enum machine_mode inner_mode)
{
  rtx t1, final_insn;
  int bmask;

  t1 = gen_reg_rtx (mode);

  elt = convert_modes (SImode, inner_mode, elt, true);
  emit_move_insn (gen_lowpart (SImode, t1), elt);

  switch (mode)
    {
    case V2SImode:
      final_insn = gen_bshufflev2si_vis (target, t1, t1);
      bmask = 0x45674567;
      break;
    case V4HImode:
      final_insn = gen_bshufflev4hi_vis (target, t1, t1);
      bmask = 0x67676767;
      break;
    case V8QImode:
      final_insn = gen_bshufflev8qi_vis (target, t1, t1);
      bmask = 0x77777777;
      break;
    default:
      gcc_unreachable ();
    }

  emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), CONST0_RTX (SImode),
                              force_reg (SImode, GEN_INT (bmask))));
  emit_insn (final_insn);
}

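/* Subroutine of sparc_expand_vector_init.  Broadcast the QImode element ELT
   into all eight bytes of the V8QImode vector TARGET by repeatedly
   interleaving it with itself using the VIS fpmerge instruction.  */
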
static void
vector_init_fpmerge (rtx target, rtx elt, enum machine_mode inner_mode)
{
  rtx t1, t2, t3, t3_low;

  t1 = gen_reg_rtx (V4QImode);
  elt = convert_modes (SImode, inner_mode, elt, true);
  emit_move_insn (gen_lowpart (SImode, t1), elt);

  t2 = gen_reg_rtx (V4QImode);
  emit_move_insn (t2, t1);

  t3 = gen_reg_rtx (V8QImode);
  t3_low = gen_lowpart (V4QImode, t3);

  emit_insn (gen_fpmerge_vis (t3, t1, t2));
  emit_move_insn (t1, t3_low);
  emit_move_insn (t2, t3_low);

  emit_insn (gen_fpmerge_vis (t3, t1, t2));
  emit_move_insn (t1, t3_low);
  emit_move_insn (t2, t3_low);

  emit_insn (gen_fpmerge_vis (gen_lowpart (V8QImode, target), t1, t2));
}

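/* Subroutine of sparc_expand_vector_init.  Broadcast the HImode element ELT
   into all four halfwords of the V4HImode vector TARGET: alignaddr sets the
   GSR alignment field to 6, and each faligndata then shifts another copy of
   the element into TARGET.  */
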
static void
vector_init_faligndata (rtx target, rtx elt, enum machine_mode inner_mode)
{
  rtx t1 = gen_reg_rtx (V4HImode);

  elt = convert_modes (SImode, inner_mode, elt, true);

  emit_move_insn (gen_lowpart (SImode, t1), elt);

  emit_insn (gen_alignaddrsi_vis (gen_reg_rtx (SImode),
                                  force_reg (SImode, GEN_INT (6)),
                                  CONST0_RTX (SImode)));

  emit_insn (gen_faligndatav4hi_vis (target, t1, target));
  emit_insn (gen_faligndatav4hi_vis (target, t1, target));
  emit_insn (gen_faligndatav4hi_vis (target, t1, target));
  emit_insn (gen_faligndatav4hi_vis (target, t1, target));
}

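/* Expand a vector initialization of TARGET from the elements in VALS.
   Constant vectors are loaded as-is, single-register and two-word cases are
   handled with plain moves, 8-byte vectors whose elements are all identical
   are broadcast with bshuffle, fpmerge or faligndata, and everything else is
   built up through a stack temporary.  */
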
void
sparc_expand_vector_init (rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int i, n_var = 0;
  bool all_same;
  rtx mem;

  all_same = true;
  for (i = 0; i < n_elts; i++)
    {
      rtx x = XVECEXP (vals, 0, i);
      if (!CONSTANT_P (x))
        n_var++;

      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
        all_same = false;
    }

  if (n_var == 0)
    {
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (mode))
    {
      if (GET_MODE_SIZE (inner_mode) == 4)
        {
          emit_move_insn (gen_lowpart (SImode, target),
                          gen_lowpart (SImode, XVECEXP (vals, 0, 0)));
          return;
        }
      else if (GET_MODE_SIZE (inner_mode) == 8)
        {
          emit_move_insn (gen_lowpart (DImode, target),
                          gen_lowpart (DImode, XVECEXP (vals, 0, 0)));
          return;
        }
    }
  else if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (word_mode)
           && GET_MODE_SIZE (mode) == 2 * GET_MODE_SIZE (word_mode))
    {
      emit_move_insn (gen_highpart (word_mode, target),
                      gen_lowpart (word_mode, XVECEXP (vals, 0, 0)));
      emit_move_insn (gen_lowpart (word_mode, target),
                      gen_lowpart (word_mode, XVECEXP (vals, 0, 1)));
      return;
    }

  if (all_same && GET_MODE_SIZE (mode) == 8)
    {
      if (TARGET_VIS2)
        {
          vector_init_bshuffle (target, XVECEXP (vals, 0, 0), mode, inner_mode);
          return;
        }
      if (mode == V8QImode)
        {
          vector_init_fpmerge (target, XVECEXP (vals, 0, 0), inner_mode);
          return;
        }
      if (mode == V4HImode)
        {
          vector_init_faligndata (target, XVECEXP (vals, 0, 0), inner_mode);
          return;
        }
    }

  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
                                       i * GET_MODE_SIZE (inner_mode)),
                    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}

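/* Implement TARGET_SECONDARY_RELOAD.  */
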
static reg_class_t
sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                        enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  sri->icode = CODE_FOR_nothing;
  sri->extra_cost = 0;

  /* We need a temporary when loading/storing a HImode/QImode value
     between memory and the FPU registers.  This can happen when combine puts
     a paradoxical subreg in a float/fix conversion insn.  */
  if (FP_REG_CLASS_P (rclass)
      && (mode == HImode || mode == QImode)
      && (GET_CODE (x) == MEM
          || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
              && true_regnum (x) == -1)))
    return GENERAL_REGS;

  /* On 32-bit we need a temporary when loading/storing a DFmode value
     between unaligned memory and the upper FPU registers.  */
  if (TARGET_ARCH32
      && rclass == EXTRA_FP_REGS
      && mode == DFmode
      && GET_CODE (x) == MEM
      && ! mem_min_alignment (x, 8))
    return FP_REGS;

  if (((TARGET_CM_MEDANY
        && symbolic_operand (x, mode))
       || (TARGET_CM_EMBMEDANY
           && text_segment_operand (x, mode)))
      && ! flag_pic)
    {
      if (in_p)
        sri->icode = direct_optab_handler (reload_in_optab, mode);
      else
        sri->icode = direct_optab_handler (reload_out_optab, mode);
      return NO_REGS;
    }

  if (TARGET_VIS3 && TARGET_ARCH32)
    {
      int regno = true_regnum (x);

      /* When using VIS3 fp<-->int register moves, on 32-bit we have
         to move 8-byte values in 4-byte pieces.  This only works via
         FP_REGS, and not via EXTRA_FP_REGS.  Therefore if we try to
         move between EXTRA_FP_REGS and GENERAL_REGS, we will need
         an FP_REGS intermediate move.  */
      if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
          || ((general_or_i64_p (rclass)
               || rclass == GENERAL_OR_FP_REGS)
              && SPARC_FP_REG_P (regno)))
        {
          sri->extra_cost = 2;
          return FP_REGS;
        }
    }

  return NO_REGS;
}

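/* Emit code to conditionally move either OPERANDS[2] or OPERANDS[3] into
   OPERANDS[0] in MODE, depending on the comparison in OPERANDS[1].  Return
   true on success, or false if the conditional move cannot be emitted
   (a DImode comparison requires TARGET_ARCH64).  */
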
bool
sparc_expand_conditional_move (enum machine_mode mode, rtx *operands)
{
  enum rtx_code rc = GET_CODE (operands[1]);
  enum machine_mode cmp_mode;
  rtx cc_reg, dst, cmp;

  cmp = operands[1];
  cmp_mode = GET_MODE (XEXP (cmp, 0));
  if (cmp_mode == DImode && !TARGET_ARCH64)
    return false;

  dst = operands[0];

  if (! rtx_equal_p (operands[2], dst)
      && ! rtx_equal_p (operands[3], dst))
    {
      if (reg_overlap_mentioned_p (dst, cmp))
        dst = gen_reg_rtx (mode);

      emit_move_insn (dst, operands[3]);
    }
  else if (operands[2] == dst)
    {
      operands[2] = operands[3];

      if (GET_MODE_CLASS (cmp_mode) == MODE_FLOAT)
        rc = reverse_condition_maybe_unordered (rc);
      else
        rc = reverse_condition (rc);
    }

  if (cmp_mode == TFmode && !TARGET_HARD_QUAD)
    cmp = sparc_emit_float_lib_cmp (XEXP (cmp, 0), XEXP (cmp, 1), rc);

  if (XEXP (cmp, 1) == const0_rtx
      && GET_CODE (XEXP (cmp, 0)) == REG
      && cmp_mode == DImode
      && v9_regcmp_p (rc))
    cc_reg = XEXP (cmp, 0);
  else
    cc_reg = gen_compare_reg_1 (rc, XEXP (cmp, 0), XEXP (cmp, 1));

  cmp = gen_rtx_fmt_ee (rc, GET_MODE (cc_reg), cc_reg, const0_rtx);

  emit_insn (gen_rtx_SET (VOIDmode, dst,
                          gen_rtx_IF_THEN_ELSE (mode, cmp, operands[2], dst)));

  if (dst != operands[0])
    emit_move_insn (operands[0], dst);

  return true;
}

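/* Emit code for a vector conditional select: compare OPERANDS[4] with
   OPERANDS[5] according to the code in OPERANDS[3] (via the unspec FCODE),
   turn the result into a byte mask in the GSR (via the unspec CCODE), and
   use bshuffle to pick each element of OPERANDS[0] from OPERANDS[1] or
   OPERANDS[2].  */
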
void
sparc_expand_vcond (enum machine_mode mode, rtx *operands, int ccode, int fcode)
{
  rtx mask, cop0, cop1, fcmp, cmask, bshuf, gsr;
  enum rtx_code code = GET_CODE (operands[3]);

  mask = gen_reg_rtx (Pmode);
  cop0 = operands[4];
  cop1 = operands[5];
  if (code == LT || code == GE)
    {
      rtx t;

      code = swap_condition (code);
      t = cop0; cop0 = cop1; cop1 = t;
    }

  gsr = gen_rtx_REG (DImode, SPARC_GSR_REG);

  fcmp = gen_rtx_UNSPEC (Pmode,
                         gen_rtvec (1, gen_rtx_fmt_ee (code, mode, cop0, cop1)),
                         fcode);

  cmask = gen_rtx_UNSPEC (DImode,
                          gen_rtvec (2, mask, gsr),
                          ccode);

  bshuf = gen_rtx_UNSPEC (mode,
                          gen_rtvec (3, operands[1], operands[2], gsr),
                          UNSPEC_BSHUFFLE);

  emit_insn (gen_rtx_SET (VOIDmode, mask, fcmp));
  emit_insn (gen_rtx_SET (VOIDmode, gsr, cmask));

  emit_insn (gen_rtx_SET (VOIDmode, operands[0], bshuf));
}

#include "gt-sparc.h"