/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "langhooks.h"
#include "dwarf2out.h"
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};
static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs leon_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (15), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};
static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

const struct processor_costs *sparc_costs = &cypress_costs;
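
/* Added illustration (not part of the original source): a sketch of how
   these tables are typically consumed by the TARGET_RTX_COSTS hook.  The
   field names (int_mul, float_mul) are assumptions inferred from the
   comments above; the fragment is for exposition only.  */
#if 0
static bool
example_rtx_costs_fragment (rtx x, int *total, bool float_mode_p)
{
  switch (GET_CODE (x))
    {
    case MULT:
      /* COSTS_N_INSNS (n) scales n by the cost of one simple insn, so a
	 Cypress fmul at COSTS_N_INSNS (7) is seven times the cost of a
	 register move.  */
      *total = float_mode_p ? sparc_costs->float_mul : sparc_costs->int_mul;
      return true;
    default:
      return false;
    }
}
#endif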
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out if
   somebody does not branch between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};
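
/* Added illustration (not from the original file): how the remap table is
   read.  Hard regs 24-29 are the %i0-%i5 inputs; in a leaf function they
   are renumbered onto the caller's %o0-%o5 (hard regs 8-13), while a -1
   entry marks a register that may not appear in a leaf function at all.
   A hypothetical renaming step over one register would look like:  */
#if 0
  int regno = 24;			/* %i0 */
  int new_regno = leaf_reg_remap[regno];
  gcc_assert (new_regno != -1);		/* -1 regs are disallowed in leaves */
  /* new_regno is now 8, i.e. %o0.  Note %sp (14) maps to itself and
     %fp (30) maps to -1: a leaf function has no register window, so the
     frame pointer cannot be renamed away by this table.  */
#endif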
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};
struct GTY(()) machine_function
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p  cfun->machine->prologue_data_valid_p
/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static void sparc_option_override (void);
static void sparc_init_modes (void);
static void scan_record_type (const_tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				const_tree, bool, bool, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static bool sparc_legitimate_constant_p (enum machine_mode, rtx);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void load_got_register (void);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
						 tree) ATTRIBUTE_UNUSED;
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (enum machine_mode, rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static int sparc_register_move_cost (enum machine_mode,
				     reg_class_t, reg_class_t);
static bool sparc_rtx_costs (rtx, int, int, int *, bool);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (enum machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static rtx sparc_struct_value_rtx (tree, int);
static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
						      int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx sparc_legitimize_tls_address (rtx);
static rtx sparc_legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static rtx sparc_delegitimize_address (rtx);
static bool sparc_mode_dependent_address_p (const_rtx);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
				     enum machine_mode, const_tree, bool);
static void sparc_function_arg_advance (CUMULATIVE_ARGS *,
					enum machine_mode, const_tree, bool);
static rtx sparc_function_arg_1 (const CUMULATIVE_ARGS *,
				 enum machine_mode, const_tree, bool, bool);
static rtx sparc_function_arg (CUMULATIVE_ARGS *,
			       enum machine_mode, const_tree, bool);
static rtx sparc_function_incoming_arg (CUMULATIVE_ARGS *,
					enum machine_mode, const_tree, bool);
static unsigned int sparc_function_arg_boundary (enum machine_mode,
						 const_tree);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
				    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
static void sparc_conditional_register_usage (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
static enum machine_mode sparc_preferred_simd_mode (enum machine_mode);
static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
static bool sparc_print_operand_punct_valid_p (unsigned char);
static void sparc_print_operand (FILE *, rtx, int);
static void sparc_print_operand_address (FILE *, rtx);
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL, false }
};
#endif
/* Option handling.  */

enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

/* Implement TARGET_OPTION_OPTIMIZATION_TABLE.  */
static const struct default_options sparc_option_optimization_table[] =
  {
    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
    { OPT_LEVELS_NONE, 0, NULL, 0 }
  };
/* Initialize the GCC target structure.  */

/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sparc_register_move_cost

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sparc_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sparc_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sparc_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec
#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sparc_option_override
#undef TARGET_OPTION_OPTIMIZATION_TABLE
#define TARGET_OPTION_OPTIMIZATION_TABLE sparc_option_optimization_table
#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate
#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sparc_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address

struct gcc_target targetm = TARGET_INITIALIZER;
/* Validate and override various options, and do some machine dependent
   initialization.  */

static void
sparc_option_override (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const enum processor_type processor;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
    { TARGET_CPU_v8, PROCESSOR_V8 },
    { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
    { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
    { TARGET_CPU_leon, PROCESSOR_LEON },
    { TARGET_CPU_sparclite, PROCESSOR_F930 },
    { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
    { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
    { TARGET_CPU_v9, PROCESSOR_V9 },
    { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
    { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
    { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
    { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
    { -1, PROCESSOR_V7 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  This must match the order of
     the PROCESSOR_* enumeration.  */
  static struct cpu_table {
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { MASK_ISA, MASK_V8 },
    { MASK_ISA, MASK_V8|MASK_FPU },
    { MASK_ISA, MASK_V8|MASK_FPU },
    { MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.  */
    { MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { MASK_ISA, MASK_SPARCLET },
    { MASK_ISA, MASK_SPARCLET },
    { MASK_ISA, MASK_V9 },
    /* UltraSPARC I, II, IIi */
    { MASK_ISA,
    /* Although insns using %y are deprecated, it is a clear win.  */
      MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* ??? Check if %y issue still holds true.  */
    { MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS},
    { MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS},
    { MASK_ISA, MASK_V9},
  };
  const struct cpu_table *cpu;
  int fpu;
#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64-bit archs to use a 128-bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }
  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }
  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  if (!global_options_set.x_sparc_cpu_and_features)
    {
      for (def = &cpu_default[0]; def->cpu != -1; ++def)
	if (def->cpu == TARGET_CPU_DEFAULT)
	  break;
      gcc_assert (def->cpu != -1);
      sparc_cpu_and_features = def->processor;
    }
  if (!global_options_set.x_sparc_cpu)
    sparc_cpu = sparc_cpu_and_features;

  cpu = &cpu_table[(int) sparc_cpu_and_features];
  target_flags &= ~cpu->disable;
  target_flags |= cpu->enable;

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (target_flags_explicit & MASK_FPU)
    target_flags = (target_flags & ~MASK_FPU) | fpu;
  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;
  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA
	  || sparc_cpu == PROCESSOR_NIAGARA2))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (! TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;
  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;
  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_LEON:
      sparc_costs = &leon_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    };
#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
			 ((sparc_cpu == PROCESSOR_ULTRASPARC
			   || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2)
			  ? 2
			  : (sparc_cpu == PROCESSOR_ULTRASPARC3
			     ? 8 : 3)),
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
			 ((sparc_cpu == PROCESSOR_ULTRASPARC
			   || sparc_cpu == PROCESSOR_ULTRASPARC3
			   || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2)
			  ? 64 : 32),
			 global_options.x_param_values,
			 global_options_set.x_param_values);
}
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}
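
/* Added note (illustrative, not from the original file): the six codes
   accepted above are exactly the register conditions the V9 ISA can
   encode in the 3-bit rcond field of its branch/move-on-register insns:

	brz   %o0, label	! EQ		brlez %o0, label	! LE
	brnz  %o0, label	! NE		brgz  %o0, label	! GT
	brgez %o0, label	! GE		brlz  %o0, label	! LT

   Unsigned codes (GTU and friends) have no rcond encoding, which is why
   they are rejected here.  */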
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
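
/* Added worked example (not from the original file): the SFmode constant
   1.5f has image 0x3fc00000; its low 10 bits are clear, so SPARC_SETHI_P
   holds and fp_sethi_p is nonzero: a single
   "sethi %hi(0x3fc00000), %reg" materializes it.  For 1.1f the image is
   0x3f8ccccd, which is neither a SIMM13 nor a sethi value, so only
   fp_high_losum_p accepts it and a sethi/or pair is required.  */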
/* Return true if the address of LABEL can be loaded by means of the
   mov{si,di}_pic_label_ref patterns in PIC mode.  */

static bool
can_use_mov_pic_label_ref (rtx label)
{
  /* VxWorks does not impose a fixed gap between segments; the run-time
     gap can be different from the object-file gap.  We therefore can't
     assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
     are absolutely sure that X is in the same segment as the GOT.
     Unfortunately, the flexibility of linker scripts means that we
     can't be sure of that in general, so assume that GOT-relative
     accesses are never valid on VxWorks.  */
  if (TARGET_VXWORKS_RTP)
    return false;

  /* Similarly, if the label is non-local, it might end up being placed
     in a different section than the current one; now mov_pic_label_ref
     requires the label and the code to be in the same section.  */
  if (LABEL_REF_NONLOCAL_P (label))
    return false;

  /* Finally, if we are reordering basic blocks and partitioning into hot
     and cold sections, this might happen for any label.  */
  if (flag_reorder_blocks_and_partition)
    return false;

  return true;
}
/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && sparc_tls_referenced_p (operands [1]))
    {
      operands[1] = sparc_legitimize_tls_address (operands[1]);
      return false;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);

      /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases.  */
      if (GET_CODE (operands[1]) == LABEL_REF
	  && can_use_mov_pic_label_ref (operands[1]))
	{
	  if (mode == SImode)
	    {
	      emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }

	  if (mode == DImode)
	    {
	      gcc_assert (TARGET_ARCH64);
	      emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1]
	    = sparc_legitimize_pic_address (operands[1],
					    (reload_in_progress
					     ? operands[0] : NULL_RTX));
	  return false;
	}
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
	 not storing directly into memory.  So fix this up to avoid
	 crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && const_zero_operand (operands[1], mode))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && (reload_completed || reload_in_progress))))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
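
/* Added illustration (not from the original file): sparc_expand_move is
   the worker behind the mov<mode> expanders in sparc.md; the usual shape
   of such an expander is sketched below (an approximation, not the exact
   pattern text):

     (define_expand "movsi"
       [(set (match_operand:SI 0 "nonimmediate_operand" "")
	     (match_operand:SI 1 "general_operand" ""))]
       ""
     {
       if (sparc_expand_move (SImode, operands))
	 DONE;
     })

   A true return means the function already emitted the whole sequence,
   while false lets the pattern's own RTL be used.  */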
/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

static void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
		  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      GEN_INT (INTVAL (op1)
				       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
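
/* Added worked example (not from the original file): loading the 32-bit
   constant 0x12345678, which is neither a SIMM13 nor a %hi-only value:

     0x12345678 & ~0x3ff == 0x12345400	-> sethi %hi(0x12345678), %tmp
     0x12345678 &  0x3ff == 0x278	-> or    %tmp, 0x278, %reg

   Emitting the pair as SET/IOR of plain constants (rather than
   HIGH/LO_SUM) is what lets CSE see and reuse the intermediate value
   0x12345400.  */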
/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp1
	 or	%temp1, %lo(symbol), %reg  */
      if (temp)
	temp1 = temp;  /* op0 is allowed.  */
      else
	temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      if (temp)
	{
	  temp1 = op0;
	  temp2 = op0;
	  temp3 = temp;  /* op0 is allowed.  */
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
			      gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 sllx	%temp3, 32, %temp4
	 or	%temp4, %temp2, %temp5
	 or	%temp5, %lo(symbol), %reg  */
      if (temp)
	{
	  /* It is possible that one of the registers we got for operands[2]
	     might coincide with that of operands[0] (which is why we made
	     it TImode).  Pick the other one to use as our scratch.  */
	  if (rtx_equal_p (temp, op0))
	    {
	      gcc_assert (ti_temp);
	      temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
	    }
	  temp1 = op0;
	  temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	  temp3 = op0;
	  temp4 = op0;
	  temp5 = op0;
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	  temp4 = gen_reg_rtx (DImode);
	  temp5 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
			      gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			add	%temp1, EMBMEDANY_BASE_REG, %temp2
			or	%temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  if (temp)
	    {
	      temp1 = temp;  /* op0 is allowed.  */
	      temp2 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (temp2, temp1));
	  emit_insn (gen_embmedany_losum (op0, temp2, op1));
	}
      else
	{
	  /* Text segment:	sethi	%uhi(symbol), %temp1
				sethi	%hi(symbol), %temp2
				or	%temp1, %ulo(symbol), %temp3
				sllx	%temp3, 32, %temp4
				or	%temp4, %temp2, %temp5
				or	%temp5, %lo(symbol), %reg  */
	  if (temp)
	    {
	      /* It is possible that one of the registers we got for operands[2]
		 might coincide with that of operands[0] (which is why we made
		 it TImode).  Pick the other one to use as our scratch.  */
	      if (rtx_equal_p (temp, op0))
		{
		  gcc_assert (ti_temp);
		  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
		}
	      temp1 = op0;
	      temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	      temp3 = op0;
	      temp4 = op0;
	      temp5 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	      temp3 = gen_reg_rtx (DImode);
	      temp4 = gen_reg_rtx (DImode);
	      temp5 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_textuhi (temp1, op1));
	  emit_insn (gen_embmedany_texthi  (temp2, op1));
	  emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, temp4,
				  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, temp5,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	  emit_insn (gen_embmedany_textlo  (op0, temp5, op1));
	}
      break;

    default:
      gcc_unreachable ();
    }
}
#if HOST_BITS_PER_WIDE_INT == 32
static void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH, they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits so that the
   result matches a plain movdi, to alleviate this problem.  */

static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}
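
/* Added illustration (not from the original file): the point of
   gen_safe_HIGH64 is that the value visible to the optimizer is already
   masked to the bits sethi really sets, e.g.:  */
#if 0
  rtx reg = gen_reg_rtx (DImode);
  /* Emits "reg = 0x12345400" as a plain movdi instead of HIGH:0x12345678,
     so CSE sees a well-defined constant rather than unspecified HIGH
     bits.  */
  emit_insn (gen_safe_HIGH64 (reg, 0x12345678));
  emit_insn (gen_rtx_SET (VOIDmode, reg, gen_safe_OR64 (reg, 0x278)));
#endif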
/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-(HOST_WIDE_INT)0x400
						   | (low_bits & 0x3ff)))));
	}
    }
}
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT high_bits,
			       unsigned HOST_WIDE_INT low_immediate,
			       int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
			  gen_rtx_ASHIFT (DImode, temp2,
					  GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
			    gen_safe_OR64 (op0, low_immediate)));
}
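
/* Added worked example (not from the original file): materializing
   0x1234567800000000 with quick2 (high_bits = 0x12345678,
   low_immediate = 0, shift_count = 32) yields the 3-insn sequence

	sethi	%hi(0x12345400), %tmp
	or	%tmp, 0x278, %tmp
	sllx	%tmp, 32, %reg

   and a nonzero low_immediate would append a final
   "or %reg, imm, %reg".  */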
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
				unsigned HOST_WIDE_INT high_bits,
				unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				sub_temp,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp3,
				  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp3)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	}
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
	 painful.  However we do still make an attempt to
	 avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low1)));
	  sub_temp = op0;
	  to_shift = 12;
	}
      else
	{
	  to_shift += 12;
	}
      if (low2 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low2)));
	  sub_temp = op0;
	  to_shift = 8;
	}
      else
	{
	  to_shift += 8;
	}
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (to_shift))));
      if (low3 != const0_rtx)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}
/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
			unsigned HOST_WIDE_INT low_bits,
			int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
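
/* Added worked example (not from the original file): for the constant
   0x000000000000ff00 (high_bits = 0, low_bits = 0xff00) the scan yields
   lowest_bit_set = 8, highest_bit_set = 15 and
   all_bits_between_are_set = 1, so the value qualifies for the "small
   constant shifted into place" 2-insn sequences below.  For
   0x0000000100000001 the set bits span 0..32 with holes, so
   all_bits_between_are_set drops to 0.  */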
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
		   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
							unsigned HOST_WIDE_INT,
							int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
			  unsigned HOST_WIDE_INT low_bits,
			  int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
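
/* Added worked example (not from the original file): with high_bits = 0,
   low_bits = 0xff00, lowest_bit_set = 8 and shift = 0 the function
   returns 0xff, i.e. the set bits slid down to bit 0; passing shift = 10
   instead returns 0x3fc00, the form a sethi can load directly since its
   low 10 bits are clear.  */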
1767 /* Here we are sure to be arch64 and this is an integer constant
1768 being loaded into a register. Emit the most efficient
1769 insn sequence possible. Detection of all the 1-insn cases
1770 has been done already. */
1772 sparc_emit_set_const64 (rtx op0, rtx op1)
1774 unsigned HOST_WIDE_INT high_bits, low_bits;
1775 int lowest_bit_set, highest_bit_set;
1776 int all_bits_between_are_set;
1779 /* Sanity check that we know what we are working with. */
1780 gcc_assert (TARGET_ARCH64
1781 && (GET_CODE (op0) == SUBREG
1782 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1784 if (reload_in_progress || reload_completed)
1787 if (GET_CODE (op1) != CONST_INT)
1789 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1794 temp = gen_reg_rtx (DImode);
1796 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1797 low_bits = (INTVAL (op1) & 0xffffffff);
1799 /* low_bits bits 0 --> 31
1800 high_bits bits 32 --> 63 */
1802 analyze_64bit_constant (high_bits, low_bits,
1803 &highest_bit_set, &lowest_bit_set,
1804 &all_bits_between_are_set);
1806 /* First try for a 2-insn sequence. */
1808 /* These situations are preferred because the optimizer can
1809 * do more things with them:
1811 * sllx %reg, shift, %reg
1813 * srlx %reg, shift, %reg
1814 * 3) mov some_small_const, %reg
1815 * sllx %reg, shift, %reg
1817 if (((highest_bit_set == 63
1818 || lowest_bit_set == 0)
1819 && all_bits_between_are_set != 0)
1820 || ((highest_bit_set - lowest_bit_set) < 12))
1822 HOST_WIDE_INT the_const = -1;
1823 int shift = lowest_bit_set;
1825 if ((highest_bit_set != 63
1826 && lowest_bit_set != 0)
1827 || all_bits_between_are_set == 0)
1830 create_simple_focus_bits (high_bits, low_bits,
1833 else if (lowest_bit_set == 0)
1834 shift = -(63 - highest_bit_set);
1836 gcc_assert (SPARC_SIMM13_P (the_const));
1837 gcc_assert (shift != 0);
1839 emit_insn (gen_safe_SET64 (temp, the_const));
1841 emit_insn (gen_rtx_SET (VOIDmode,
1843 gen_rtx_ASHIFT (DImode,
1847 emit_insn (gen_rtx_SET (VOIDmode,
1849 gen_rtx_LSHIFTRT (DImode,
1851 GEN_INT (-shift))));
1855 /* Now a range of 22 or less bits set somewhere.
1856 * 1) sethi %hi(focus_bits), %reg
1857 * sllx %reg, shift, %reg
1858 * 2) sethi %hi(focus_bits), %reg
1859 * srlx %reg, shift, %reg
1861 if ((highest_bit_set - lowest_bit_set) < 21)
1863 unsigned HOST_WIDE_INT focus_bits =
1864 create_simple_focus_bits (high_bits, low_bits,
1865 lowest_bit_set, 10);
1867 gcc_assert (SPARC_SETHI_P (focus_bits));
1868 gcc_assert (lowest_bit_set != 10);
1870 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1872 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1873 if (lowest_bit_set < 10)
1874 emit_insn (gen_rtx_SET (VOIDmode,
1876 gen_rtx_LSHIFTRT (DImode, temp,
1877 GEN_INT (10 - lowest_bit_set))));
1878 else if (lowest_bit_set > 10)
1879 emit_insn (gen_rtx_SET (VOIDmode,
1881 gen_rtx_ASHIFT (DImode, temp,
1882 GEN_INT (lowest_bit_set - 10))));
1886 /* 1) sethi %hi(low_bits), %reg
1887 * or %reg, %lo(low_bits), %reg
1888 * 2) sethi %hi(~low_bits), %reg
1889 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1892 || high_bits == 0xffffffff)
1894 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1895 (high_bits == 0xffffffff));
1899 /* Now, try 3-insn sequences. */
1901 /* 1) sethi %hi(high_bits), %reg
1902 * or %reg, %lo(high_bits), %reg
1903 * sllx %reg, 32, %reg
1907 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1911 /* We may be able to do something quick
1912 when the constant is negated, so try that. */
1913 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1914 (~low_bits) & 0xfffffc00))
1916 /* NOTE: The trailing bits get XOR'd so we need the
1917 non-negated bits, not the negated ones. */
1918 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1920 if ((((~high_bits) & 0xffffffff) == 0
1921 && ((~low_bits) & 0x80000000) == 0)
1922 || (((~high_bits) & 0xffffffff) == 0xffffffff
1923 && ((~low_bits) & 0x80000000) != 0))
1925 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1927 if ((SPARC_SETHI_P (fast_int)
1928 && (~high_bits & 0xffffffff) == 0)
1929 || SPARC_SIMM13_P (fast_int))
1930 emit_insn (gen_safe_SET64 (temp, fast_int));
1932 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1937 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1938 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
1939 sparc_emit_set_const64 (temp, negated_const);
1942 /* If we are XOR'ing with -1, then we should emit a one's complement
1943 instead. This way the combiner will notice logical operations
1944 such as ANDN later on and substitute. */
1945 if (trailing_bits == 0x3ff)
1947 emit_insn (gen_rtx_SET (VOIDmode, op0,
1948 gen_rtx_NOT (DImode, temp)));
1952 emit_insn (gen_rtx_SET (VOIDmode,
1954 gen_safe_XOR64 (temp,
1955 (-0x400 | trailing_bits))));
1960 /* 1) sethi %hi(xxx), %reg
1961 * or %reg, %lo(xxx), %reg
1962 * sllx %reg, yyy, %reg
1964 * ??? This is just a generalized version of the low_bits==0
1965 * thing above, FIXME...
1967 if ((highest_bit_set - lowest_bit_set) < 32)
1969 unsigned HOST_WIDE_INT focus_bits =
1970 create_simple_focus_bits (high_bits, low_bits,
1973 /* We can't get here in this state. */
1974 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1976 /* So what we know is that the set bits straddle the
1977 middle of the 64-bit word. */
1978 sparc_emit_set_const64_quick2 (op0, temp,
1984 /* 1) sethi %hi(high_bits), %reg
1985 * or %reg, %lo(high_bits), %reg
1986 * sllx %reg, 32, %reg
1987 * or %reg, low_bits, %reg
1989 if (SPARC_SIMM13_P(low_bits)
1990 && ((int)low_bits > 0))
1992 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
/* When all else fails, the easiest way is full decomposition.  */
1997 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1999 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
2001 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2002 return the mode to be used for the comparison. For floating-point,
2003 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
2004 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2005 processing is needed. */
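/* Editorial note: the CC_NOOV ("no overflow") modes record that the
   flags were set by an arithmetic insn such as addcc, whose V bit does
   not describe a plain comparison; only tests that are valid regardless
   of overflow may then be used.  */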
2008 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
2010 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2036 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2037 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2039 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2040 return CCX_NOOVmode;
2046 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2053 /* Emit the compare insn and return the CC reg for a CODE comparison
2054 with operands X and Y. */
2057 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2059 enum machine_mode mode;
2062 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2065 mode = SELECT_CC_MODE (code, x, y);
2067 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2068 fcc regs (cse can't tell they're really call clobbered regs and will
2069 remove a duplicate comparison even if there is an intervening function
2070 call - it will then try to reload the cc reg via an int reg which is why
2071 we need the movcc patterns). It is possible to provide the movcc
2072 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2073 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
to tell cse that CCFPE mode registers (even pseudos) are call clobbered.  */
2077 /* ??? This is an experiment. Rather than making changes to cse which may
2078 or may not be easy/clean, we do our own cse. This is possible because
2079 we will generate hard registers. Cse knows they're call clobbered (it
2080 doesn't know the same thing about pseudos). If we guess wrong, no big
2081 deal, but if we win, great! */
2083 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2084 #if 1 /* experiment */
2087 /* We cycle through the registers to ensure they're all exercised. */
2088 static int next_fcc_reg = 0;
2089 /* Previous x,y for each fcc reg. */
2090 static rtx prev_args[4][2];
2092 /* Scan prev_args for x,y. */
2093 for (reg = 0; reg < 4; reg++)
2094 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2099 prev_args[reg][0] = x;
2100 prev_args[reg][1] = y;
2101 next_fcc_reg = (next_fcc_reg + 1) & 3;
2103 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2106 cc_reg = gen_reg_rtx (mode);
2107 #endif /* ! experiment */
2108 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2109 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2111 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
/* We shouldn't get here for TFmode if !TARGET_HARD_QUAD.  If we do, this
   will only result in an unrecognizable insn so no point in asserting.  */
2115 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2121 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2124 gen_compare_reg (rtx cmp)
2126 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2129 /* This function is used for v9 only.
2130 DEST is the target of the Scc insn.
2131 CODE is the code for an Scc's comparison.
2132 X and Y are the values we compare.
2134 This function is needed to turn
2137 (gt (reg:CCX 100 %icc)
2141 (gt:DI (reg:CCX 100 %icc)
I.e., the instruction recognizer needs to see the mode of the comparison to
2145 find the right instruction. We could use "gt:DI" right in the
2146 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2149 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2152 && (GET_MODE (x) == DImode
2153 || GET_MODE (dest) == DImode))
2156 /* Try to use the movrCC insns. */
2158 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2160 && v9_regcmp_p (compare_code))
/* Special case for op0 != 0.  This can be done with one instruction if
   dest == x.  */
2168 if (compare_code == NE
2169 && GET_MODE (dest) == DImode
2170 && rtx_equal_p (op0, dest))
2172 emit_insn (gen_rtx_SET (VOIDmode, dest,
2173 gen_rtx_IF_THEN_ELSE (DImode,
2174 gen_rtx_fmt_ee (compare_code, DImode,
2181 if (reg_overlap_mentioned_p (dest, op0))
2183 /* Handle the case where dest == x.
2184 We "early clobber" the result. */
2185 op0 = gen_reg_rtx (GET_MODE (x));
2186 emit_move_insn (op0, x);
2189 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2190 if (GET_MODE (op0) != DImode)
2192 temp = gen_reg_rtx (DImode);
2193 convert_move (temp, op0, 0);
2197 emit_insn (gen_rtx_SET (VOIDmode, dest,
2198 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2199 gen_rtx_fmt_ee (compare_code, DImode,
2207 x = gen_compare_reg_1 (compare_code, x, y);
2210 gcc_assert (GET_MODE (x) != CC_NOOVmode
2211 && GET_MODE (x) != CCX_NOOVmode);
2213 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2214 emit_insn (gen_rtx_SET (VOIDmode, dest,
2215 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2216 gen_rtx_fmt_ee (compare_code,
2217 GET_MODE (x), x, y),
2218 const1_rtx, dest)));
2224 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2225 without jumps using the addx/subx instructions. */
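/* For instance (editorial illustration), "dest = (x <u y)" can be done
   without a branch as
     subcc %x, %y, %g0     ! carry is set iff x <u y
     addx  %g0, %g0, %dest ! dest = 0 + 0 + C
   and GEU is the complement: subx %g0, -1, %dest, i.e. 1 - C.  */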
2228 emit_scc_insn (rtx operands[])
2235 /* The quad-word fp compare library routines all return nonzero to indicate
2236 true, which is different from the equivalent libgcc routines, so we must
2237 handle them specially here. */
2238 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2240 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2241 GET_CODE (operands[1]));
2242 operands[2] = XEXP (operands[1], 0);
2243 operands[3] = XEXP (operands[1], 1);
2246 code = GET_CODE (operands[1]);
2250 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2251 more applications). The exception to this is "reg != 0" which can
2252 be done in one instruction on v9 (so we do it). */
2255 if (GET_MODE (x) == SImode)
2257 rtx pat = gen_seqsi_special (operands[0], x, y);
2261 else if (GET_MODE (x) == DImode)
2263 rtx pat = gen_seqdi_special (operands[0], x, y);
2271 if (GET_MODE (x) == SImode)
2273 rtx pat = gen_snesi_special (operands[0], x, y);
2277 else if (GET_MODE (x) == DImode)
2279 rtx pat = gen_snedi_special (operands[0], x, y);
2285 /* For the rest, on v9 we can use conditional moves. */
2289 if (gen_v9_scc (operands[0], code, x, y))
2293 /* We can do LTU and GEU using the addx/subx instructions too. And
for GTU/LEU, if both operands are registers, swap them and fall
2295 back to the easy case. */
2296 if (code == GTU || code == LEU)
2298 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2299 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2304 code = swap_condition (code);
2308 if (code == LTU || code == GEU)
2310 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2311 gen_rtx_fmt_ee (code, SImode,
2312 gen_compare_reg_1 (code, x, y),
2317 /* Nope, do branches. */
2321 /* Emit a conditional jump insn for the v9 architecture using comparison code
2322 CODE and jump target LABEL.
2323 This function exists to take advantage of the v9 brxx insns. */
2326 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2328 emit_jump_insn (gen_rtx_SET (VOIDmode,
2330 gen_rtx_IF_THEN_ELSE (VOIDmode,
2331 gen_rtx_fmt_ee (code, GET_MODE (op0),
2333 gen_rtx_LABEL_REF (VOIDmode, label),
2338 emit_conditional_branch_insn (rtx operands[])
2340 /* The quad-word fp compare library routines all return nonzero to indicate
2341 true, which is different from the equivalent libgcc routines, so we must
2342 handle them specially here. */
2343 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2345 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2346 GET_CODE (operands[0]));
2347 operands[1] = XEXP (operands[0], 0);
2348 operands[2] = XEXP (operands[0], 1);
2351 if (TARGET_ARCH64 && operands[2] == const0_rtx
2352 && GET_CODE (operands[1]) == REG
2353 && GET_MODE (operands[1]) == DImode)
2355 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2359 operands[1] = gen_compare_reg (operands[0]);
2360 operands[2] = const0_rtx;
2361 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2362 operands[1], operands[2]);
2363 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2368 /* Generate a DFmode part of a hard TFmode register.
2369 REG is the TFmode hard register, LOW is 1 for the
low 64 bits of the register and 0 otherwise.  */
2373 gen_df_reg (rtx reg, int low)
2375 int regno = REGNO (reg);
2377 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2378 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2379 return gen_rtx_REG (DFmode, regno);
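/* Illustrative example (editorial): SPARC is big-endian at the word
   level, so for a TFmode value in %f0 (regno 32) the high DFmode half
   is %f0 itself and the low half is %f2 (regno 34).  */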
2382 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2383 Unlike normal calls, TFmode operands are passed by reference. It is
2384 assumed that no more than 3 operands are required. */
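/* Editorial note: with the 64-bit ABI these are the _Qp_* routines, e.g.
     void _Qp_add (long double *result, const long double *a,
                   const long double *b);
   which is why operand 0 is passed as a pointer and copied out below.
   TARGET_BUGGY_QP_LIB forces a stack temporary for the result because
   some buggy library implementations write to it before having read all
   of the inputs.  */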
2387 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2389 rtx ret_slot = NULL, arg[3], func_sym;
2392 /* We only expect to be called for conversions, unary, and binary ops. */
2393 gcc_assert (nargs == 2 || nargs == 3);
2395 for (i = 0; i < nargs; ++i)
2397 rtx this_arg = operands[i];
2400 /* TFmode arguments and return values are passed by reference. */
2401 if (GET_MODE (this_arg) == TFmode)
2403 int force_stack_temp;
2405 force_stack_temp = 0;
2406 if (TARGET_BUGGY_QP_LIB && i == 0)
2407 force_stack_temp = 1;
2409 if (GET_CODE (this_arg) == MEM
2410 && ! force_stack_temp)
2411 this_arg = XEXP (this_arg, 0);
2412 else if (CONSTANT_P (this_arg)
2413 && ! force_stack_temp)
2415 this_slot = force_const_mem (TFmode, this_arg);
2416 this_arg = XEXP (this_slot, 0);
2420 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2422 /* Operand 0 is the return value. We'll copy it out later. */
2424 emit_move_insn (this_slot, this_arg);
2426 ret_slot = this_slot;
2428 this_arg = XEXP (this_slot, 0);
2435 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2437 if (GET_MODE (operands[0]) == TFmode)
2440 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2441 arg[0], GET_MODE (arg[0]),
2442 arg[1], GET_MODE (arg[1]));
2444 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2445 arg[0], GET_MODE (arg[0]),
2446 arg[1], GET_MODE (arg[1]),
2447 arg[2], GET_MODE (arg[2]));
2450 emit_move_insn (operands[0], ret_slot);
2456 gcc_assert (nargs == 2);
2458 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2459 GET_MODE (operands[0]), 1,
2460 arg[1], GET_MODE (arg[1]));
2462 if (ret != operands[0])
2463 emit_move_insn (operands[0], ret);
/* Expand soft-float TFmode calls to SPARC ABI routines.  */
2470 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2492 emit_soft_tfmode_libcall (func, 3, operands);
2496 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2500 gcc_assert (code == SQRT);
2503 emit_soft_tfmode_libcall (func, 2, operands);
2507 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2514 switch (GET_MODE (operands[1]))
2527 case FLOAT_TRUNCATE:
2528 switch (GET_MODE (operands[0]))
2542 switch (GET_MODE (operands[1]))
2547 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2557 case UNSIGNED_FLOAT:
2558 switch (GET_MODE (operands[1]))
2563 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2574 switch (GET_MODE (operands[0]))
2588 switch (GET_MODE (operands[0]))
2605 emit_soft_tfmode_libcall (func, 2, operands);
/* Expand a hard-float TFmode operation.  All arguments must be in
   registers.  */
2612 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2616 if (GET_RTX_CLASS (code) == RTX_UNARY)
2618 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2619 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2623 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2624 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2625 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2626 operands[1], operands[2]);
2629 if (register_operand (operands[0], VOIDmode))
2632 dest = gen_reg_rtx (GET_MODE (operands[0]));
2634 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2636 if (dest != operands[0])
2637 emit_move_insn (operands[0], dest);
2641 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2643 if (TARGET_HARD_QUAD)
2644 emit_hard_tfmode_operation (code, operands);
2646 emit_soft_tfmode_binop (code, operands);
2650 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2652 if (TARGET_HARD_QUAD)
2653 emit_hard_tfmode_operation (code, operands);
2655 emit_soft_tfmode_unop (code, operands);
2659 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2661 if (TARGET_HARD_QUAD)
2662 emit_hard_tfmode_operation (code, operands);
2664 emit_soft_tfmode_cvt (code, operands);
/* Return nonzero if a branch/jump/call instruction will emit a
   nop into its delay slot.  */
2671 empty_delay_slot (rtx insn)
/* If there is no previous instruction (should not happen), return true.  */
2676 if (PREV_INSN (insn) == NULL)
2679 seq = NEXT_INSN (PREV_INSN (insn));
2680 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2686 /* Return nonzero if TRIAL can go into the call delay slot. */
2689 tls_call_delay (rtx trial)
/* Binutils allows
     call __tls_get_addr, %tgd_call (foo)
2695 add %l7, %o0, %o0, %tgd_add (foo)
2696 while Sun as/ld does not. */
2697 if (TARGET_GNU_TLS || !TARGET_TLS)
2700 pat = PATTERN (trial);
2702 /* We must reject tgd_add{32|64}, i.e.
2703 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2704 and tldm_add{32|64}, i.e.
2705 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2707 if (GET_CODE (pat) == SET
2708 && GET_CODE (SET_SRC (pat)) == PLUS)
2710 rtx unspec = XEXP (SET_SRC (pat), 1);
2712 if (GET_CODE (unspec) == UNSPEC
2713 && (XINT (unspec, 1) == UNSPEC_TLSGD
2714 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2721 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2722 instruction. RETURN_P is true if the v9 variant 'return' is to be
2723 considered in the test too.
2725 TRIAL must be a SET whose destination is a REG appropriate for the
2726 'restore' instruction or, if RETURN_P is true, for the 'return'
2730 eligible_for_restore_insn (rtx trial, bool return_p)
2732 rtx pat = PATTERN (trial);
2733 rtx src = SET_SRC (pat);
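/* Editorial note: the SPARC 'restore src1, src2, dest' instruction
   switches back to the caller's register window and simultaneously
   computes dest = src1 + src2 in that window, which is why the simple
   SETs recognized below can be folded into it.  */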
2735 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2736 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2737 && arith_operand (src, GET_MODE (src)))
2740 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2742 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2745 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2746 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2747 && arith_double_operand (src, GET_MODE (src)))
2748 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2750 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2751 else if (! TARGET_FPU && register_operand (src, SFmode))
2754 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2755 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2758 /* If we have the 'return' instruction, anything that does not use
2759 local or output registers and can go into a delay slot wins. */
2760 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2761 && (get_attr_in_uncond_branch_delay (trial)
2762 == IN_UNCOND_BRANCH_DELAY_TRUE))
2765 /* The 'restore src1,src2,dest' pattern for SImode. */
2766 else if (GET_CODE (src) == PLUS
2767 && register_operand (XEXP (src, 0), SImode)
2768 && arith_operand (XEXP (src, 1), SImode))
2771 /* The 'restore src1,src2,dest' pattern for DImode. */
2772 else if (GET_CODE (src) == PLUS
2773 && register_operand (XEXP (src, 0), DImode)
2774 && arith_double_operand (XEXP (src, 1), DImode))
2777 /* The 'restore src1,%lo(src2),dest' pattern. */
2778 else if (GET_CODE (src) == LO_SUM
2779 && ! TARGET_CM_MEDMID
2780 && ((register_operand (XEXP (src, 0), SImode)
2781 && immediate_operand (XEXP (src, 1), SImode))
2783 && register_operand (XEXP (src, 0), DImode)
2784 && immediate_operand (XEXP (src, 1), DImode))))
2787 /* The 'restore src,src,dest' pattern. */
2788 else if (GET_CODE (src) == ASHIFT
2789 && (register_operand (XEXP (src, 0), SImode)
2790 || register_operand (XEXP (src, 0), DImode))
2791 && XEXP (src, 1) == const1_rtx)
2797 /* Return nonzero if TRIAL can go into the function return's
2801 eligible_for_return_delay (rtx trial)
2805 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2808 if (get_attr_length (trial) != 1)
2811 /* If there are any call-saved registers, we should scan TRIAL if it
2812 does not reference them. For now just make it easy. */
2816 /* If the function uses __builtin_eh_return, the eh_return machinery
2817 occupies the delay slot. */
2818 if (crtl->calls_eh_return)
2821 /* In the case of a true leaf function, anything can go into the slot. */
2822 if (sparc_leaf_function_p)
2823 return get_attr_in_uncond_branch_delay (trial)
2824 == IN_UNCOND_BRANCH_DELAY_TRUE;
2826 pat = PATTERN (trial);
2828 /* Otherwise, only operations which can be done in tandem with
2829 a `restore' or `return' insn can go into the delay slot. */
2830 if (GET_CODE (SET_DEST (pat)) != REG
2831 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
/* If this instruction sets up a floating-point register and we have a return
   instruction, it can probably go in.  But restore will not work
   with FP_REGS.  */
2837 if (REGNO (SET_DEST (pat)) >= 32)
2839 && ! epilogue_renumber (&pat, 1)
2840 && (get_attr_in_uncond_branch_delay (trial)
2841 == IN_UNCOND_BRANCH_DELAY_TRUE));
2843 return eligible_for_restore_insn (trial, true);
2846 /* Return nonzero if TRIAL can go into the sibling call's
2850 eligible_for_sibcall_delay (rtx trial)
2854 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2857 if (get_attr_length (trial) != 1)
2860 pat = PATTERN (trial);
2862 if (sparc_leaf_function_p)
2864 /* If the tail call is done using the call instruction,
2865 we have to restore %o7 in the delay slot. */
2866 if (LEAF_SIBCALL_SLOT_RESERVED_P)
/* %g1 is used to build the function address.  */
2870 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2876 /* Otherwise, only operations which can be done in tandem with
2877 a `restore' insn can go into the delay slot. */
2878 if (GET_CODE (SET_DEST (pat)) != REG
2879 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2880 || REGNO (SET_DEST (pat)) >= 32)
/* If it mentions %o7, it can't go in, because sibcall will clobber it
   before it is restored.  */
2885 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2888 return eligible_for_restore_insn (trial, false);
2892 short_branch (int uid1, int uid2)
2894 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2896 /* Leave a few words of "slop". */
2897 if (delta >= -1023 && delta <= 1022)
2903 /* Return nonzero if REG is not used after INSN.
2904 We assume REG is a reload reg, and therefore does
2905 not live past labels or calls or jumps. */
2907 reg_unused_after (rtx reg, rtx insn)
2909 enum rtx_code code, prev_code = UNKNOWN;
2911 while ((insn = NEXT_INSN (insn)))
2913 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2916 code = GET_CODE (insn);
2917 if (GET_CODE (insn) == CODE_LABEL)
2922 rtx set = single_set (insn);
2923 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2926 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2928 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2936 /* Determine if it's legal to put X into the constant pool. This
2937 is not possible if X contains the address of a symbol that is
2938 not constant (TLS) or not known at final link time (PIC). */
2941 sparc_cannot_force_const_mem (enum machine_mode mode, rtx x)
2943 switch (GET_CODE (x))
2948 /* Accept all non-symbolic constants. */
2952 /* Labels are OK iff we are non-PIC. */
2953 return flag_pic != 0;
2956 /* 'Naked' TLS symbol references are never OK,
2957 non-TLS symbols are OK iff we are non-PIC. */
2958 if (SYMBOL_REF_TLS_MODEL (x))
2961 return flag_pic != 0;
2964 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
2967 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
2968 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
2976 /* Global Offset Table support. */
2977 static GTY(()) rtx got_helper_rtx = NULL_RTX;
2978 static GTY(()) rtx global_offset_table_rtx = NULL_RTX;
2980 /* Return the SYMBOL_REF for the Global Offset Table. */
2982 static GTY(()) rtx sparc_got_symbol = NULL_RTX;
2987 if (!sparc_got_symbol)
2988 sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
2990 return sparc_got_symbol;
2993 /* Ensure that we are not using patterns that are not OK with PIC. */
3003 op = recog_data.operand[i];
3004 gcc_assert (GET_CODE (op) != SYMBOL_REF
3005 && (GET_CODE (op) != CONST
3006 || (GET_CODE (XEXP (op, 0)) == MINUS
3007 && XEXP (XEXP (op, 0), 0) == sparc_got ()
3008 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
3015 /* Return true if X is an address which needs a temporary register when
3016 reloaded while generating PIC code. */
3019 pic_address_needs_scratch (rtx x)
3021 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
3022 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
3023 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
3024 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3025 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
3031 /* Determine if a given RTX is a valid constant. We already know this
3032 satisfies CONSTANT_P. */
3035 sparc_legitimate_constant_p (enum machine_mode mode, rtx x)
3037 switch (GET_CODE (x))
3041 if (sparc_tls_referenced_p (x))
3046 if (GET_MODE (x) == VOIDmode)
3049 /* Floating point constants are generally not ok.
3050 The only exception is 0.0 in VIS. */
3052 && SCALAR_FLOAT_MODE_P (mode)
3053 && const_zero_operand (x, mode))
3059 /* Vector constants are generally not ok.
3060 The only exception is 0 in VIS. */
3062 && const_zero_operand (x, mode))
3074 /* Determine if a given RTX is a valid constant address. */
3077 constant_address_p (rtx x)
3079 switch (GET_CODE (x))
3087 if (flag_pic && pic_address_needs_scratch (x))
3089 return sparc_legitimate_constant_p (Pmode, x);
3092 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
3099 /* Nonzero if the constant value X is a legitimate general operand
3100 when generating PIC code. It is given that flag_pic is on and
3101 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3104 legitimate_pic_operand_p (rtx x)
3106 if (pic_address_needs_scratch (x))
3108 if (sparc_tls_referenced_p (x))
3113 #define RTX_OK_FOR_OFFSET_P(X) \
3114 (CONST_INT_P (X) && INTVAL (X) >= -0x1000 && INTVAL (X) < 0x1000 - 8)
3116 #define RTX_OK_FOR_OLO10_P(X) \
3117 (CONST_INT_P (X) && INTVAL (X) >= -0x1000 && INTVAL (X) < 0xc00 - 8)
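/* Editorial note: both ranges are subsets of the 13-bit signed immediate
   range [-0x1000, 0xfff].  The "- 8" keeps the address of the second
   word of a double-word access in range too; OLO10 further reserves
   0x3ff because the %lo() part of a LO_SUM may contribute up to that
   much on top of the offset.  */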
3119 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
3121 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
3122 ordinarily. This changes a bit when generating PIC. */
3125 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3127 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3129 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3131 else if (GET_CODE (addr) == PLUS)
3133 rs1 = XEXP (addr, 0);
3134 rs2 = XEXP (addr, 1);
/* Canonicalize.  REG comes first; if there are no regs,
3137 LO_SUM comes first. */
3139 && GET_CODE (rs1) != SUBREG
3141 || GET_CODE (rs2) == SUBREG
3142 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3144 rs1 = XEXP (addr, 1);
3145 rs2 = XEXP (addr, 0);
3149 && rs1 == pic_offset_table_rtx
3151 && GET_CODE (rs2) != SUBREG
3152 && GET_CODE (rs2) != LO_SUM
3153 && GET_CODE (rs2) != MEM
3154 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
3155 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3156 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3158 || GET_CODE (rs1) == SUBREG)
3159 && RTX_OK_FOR_OFFSET_P (rs2)))
3164 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3165 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3167 /* We prohibit REG + REG for TFmode when there are no quad move insns
3168 and we consequently need to split. We do this because REG+REG
3169 is not an offsettable address. If we get the situation in reload
3170 where source and destination of a movtf pattern are both MEMs with
3171 REG+REG address, then only one of them gets converted to an
3172 offsettable address. */
3174 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3177 /* We prohibit REG + REG on ARCH32 if not optimizing for
3178 DFmode/DImode because then mem_min_alignment is likely to be zero
after reload and the forced split would lack a matching splitter
pattern.  */
3181 if (TARGET_ARCH32 && !optimize
3182 && (mode == DFmode || mode == DImode))
3185 else if (USE_AS_OFFSETABLE_LO10
3186 && GET_CODE (rs1) == LO_SUM
3188 && ! TARGET_CM_MEDMID
3189 && RTX_OK_FOR_OLO10_P (rs2))
3192 imm1 = XEXP (rs1, 1);
3193 rs1 = XEXP (rs1, 0);
3194 if (!CONSTANT_P (imm1)
3195 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3199 else if (GET_CODE (addr) == LO_SUM)
3201 rs1 = XEXP (addr, 0);
3202 imm1 = XEXP (addr, 1);
3204 if (!CONSTANT_P (imm1)
3205 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3208 /* We can't allow TFmode in 32-bit mode, because an offset greater
3209 than the alignment (8) may cause the LO_SUM to overflow. */
3210 if (mode == TFmode && TARGET_ARCH32)
3213 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3218 if (GET_CODE (rs1) == SUBREG)
3219 rs1 = SUBREG_REG (rs1);
3225 if (GET_CODE (rs2) == SUBREG)
3226 rs2 = SUBREG_REG (rs2);
3233 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3234 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3239 if ((REGNO (rs1) >= 32
3240 && REGNO (rs1) != FRAME_POINTER_REGNUM
3241 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3243 && (REGNO (rs2) >= 32
3244 && REGNO (rs2) != FRAME_POINTER_REGNUM
3245 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3251 /* Return the SYMBOL_REF for the tls_get_addr function. */
3253 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
3256 sparc_tls_get_addr (void)
3258 if (!sparc_tls_symbol)
3259 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3261 return sparc_tls_symbol;
3264 /* Return the Global Offset Table to be used in TLS mode. */
3267 sparc_tls_got (void)
3269 /* In PIC mode, this is just the PIC offset table. */
3272 crtl->uses_pic_offset_table = 1;
3273 return pic_offset_table_rtx;
3276 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
3277 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
3278 if (TARGET_SUN_TLS && TARGET_ARCH32)
3280 load_got_register ();
3281 return global_offset_table_rtx;
3284 /* In all other cases, we load a new pseudo with the GOT symbol. */
3285 return copy_to_reg (sparc_got ());
3288 /* Return true if X contains a thread-local symbol. */
3291 sparc_tls_referenced_p (rtx x)
3293 if (!TARGET_HAVE_TLS)
3296 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3297 x = XEXP (XEXP (x, 0), 0);
3299 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3302 /* That's all we handle in sparc_legitimize_tls_address for now. */
3306 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3307 this (thread-local) address. */
3310 sparc_legitimize_tls_address (rtx addr)
3312 rtx temp1, temp2, temp3, ret, o0, got, insn;
3314 gcc_assert (can_create_pseudo_p ());
3316 if (GET_CODE (addr) == SYMBOL_REF)
3317 switch (SYMBOL_REF_TLS_MODEL (addr))
3319 case TLS_MODEL_GLOBAL_DYNAMIC:
3321 temp1 = gen_reg_rtx (SImode);
3322 temp2 = gen_reg_rtx (SImode);
3323 ret = gen_reg_rtx (Pmode);
3324 o0 = gen_rtx_REG (Pmode, 8);
3325 got = sparc_tls_got ();
3326 emit_insn (gen_tgd_hi22 (temp1, addr));
3327 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3330 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3331 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3336 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3337 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3340 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3341 insn = get_insns ();
3343 emit_libcall_block (insn, ret, o0, addr);
3346 case TLS_MODEL_LOCAL_DYNAMIC:
3348 temp1 = gen_reg_rtx (SImode);
3349 temp2 = gen_reg_rtx (SImode);
3350 temp3 = gen_reg_rtx (Pmode);
3351 ret = gen_reg_rtx (Pmode);
3352 o0 = gen_rtx_REG (Pmode, 8);
3353 got = sparc_tls_got ();
3354 emit_insn (gen_tldm_hi22 (temp1));
3355 emit_insn (gen_tldm_lo10 (temp2, temp1));
3358 emit_insn (gen_tldm_add32 (o0, got, temp2));
3359 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3364 emit_insn (gen_tldm_add64 (o0, got, temp2));
3365 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3368 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3369 insn = get_insns ();
3371 emit_libcall_block (insn, temp3, o0,
3372 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3373 UNSPEC_TLSLD_BASE));
3374 temp1 = gen_reg_rtx (SImode);
3375 temp2 = gen_reg_rtx (SImode);
3376 emit_insn (gen_tldo_hix22 (temp1, addr));
3377 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3379 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3381 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3384 case TLS_MODEL_INITIAL_EXEC:
3385 temp1 = gen_reg_rtx (SImode);
3386 temp2 = gen_reg_rtx (SImode);
3387 temp3 = gen_reg_rtx (Pmode);
3388 got = sparc_tls_got ();
3389 emit_insn (gen_tie_hi22 (temp1, addr));
3390 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3392 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3394 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3397 ret = gen_reg_rtx (Pmode);
3399 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3402 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3406 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3409 case TLS_MODEL_LOCAL_EXEC:
3410 temp1 = gen_reg_rtx (Pmode);
3411 temp2 = gen_reg_rtx (Pmode);
3414 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3415 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3419 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3420 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3422 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3429 else if (GET_CODE (addr) == CONST)
3433 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
3435 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
3436 offset = XEXP (XEXP (addr, 0), 1);
3438 base = force_operand (base, NULL_RTX);
3439 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
3440 offset = force_reg (Pmode, offset);
3441 ret = gen_rtx_PLUS (Pmode, base, offset);
3445 gcc_unreachable (); /* for now ... */
3450 /* Legitimize PIC addresses. If the address is already position-independent,
3451 we return ORIG. Newly generated position-independent addresses go into a
reg.  This is REG if nonzero, otherwise we allocate register(s) as
necessary.  */
3456 sparc_legitimize_pic_address (rtx orig, rtx reg)
3458 bool gotdata_op = false;
3460 if (GET_CODE (orig) == SYMBOL_REF
3461 /* See the comment in sparc_expand_move. */
3462 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
3464 rtx pic_ref, address;
3469 gcc_assert (! reload_in_progress && ! reload_completed);
3470 reg = gen_reg_rtx (Pmode);
3475 /* If not during reload, allocate another temp reg here for loading
in the address, so that these instructions can be optimized properly.  */
3478 rtx temp_reg = ((reload_in_progress || reload_completed)
3479 ? reg : gen_reg_rtx (Pmode));
3481 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3482 won't get confused into thinking that these two instructions
3483 are loading in the true address of the symbol. If in the
3484 future a PIC rtx exists, that should be used instead. */
3487 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3488 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3492 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3493 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
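/* The overall -fPIC sequence is thus roughly (editorial illustration):
     sethi %hi(sym), %tmp
     or    %tmp, %lo(sym), %tmp
     ld    [%l7 + %tmp], %reg
   with the load coming from the GOT reference built below.  */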
3501 crtl->uses_pic_offset_table = 1;
3505 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
3506 pic_offset_table_rtx,
3509 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
3510 pic_offset_table_rtx,
3516 = gen_const_mem (Pmode,
3517 gen_rtx_PLUS (Pmode,
3518 pic_offset_table_rtx, address));
3519 insn = emit_move_insn (reg, pic_ref);
/* Put a REG_EQUAL note on this insn, so that it can be optimized
   by loop.  */
3524 set_unique_reg_note (insn, REG_EQUAL, orig);
3527 else if (GET_CODE (orig) == CONST)
3531 if (GET_CODE (XEXP (orig, 0)) == PLUS
3532 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3537 gcc_assert (! reload_in_progress && ! reload_completed);
3538 reg = gen_reg_rtx (Pmode);
3541 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3542 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
3543 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
3544 base == reg ? NULL_RTX : reg);
3546 if (GET_CODE (offset) == CONST_INT)
3548 if (SMALL_INT (offset))
3549 return plus_constant (base, INTVAL (offset));
3550 else if (! reload_in_progress && ! reload_completed)
3551 offset = force_reg (Pmode, offset);
3553 /* If we reach here, then something is seriously wrong. */
3556 return gen_rtx_PLUS (Pmode, base, offset);
3558 else if (GET_CODE (orig) == LABEL_REF)
3559 /* ??? We ought to be checking that the register is live instead, in case
3560 it is eliminated. */
3561 crtl->uses_pic_offset_table = 1;
3566 /* Try machine-dependent ways of modifying an illegitimate address X
3567 to be legitimate. If we find one, return the new, valid address.
3569 OLDX is the address as it was before break_out_memory_refs was called.
3570 In some cases it is useful to look at this to decide what needs to be done.
3572 MODE is the mode of the operand pointed to by X.
3574 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
3577 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3578 enum machine_mode mode)
3582 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3583 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3584 force_operand (XEXP (x, 0), NULL_RTX));
3585 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3586 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3587 force_operand (XEXP (x, 1), NULL_RTX));
3588 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3589 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3591 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3592 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3593 force_operand (XEXP (x, 1), NULL_RTX));
3595 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3598 if (sparc_tls_referenced_p (x))
3599 x = sparc_legitimize_tls_address (x);
3601 x = sparc_legitimize_pic_address (x, NULL_RTX);
3602 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3603 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3604 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3605 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3606 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3607 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3608 else if (GET_CODE (x) == SYMBOL_REF
3609 || GET_CODE (x) == CONST
3610 || GET_CODE (x) == LABEL_REF)
3611 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3616 /* Delegitimize an address that was legitimized by the above function. */
3619 sparc_delegitimize_address (rtx x)
3621 x = delegitimize_mem_from_attrs (x);
3623 if (GET_CODE (x) == LO_SUM
3624 && GET_CODE (XEXP (x, 1)) == UNSPEC
3625 && XINT (XEXP (x, 1), 1) == UNSPEC_TLSLE)
3627 x = XVECEXP (XEXP (x, 1), 0, 0);
3628 gcc_assert (GET_CODE (x) == SYMBOL_REF);
3631 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
3632 if (GET_CODE (x) == MINUS
3633 && REG_P (XEXP (x, 0))
3634 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
3635 && GET_CODE (XEXP (x, 1)) == LO_SUM
3636 && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
3637 && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
3639 x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
3640 gcc_assert (GET_CODE (x) == LABEL_REF);
3646 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
3647 replace the input X, or the original X if no replacement is called for.
3648 The output parameter *WIN is 1 if the calling macro should goto WIN,
3651 For SPARC, we wish to handle addresses by splitting them into
3652 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
3653 This cuts the number of extra insns by one.
3655 Do nothing when generating PIC code and the address is a symbolic
3656 operand or requires a scratch register. */
3659 sparc_legitimize_reload_address (rtx x, enum machine_mode mode,
3660 int opnum, int type,
3661 int ind_levels ATTRIBUTE_UNUSED, int *win)
3663 /* Decompose SImode constants into HIGH+LO_SUM. */
3665 && (mode != TFmode || TARGET_ARCH64)
3666 && GET_MODE (x) == SImode
3667 && GET_CODE (x) != LO_SUM
3668 && GET_CODE (x) != HIGH
3669 && sparc_cmodel <= CM_MEDLOW
3671 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
3673 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
3674 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3675 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3676 opnum, (enum reload_type)type);
3681 /* We have to recognize what we have already generated above. */
3682 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
3684 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3685 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3686 opnum, (enum reload_type)type);
3695 /* Return true if ADDR (a legitimate address expression)
3696 has an effect that depends on the machine mode it is used for.
   In PIC mode,

      (mem:HI [%l7+a])

   is not equivalent to

      (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
3706 because [%l7+a+1] is interpreted as the address of (a+1). */
3710 sparc_mode_dependent_address_p (const_rtx addr)
3712 if (flag_pic && GET_CODE (addr) == PLUS)
3714 rtx op0 = XEXP (addr, 0);
3715 rtx op1 = XEXP (addr, 1);
3716 if (op0 == pic_offset_table_rtx
3717 && symbolic_operand (op1, VOIDmode))
3724 #ifdef HAVE_GAS_HIDDEN
3725 # define USE_HIDDEN_LINKONCE 1
3727 # define USE_HIDDEN_LINKONCE 0
3731 get_pc_thunk_name (char name[32], unsigned int regno)
3733 const char *reg_name = reg_names[regno];
/* Skip the leading '%' as that cannot be used in a symbol name.  */
3739 if (USE_HIDDEN_LINKONCE)
3740 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
3742 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
3745 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
3748 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2, rtx op3)
3750 int orig_flag_pic = flag_pic;
3753 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
3756 insn = gen_load_pcrel_symdi (op0, op1, op2, op3);
3758 insn = gen_load_pcrel_symsi (op0, op1, op2, op3);
3759 flag_pic = orig_flag_pic;
3764 /* Emit code to load the GOT register. */
3767 load_got_register (void)
3769 /* In PIC mode, this will retrieve pic_offset_table_rtx. */
3770 if (!global_offset_table_rtx)
3771 global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
3773 if (TARGET_VXWORKS_RTP)
3774 emit_insn (gen_vxworks_load_got ());
3777 /* The GOT symbol is subject to a PC-relative relocation so we need a
3778 helper function to add the PC value and thus get the final value. */
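/* The sequence is roughly (editorial illustration):
     sethi %hi(_GLOBAL_OFFSET_TABLE_-4), %l7
     call  __sparc_get_pc_thunk.l7
      add  %l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7
   where the thunk adds %o7, the address of the call, into %l7.  */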
3779 if (!got_helper_rtx)
3782 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
3783 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
3786 emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
3788 GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM)));
3791 /* Need to emit this whether or not we obey regdecls,
3792 since setjmp/longjmp can cause life info to screw up.
3793 ??? In the case where we don't obey regdecls, this is not sufficient
3794 since we may not fall out the bottom. */
3795 emit_use (global_offset_table_rtx);
3798 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3799 address of the call target. */
3802 sparc_emit_call_insn (rtx pat, rtx addr)
3806 insn = emit_call_insn (pat);
3808 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3809 if (TARGET_VXWORKS_RTP
3811 && GET_CODE (addr) == SYMBOL_REF
3812 && (SYMBOL_REF_DECL (addr)
3813 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3814 : !SYMBOL_REF_LOCAL_P (addr)))
3816 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3817 crtl->uses_pic_offset_table = 1;
3821 /* Return 1 if RTX is a MEM which is known to be aligned to at
3822 least a DESIRED byte boundary. */
3825 mem_min_alignment (rtx mem, int desired)
3827 rtx addr, base, offset;
3829 /* If it's not a MEM we can't accept it. */
3830 if (GET_CODE (mem) != MEM)
3834 if (!TARGET_UNALIGNED_DOUBLES
3835 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3838 /* ??? The rest of the function predates MEM_ALIGN so
3839 there is probably a bit of redundancy. */
3840 addr = XEXP (mem, 0);
3841 base = offset = NULL_RTX;
3842 if (GET_CODE (addr) == PLUS)
3844 if (GET_CODE (XEXP (addr, 0)) == REG)
3846 base = XEXP (addr, 0);
3848 /* What we are saying here is that if the base
3849 REG is aligned properly, the compiler will make
sure any REG based index upon it will be so as well.  */
3852 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3853 offset = XEXP (addr, 1);
3855 offset = const0_rtx;
3858 else if (GET_CODE (addr) == REG)
3861 offset = const0_rtx;
3864 if (base != NULL_RTX)
3866 int regno = REGNO (base);
3868 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3870 /* Check if the compiler has recorded some information
3871 about the alignment of the base REG. If reload has
3872 completed, we already matched with proper alignments.
3873 If not running global_alloc, reload might give us
3874 unaligned pointer to local stack though. */
3876 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3877 || (optimize && reload_completed))
3878 && (INTVAL (offset) & (desired - 1)) == 0)
3883 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3887 else if (! TARGET_UNALIGNED_DOUBLES
3888 || CONSTANT_P (addr)
3889 || GET_CODE (addr) == LO_SUM)
3891 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3892 is true, in which case we can only assume that an access is aligned if
3893 it is to a constant address, or the address involves a LO_SUM. */
3897 /* An obviously unaligned address. */
3902 /* Vectors to keep interesting information about registers where it can easily
3903 be got. We used to use the actual mode value as the bit number, but there
3904 are more than 32 modes now. Instead we use two tables: one indexed by
3905 hard register number, and one indexed by mode. */
3907 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3908 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3909 mapped into one sparc_mode_class mode. */
3911 enum sparc_mode_class {
3912 S_MODE, D_MODE, T_MODE, O_MODE,
3913 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3917 /* Modes for single-word and smaller quantities. */
3918 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3920 /* Modes for double-word and smaller quantities. */
3921 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3923 /* Modes for quad-word and smaller quantities. */
3924 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3926 /* Modes for 8-word and smaller quantities. */
3927 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3929 /* Modes for single-float quantities. We must allow any single word or
3930 smaller quantity. This is because the fix/float conversion instructions
3931 take integer inputs/outputs from the float registers. */
3932 #define SF_MODES (S_MODES)
3934 /* Modes for double-float and smaller quantities. */
3935 #define DF_MODES (D_MODES)
3937 /* Modes for quad-float and smaller quantities. */
3938 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
3940 /* Modes for quad-float pairs and smaller quantities. */
3941 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
3943 /* Modes for double-float only quantities. */
3944 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3946 /* Modes for quad-float and double-float only quantities. */
3947 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
3949 /* Modes for quad-float pairs and double-float only quantities. */
3950 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
3952 /* Modes for condition codes. */
3953 #define CC_MODES (1 << (int) CC_MODE)
3954 #define CCFP_MODES (1 << (int) CCFP_MODE)
3956 /* Value is 1 if register/mode pair is acceptable on sparc.
3957 The funny mixture of D and T modes is because integer operations
3958 do not specially operate on tetra quantities, so non-quad-aligned
3959 registers can hold quadword quantities (except %o4 and %i4 because
3960 they cross fixed registers). */
3962 /* This points to either the 32 bit or the 64 bit version. */
3963 const int *hard_regno_mode_classes;
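/* Editorial note: these tables are consumed by HARD_REGNO_MODE_OK in
   sparc.h, essentially as
     (hard_regno_mode_classes[REGNO] & sparc_mode_class[MODE]) != 0
   i.e. each register carries a mask of acceptable mode classes and each
   mode maps to a single class bit.  */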
3965 static const int hard_32bit_mode_classes[] = {
3966 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3967 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3968 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3969 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3971 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3972 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3973 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3974 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3976 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3977 and none can hold SFmode/SImode values. */
3978 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3979 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3980 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3981 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3984 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3990 static const int hard_64bit_mode_classes[] = {
3991 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3992 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3993 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3994 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3996 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3997 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3998 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3999 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4001 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4002 and none can hold SFmode/SImode values. */
4003 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4004 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4005 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4006 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4009 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4015 int sparc_mode_class [NUM_MACHINE_MODES];
4017 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
4020 sparc_init_modes (void)
4024 for (i = 0; i < NUM_MACHINE_MODES; i++)
4026 switch (GET_MODE_CLASS (i))
4029 case MODE_PARTIAL_INT:
4030 case MODE_COMPLEX_INT:
4031 if (GET_MODE_SIZE (i) <= 4)
4032 sparc_mode_class[i] = 1 << (int) S_MODE;
4033 else if (GET_MODE_SIZE (i) == 8)
4034 sparc_mode_class[i] = 1 << (int) D_MODE;
4035 else if (GET_MODE_SIZE (i) == 16)
4036 sparc_mode_class[i] = 1 << (int) T_MODE;
4037 else if (GET_MODE_SIZE (i) == 32)
4038 sparc_mode_class[i] = 1 << (int) O_MODE;
4040 sparc_mode_class[i] = 0;
4042 case MODE_VECTOR_INT:
4043 if (GET_MODE_SIZE (i) <= 4)
4044 sparc_mode_class[i] = 1 << (int)SF_MODE;
4045 else if (GET_MODE_SIZE (i) == 8)
4046 sparc_mode_class[i] = 1 << (int)DF_MODE;
4049 case MODE_COMPLEX_FLOAT:
4050 if (GET_MODE_SIZE (i) <= 4)
4051 sparc_mode_class[i] = 1 << (int) SF_MODE;
4052 else if (GET_MODE_SIZE (i) == 8)
4053 sparc_mode_class[i] = 1 << (int) DF_MODE;
4054 else if (GET_MODE_SIZE (i) == 16)
4055 sparc_mode_class[i] = 1 << (int) TF_MODE;
4056 else if (GET_MODE_SIZE (i) == 32)
4057 sparc_mode_class[i] = 1 << (int) OF_MODE;
4059 sparc_mode_class[i] = 0;
4062 if (i == (int) CCFPmode || i == (int) CCFPEmode)
4063 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
4065 sparc_mode_class[i] = 1 << (int) CC_MODE;
4068 sparc_mode_class[i] = 0;
4074 hard_regno_mode_classes = hard_64bit_mode_classes;
4076 hard_regno_mode_classes = hard_32bit_mode_classes;
4078 /* Initialize the array used by REGNO_REG_CLASS. */
4079 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4081 if (i < 16 && TARGET_V8PLUS)
4082 sparc_regno_reg_class[i] = I64_REGS;
4083 else if (i < 32 || i == FRAME_POINTER_REGNUM)
4084 sparc_regno_reg_class[i] = GENERAL_REGS;
4086 sparc_regno_reg_class[i] = FP_REGS;
4088 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
4090 sparc_regno_reg_class[i] = FPCC_REGS;
4092 sparc_regno_reg_class[i] = NO_REGS;
4096 /* Compute the frame size required by the function. This function is called
4097 during the reload pass and also by sparc_expand_prologue. */
4100 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
4102 int outgoing_args_size = (crtl->outgoing_args_size
4103 + REG_PARM_STACK_SPACE (current_function_decl));
4104 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
4109 for (i = 0; i < 8; i++)
4110 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4115 for (i = 0; i < 8; i += 2)
4116 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
4117 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
4121 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
4122 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
4123 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
4126 /* Set up values for use in prologue and epilogue. */
4127 num_gfregs = n_regs;
4132 && crtl->outgoing_args_size == 0)
4133 actual_fsize = apparent_fsize = 0;
4136 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
4137 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
4138 apparent_fsize += n_regs * 4;
4139 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
4142 /* Make sure nothing can clobber our register windows.
4143 If a SAVE must be done, or there is a stack-local variable,
4144 the register window area must be allocated. */
4145 if (! leaf_function_p || size > 0)
4146 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
4148 return SPARC_STACK_ALIGN (actual_fsize);
4151 /* Output any necessary .register pseudo-ops. */
4154 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
4156 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
4162 /* Check if %g[2367] were used without
4163 .register being printed for them already. */
4164 for (i = 2; i < 8; i++)
4166 if (df_regs_ever_live_p (i)
4167 && ! sparc_hard_reg_printed [i])
4169 sparc_hard_reg_printed [i] = 1;
4170 /* %g7 is used as TLS base register, use #ignore
4171 for it instead of #scratch. */
4172 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
4173 i == 7 ? "ignore" : "scratch");
4180 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
4182 #if PROBE_INTERVAL > 4096
4183 #error Cannot use indexed addressing mode for stack probing
4186 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
4187 inclusive. These are offsets from the current stack pointer.
4189 Note that we don't use the REG+REG addressing mode for the probes because
of the stack bias in 64-bit mode.  And it doesn't really buy us anything,
so the advantage of having a single code path wins out here.  */
4194 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
4196 rtx g1 = gen_rtx_REG (Pmode, 1);
4198 /* See if we have a constant small number of probes to generate. If so,
4199 that's the easy case. */
4200 if (size <= PROBE_INTERVAL)
4202 emit_move_insn (g1, GEN_INT (first));
4203 emit_insn (gen_rtx_SET (VOIDmode, g1,
4204 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4205 emit_stack_probe (plus_constant (g1, -size));
4208 /* The run-time loop is made up of 10 insns in the generic case while the
compile-time loop is made up of 4+2*(n-2) insns for n intervals.  */
4210 else if (size <= 5 * PROBE_INTERVAL)
4214 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
4215 emit_insn (gen_rtx_SET (VOIDmode, g1,
4216 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4217 emit_stack_probe (g1);
4219 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
4220 it exceeds SIZE. If only two probes are needed, this will not
4221 generate any code. Then probe at FIRST + SIZE. */
4222 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
4224 emit_insn (gen_rtx_SET (VOIDmode, g1,
4225 plus_constant (g1, -PROBE_INTERVAL)));
4226 emit_stack_probe (g1);
4229 emit_stack_probe (plus_constant (g1, (i - PROBE_INTERVAL) - size));
4232 /* Otherwise, do the same as above, but in a loop. Note that we must be
4233 extra careful with variables wrapping around because we might be at
4234 the very top (or the very bottom) of the address space and we have
4235 to be able to handle this case properly; in particular, we use an
4236 equality test for the loop condition. */
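/* Worked example (editorial): with PROBE_INTERVAL == 4096 and
   SIZE == 10000, ROUNDED_SIZE == 8192; the loop probes at FIRST + 4096
   and FIRST + 8192, and step 4 emits a final probe at FIRST + 10000.  */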
4239 HOST_WIDE_INT rounded_size;
4240 rtx g4 = gen_rtx_REG (Pmode, 4);
4242 emit_move_insn (g1, GEN_INT (first));
4245 /* Step 1: round SIZE to the previous multiple of the interval. */
4247 rounded_size = size & -PROBE_INTERVAL;
4248 emit_move_insn (g4, GEN_INT (rounded_size));
4251 /* Step 2: compute initial and final value of the loop counter. */
4253 /* TEST_ADDR = SP + FIRST. */
4254 emit_insn (gen_rtx_SET (VOIDmode, g1,
4255 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4257 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
4258 emit_insn (gen_rtx_SET (VOIDmode, g4, gen_rtx_MINUS (Pmode, g1, g4)));
4263 while (TEST_ADDR != LAST_ADDR)
4265 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
4269 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
4270 until it is equal to ROUNDED_SIZE. */
4273 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
4275 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
4278 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
4279 that SIZE is equal to ROUNDED_SIZE. */
4281 if (size != rounded_size)
4282 emit_stack_probe (plus_constant (g4, rounded_size - size));
4285 /* Make sure nothing is scheduled before we are done. */
4286 emit_insn (gen_blockage ());
4289 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
4290 absolute addresses. */
4293 output_probe_stack_range (rtx reg1, rtx reg2)
4295 static int labelno = 0;
4296 char loop_lab[32], end_lab[32];
4299 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
4300 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
4302 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
4304 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
4307 output_asm_insn ("cmp\t%0, %1", xops);
4309 fputs ("\tbe,pn\t%xcc,", asm_out_file);
4311 fputs ("\tbe\t", asm_out_file);
4312 assemble_name_raw (asm_out_file, end_lab);
4313 fputc ('\n', asm_out_file);
4315 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
4316 xops[1] = GEN_INT (-PROBE_INTERVAL);
4317 output_asm_insn (" add\t%0, %1, %0", xops);
4319 /* Probe at TEST_ADDR and branch. */
4321 fputs ("\tba,pt\t%xcc,", asm_out_file);
4323 fputs ("\tba\t", asm_out_file);
4324 assemble_name_raw (asm_out_file, loop_lab);
4325 fputc ('\n', asm_out_file);
4326 xops[1] = GEN_INT (SPARC_STACK_BIAS);
4327 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
4329 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
4334 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
4335 as needed. LOW should be double-word aligned for 32-bit registers.
4336 Return the new OFFSET. */
4339 #define SORR_RESTORE 1
4342 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
4347 if (TARGET_ARCH64 && high <= 32)
4349 for (i = low; i < high; i++)
4351 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4353 mem = gen_frame_mem (DImode, plus_constant (base, offset));
4354 if (action == SORR_SAVE)
4356 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4357 RTX_FRAME_RELATED_P (insn) = 1;
4359 else /* action == SORR_RESTORE */
4360 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4367 for (i = low; i < high; i += 2)
4369 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
4370 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
4371 enum machine_mode mode;
4376 mode = i < 32 ? DImode : DFmode;
4381 mode = i < 32 ? SImode : SFmode;
4386 mode = i < 32 ? SImode : SFmode;
4393 mem = gen_frame_mem (mode, plus_constant (base, offset));
4394 if (action == SORR_SAVE)
4396 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4397 RTX_FRAME_RELATED_P (insn) = 1;
4399 else /* action == SORR_RESTORE */
4400 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4402 /* Always preserve double-word alignment. */
4403 offset = (offset + 7) & -8;
4410 /* Emit code to save call-saved registers. */
4413 emit_save_or_restore_regs (int action)
4415 HOST_WIDE_INT offset;
4418 offset = frame_base_offset - apparent_fsize;
4420 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
4422 /* ??? This might be optimized a little as %g1 might already have a
4423 value close enough that a single add insn will do. */
4424 /* ??? Although, all of this is probably only a temporary fix
4425 because if %g1 can hold a function result, then
4426 sparc_expand_epilogue will lose (the result will be
4428 base = gen_rtx_REG (Pmode, 1);
4429 emit_move_insn (base, GEN_INT (offset));
4430 emit_insn (gen_rtx_SET (VOIDmode,
4432 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4436 base = frame_base_reg;
4438 offset = save_or_restore_regs (0, 8, base, offset, action);
4439 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
4442 /* Generate a save_register_window insn. */
4445 gen_save_register_window (rtx increment)
4448 return gen_save_register_windowdi (increment);
4450 return gen_save_register_windowsi (increment);
4453 /* Generate an increment for the stack pointer. */
4456 gen_stack_pointer_inc (rtx increment)
4458 return gen_rtx_SET (VOIDmode,
4460 gen_rtx_PLUS (Pmode,
4465 /* Generate a decrement for the stack pointer. */
4468 gen_stack_pointer_dec (rtx decrement)
4470 return gen_rtx_SET (VOIDmode,
4472 gen_rtx_MINUS (Pmode,
4477 /* Expand the function prologue. The prologue is responsible for reserving
4478 storage for the frame, saving the call-saved registers and loading the
4479 GOT register if needed. */
4482 sparc_expand_prologue (void)
4487 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4488 on the final value of the flag means deferring the prologue/epilogue
4489 expansion until just before the second scheduling pass, which is too
4490 late to emit multiple epilogues or return insns.
4492 Of course we are making the assumption that the value of the flag
4493 will not change between now and its final value. Of the three parts
4494 of the formula, only the last one can reasonably vary. Let's take a
4495 closer look, after assuming that the first two ones are set to true
4496 (otherwise the last value is effectively silenced).
4498 If only_leaf_regs_used returns false, the global predicate will also
4499 be false so the actual frame size calculated below will be positive.
4500 As a consequence, the save_register_window insn will be emitted in
4501 the instruction stream; now this insn explicitly references %fp
4502 which is not a leaf register so only_leaf_regs_used will always
4503 return false subsequently.
4505 If only_leaf_regs_used returns true, we hope that the subsequent
4506 optimization passes won't cause non-leaf registers to pop up. For
4507 example, the regrename pass has special provisions to not rename to
4508 non-leaf registers in a leaf function. */
4509 sparc_leaf_function_p
4510 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4512 /* Need to use actual_fsize, since we are also allocating
4513 space for our callee (and our own register save area). */
4515 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4517 /* Advertise that the data calculated just above are now valid. */
4518 sparc_prologue_data_valid_p = true;
4520 if (flag_stack_usage)
4521 current_function_static_stack_size = actual_fsize;
4523 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && actual_fsize)
4524 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, actual_fsize);
4526 if (sparc_leaf_function_p)
4528 frame_base_reg = stack_pointer_rtx;
4529 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4533 frame_base_reg = hard_frame_pointer_rtx;
4534 frame_base_offset = SPARC_STACK_BIAS;
4537 if (actual_fsize == 0)
4539 else if (sparc_leaf_function_p)
4541 if (actual_fsize <= 4096)
4542 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4543 else if (actual_fsize <= 8192)
4545 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4546 /* %sp is still the CFA register. */
4547 RTX_FRAME_RELATED_P (insn) = 1;
4549 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4553 rtx reg = gen_rtx_REG (Pmode, 1);
4554 emit_move_insn (reg, GEN_INT (-actual_fsize));
4555 insn = emit_insn (gen_stack_pointer_inc (reg));
4556 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4557 gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4560 RTX_FRAME_RELATED_P (insn) = 1;
4564 if (actual_fsize <= 4096)
4565 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4566 else if (actual_fsize <= 8192)
4568 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4569 /* %sp is not the CFA register anymore. */
4570 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4574 rtx reg = gen_rtx_REG (Pmode, 1);
4575 emit_move_insn (reg, GEN_INT (-actual_fsize));
4576 insn = emit_insn (gen_save_register_window (reg));
4579 RTX_FRAME_RELATED_P (insn) = 1;
4580 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
4581 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4585 emit_save_or_restore_regs (SORR_SAVE);
4587 /* Load the GOT register if needed. */
4588 if (crtl->uses_pic_offset_table)
4589 load_got_register ();
4592 /* This function generates the assembly code for function entry, which boils
4593 down to emitting the necessary .register directives. */
4596 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4598 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4599 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4601 sparc_output_scratch_registers (file);
4604 /* Expand the function epilogue, either normal or part of a sibcall.
4605 We emit all the instructions except the return or the call. */
4608 sparc_expand_epilogue (void)
4611 emit_save_or_restore_regs (SORR_RESTORE);
4613 if (actual_fsize == 0)
4615 else if (sparc_leaf_function_p)
4617 if (actual_fsize <= 4096)
4618 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4619 else if (actual_fsize <= 8192)
4621 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4622 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4626 rtx reg = gen_rtx_REG (Pmode, 1);
4627 emit_move_insn (reg, GEN_INT (-actual_fsize));
4628 emit_insn (gen_stack_pointer_dec (reg));
4633 /* Return true if it is appropriate to emit `return' instructions in the
4634 body of a function. */
4637 sparc_can_use_return_insn_p (void)
4639 return sparc_prologue_data_valid_p
4640 && (actual_fsize == 0 || !sparc_leaf_function_p);
4643 /* This function generates the assembly code for function exit. */
4646 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4648 /* If the last two instructions of a function are "call foo; dslot;"
4649 the return address might point to the first instruction in the next
4650 function and we have to output a dummy nop for the sake of sane
4651 backtraces in such cases. This is pointless for sibling calls since
4652 the return address is explicitly adjusted. */
4654 rtx insn, last_real_insn;
4656 insn = get_last_insn ();
4658 last_real_insn = prev_real_insn (insn);
4660 && GET_CODE (last_real_insn) == INSN
4661 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4662 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4665 && CALL_P (last_real_insn)
4666 && !SIBLING_CALL_P (last_real_insn))
4667 fputs("\tnop\n", file);
4669 sparc_output_deferred_case_vectors ();
4672 /* Output a 'restore' instruction. */
4675 output_restore (rtx pat)
4681 fputs ("\t restore\n", asm_out_file);
4685 gcc_assert (GET_CODE (pat) == SET);
4687 operands[0] = SET_DEST (pat);
4688 pat = SET_SRC (pat);
4690 switch (GET_CODE (pat))
4693 operands[1] = XEXP (pat, 0);
4694 operands[2] = XEXP (pat, 1);
4695 output_asm_insn (" restore %r1, %2, %Y0", operands);
4698 operands[1] = XEXP (pat, 0);
4699 operands[2] = XEXP (pat, 1);
4700 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4703 operands[1] = XEXP (pat, 0);
4704 gcc_assert (XEXP (pat, 1) == const1_rtx);
4705 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4709 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4714 /* Output a return. */
4717 output_return (rtx insn)
4719 if (sparc_leaf_function_p)
4721 /* This is a leaf function so we don't have to bother restoring the
4722 register window, which frees us from dealing with the convoluted
4723 semantics of restore/return. We simply output the jump to the
4724 return address and the insn in the delay slot (if any). */
4726 gcc_assert (! crtl->calls_eh_return);
4728 return "jmp\t%%o7+%)%#";
4732 /* This is a regular function so we have to restore the register window.
4733 We may have a pending insn for the delay slot, which will be either
4734 combined with the 'restore' instruction or put in the delay slot of
4735 the 'return' instruction. */
4737 if (crtl->calls_eh_return)
4739 /* If the function uses __builtin_eh_return, the eh_return
4740 machinery occupies the delay slot. */
4741 gcc_assert (! final_sequence);
4743 if (! flag_delayed_branch)
4744 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4747 fputs ("\treturn\t%i7+8\n", asm_out_file);
4749 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4751 if (flag_delayed_branch)
4752 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4754 fputs ("\t nop\n", asm_out_file);
4756 else if (final_sequence)
4760 delay = NEXT_INSN (insn);
4763 pat = PATTERN (delay);
4765 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4767 epilogue_renumber (&pat, 0);
4768 return "return\t%%i7+%)%#";
4772 output_asm_insn ("jmp\t%%i7+%)", NULL);
4773 output_restore (pat);
4774 PATTERN (delay) = gen_blockage ();
4775 INSN_CODE (delay) = -1;
4780 /* The delay slot is empty. */
4782 return "return\t%%i7+%)\n\t nop";
4783 else if (flag_delayed_branch)
4784 return "jmp\t%%i7+%)\n\t restore";
4786 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4793 /* Output a sibling call. */
4796 output_sibcall (rtx insn, rtx call_operand)
4800 gcc_assert (flag_delayed_branch);
4802 operands[0] = call_operand;
4804 if (sparc_leaf_function_p)
4806 /* This is a leaf function so we don't have to bother restoring the
4807 register window. We simply output the jump to the function and
4808 the insn in the delay slot (if any). */
4810 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4813 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4816 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4817 it into branch if possible. */
4818 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4823 /* This is a regular function so we have to restore the register window.
4824 We may have a pending insn for the delay slot, which will be combined
4825 with the 'restore' instruction. */
4827 output_asm_insn ("call\t%a0, 0", operands);
4831 rtx delay = NEXT_INSN (insn);
4834 output_restore (PATTERN (delay));
4836 PATTERN (delay) = gen_blockage ();
4837 INSN_CODE (delay) = -1;
4840 output_restore (NULL_RTX);
4846 /* Functions for handling argument passing.
4848 For 32-bit, the first 6 args are normally in registers and the rest are
4849 pushed. Any arg that starts within the first 6 words is at least
4850 partially passed in a register unless its data type forbids.
4852 For 64-bit, the argument registers are laid out as an array of 16 elements
4853 and arguments are added sequentially. The first 6 int args and up to the
4854 first 16 fp args (depending on size) are passed in regs.
4856 Slot Stack Integral Float Float in structure Double Long Double
4857 ---- ----- -------- ----- ------------------ ------ -----------
4858 15 [SP+248] %f31 %f30,%f31 %d30
4859 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4860 13 [SP+232] %f27 %f26,%f27 %d26
4861 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4862 11 [SP+216] %f23 %f22,%f23 %d22
4863 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4864 9 [SP+200] %f19 %f18,%f19 %d18
4865 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4866 7 [SP+184] %f15 %f14,%f15 %d14
4867 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4868 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4869 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4870 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4871 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4872 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4873 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4875 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4877 Integral arguments are always passed as 64-bit quantities appropriately
4880 Passing of floating point values is handled as follows.
4881 If a prototype is in scope:
4882 If the value is in a named argument (i.e. not a stdarg function or a
4883 value not part of the `...') then the value is passed in the appropriate
4885 If the value is part of the `...' and is passed in one of the first 6
4886 slots then the value is passed in the appropriate int reg.
4887 If the value is part of the `...' and is not passed in one of the first 6
4888 slots then the value is passed in memory.
4889 If a prototype is not in scope:
4890 If the value is one of the first 6 arguments the value is passed in the
4891 appropriate integer reg and the appropriate fp reg.
4892 If the value is not one of the first 6 arguments the value is passed in
4893 the appropriate fp reg and in memory.
4896 Summary of the calling conventions implemented by GCC on the SPARC:
4899 size argument return value
4901 small integer <4 int. reg. int. reg.
4902 word 4 int. reg. int. reg.
4903 double word 8 int. reg. int. reg.
4905 _Complex small integer <8 int. reg. int. reg.
4906 _Complex word 8 int. reg. int. reg.
4907 _Complex double word 16 memory int. reg.
4909 vector integer <=8 int. reg. FP reg.
4910 vector integer >8 memory memory
4912 float 4 int. reg. FP reg.
4913 double 8 int. reg. FP reg.
4914 long double 16 memory memory
4916 _Complex float 8 memory FP reg.
4917 _Complex double 16 memory FP reg.
4918 _Complex long double 32 memory FP reg.
4920 vector float any memory memory
4922 aggregate any memory memory
4927 size argument return value
4929 small integer <8 int. reg. int. reg.
4930 word 8 int. reg. int. reg.
4931 double word 16 int. reg. int. reg.
4933 _Complex small integer <16 int. reg. int. reg.
4934 _Complex word 16 int. reg. int. reg.
4935 _Complex double word 32 memory int. reg.
4937 vector integer <=16 FP reg. FP reg.
4938 vector integer 16<s<=32 memory FP reg.
4939 vector integer >32 memory memory
4941 float 4 FP reg. FP reg.
4942 double 8 FP reg. FP reg.
4943 long double 16 FP reg. FP reg.
4945 _Complex float 8 FP reg. FP reg.
4946 _Complex double 16 FP reg. FP reg.
4947 _Complex long double 32 memory FP reg.
4949 vector float <=16 FP reg. FP reg.
4950 vector float 16<s<=32 memory FP reg.
4951 vector float >32 memory memory
4953 aggregate <=16 reg. reg.
4954 aggregate 16<s<=32 memory reg.
4955 aggregate >32 memory memory
4959 Note #1: complex floating-point types follow the extended SPARC ABIs as
4960 implemented by the Sun compiler.
4962 Note #2: integral vector types follow the scalar floating-point types
4963 conventions to match what is implemented by the Sun VIS SDK.
4965 Note #3: floating-point vector types follow the aggregate types
4969 /* Maximum number of int regs for args. */
4970 #define SPARC_INT_ARG_MAX 6
4971 /* Maximum number of fp regs for args. */
4972 #define SPARC_FP_ARG_MAX 16
4974 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
4976 /* Handle the INIT_CUMULATIVE_ARGS macro.
4977 Initialize a variable CUM of type CUMULATIVE_ARGS
4978 for a call to a function whose data type is FNTYPE.
4979 For a library call, FNTYPE is 0. */
4982 init_cumulative_args (struct sparc_args *cum, tree fntype,
4983 rtx libname ATTRIBUTE_UNUSED,
4984 tree fndecl ATTRIBUTE_UNUSED)
4987 cum->prototype_p = fntype && prototype_p (fntype);
4988 cum->libcall_p = fntype == 0;
4991 /* Handle promotion of pointer and integer arguments. */
4993 static enum machine_mode
4994 sparc_promote_function_mode (const_tree type,
4995 enum machine_mode mode,
4997 const_tree fntype ATTRIBUTE_UNUSED,
4998 int for_return ATTRIBUTE_UNUSED)
5000 if (type != NULL_TREE && POINTER_TYPE_P (type))
5002 *punsignedp = POINTERS_EXTEND_UNSIGNED;
5006 /* Integral arguments are passed as full words, as per the ABI. */
5007 if (GET_MODE_CLASS (mode) == MODE_INT
5008 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5014 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
5017 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
5019 return TARGET_ARCH64 ? true : false;
5022 /* Scan the record type TYPE and return the following predicates:
5023 - INTREGS_P: the record contains at least one field or sub-field
5024 that is eligible for promotion in integer registers.
5025 - FP_REGS_P: the record contains at least one field or sub-field
5026 that is eligible for promotion in floating-point registers.
5027 - PACKED_P: the record contains at least one field that is packed.
5029 Sub-fields are not taken into account for the PACKED_P predicate. */
5032 scan_record_type (const_tree type, int *intregs_p, int *fpregs_p,
5037 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5039 if (TREE_CODE (field) == FIELD_DECL)
5041 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5042 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
5043 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5044 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5050 if (packed_p && DECL_PACKED (field))
5056 /* Compute the slot number to pass an argument in.
5057 Return the slot number or -1 if passing on the stack.
5059 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5060 the preceding args and about the function being called.
5061 MODE is the argument's machine mode.
5062 TYPE is the data type of the argument (as a tree).
5063 This is null for libcalls where that information may
5065 NAMED is nonzero if this argument is a named parameter
5066 (otherwise it is an extra parameter matching an ellipsis).
5067 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
5068 *PREGNO records the register number to use if scalar type.
5069 *PPADDING records the amount of padding needed in words. */
5072 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
5073 const_tree type, bool named, bool incoming_p,
5074 int *pregno, int *ppadding)
5076 int regbase = (incoming_p
5077 ? SPARC_INCOMING_INT_ARG_FIRST
5078 : SPARC_OUTGOING_INT_ARG_FIRST);
5079 int slotno = cum->words;
5080 enum mode_class mclass;
5085 if (type && TREE_ADDRESSABLE (type))
5091 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
5094 /* For SPARC64, objects requiring 16-byte alignment get it. */
5096 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
5097 && (slotno & 1) != 0)
5098 slotno++, *ppadding = 1;
5100 mclass = GET_MODE_CLASS (mode);
5101 if (type && TREE_CODE (type) == VECTOR_TYPE)
5103 /* Vector types deserve special treatment because they are
5104 polymorphic wrt their mode, depending upon whether VIS
5105 instructions are enabled. */
5106 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5108 /* The SPARC port defines no floating-point vector modes. */
5109 gcc_assert (mode == BLKmode);
5113 /* Integral vector types should either have a vector
5114 mode or an integral mode, because we are guaranteed
5115 by pass_by_reference that their size is not greater
5116 than 16 bytes and TImode is 16-byte wide. */
5117 gcc_assert (mode != BLKmode);
5119 /* Vector integers are handled like floats according to
5121 mclass = MODE_FLOAT;
5128 case MODE_COMPLEX_FLOAT:
5129 case MODE_VECTOR_INT:
5130 if (TARGET_ARCH64 && TARGET_FPU && named)
5132 if (slotno >= SPARC_FP_ARG_MAX)
5134 regno = SPARC_FP_ARG_FIRST + slotno * 2;
5135 /* Arguments filling only one single FP register are
5136 right-justified in the outer double FP register. */
5137 if (GET_MODE_SIZE (mode) <= 4)
5144 case MODE_COMPLEX_INT:
5145 if (slotno >= SPARC_INT_ARG_MAX)
5147 regno = regbase + slotno;
5151 if (mode == VOIDmode)
5152 /* MODE is VOIDmode when generating the actual call. */
5155 gcc_assert (mode == BLKmode);
5159 || (TREE_CODE (type) != VECTOR_TYPE
5160 && TREE_CODE (type) != RECORD_TYPE))
5162 if (slotno >= SPARC_INT_ARG_MAX)
5164 regno = regbase + slotno;
5166 else /* TARGET_ARCH64 && type */
5168 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
5170 /* First see what kinds of registers we would need. */
5171 if (TREE_CODE (type) == VECTOR_TYPE)
5174 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
5176 /* The ABI obviously doesn't specify how packed structures
5177 are passed. These are defined to be passed in int regs
5178 if possible, otherwise memory. */
5179 if (packed_p || !named)
5180 fpregs_p = 0, intregs_p = 1;
5182 /* If all arg slots are filled, then must pass on stack. */
5183 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
5186 /* If there are only int args and all int arg slots are filled,
5187 then must pass on stack. */
5188 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
5191 /* Note that even if all int arg slots are filled, fp members may
5192 still be passed in regs if such regs are available.
5193 *PREGNO isn't set because there may be more than one, it's up
5194 to the caller to compute them. */
5207 /* Handle recursive register counting for structure field layout. */
5209 struct function_arg_record_value_parms
5211 rtx ret; /* return expression being built. */
5212 int slotno; /* slot number of the argument. */
5213 int named; /* whether the argument is named. */
5214 int regbase; /* regno of the base register. */
5215 int stack; /* 1 if part of the argument is on the stack. */
5216 int intoffset; /* offset of the first pending integer field. */
5217 unsigned int nregs; /* number of words passed in registers. */
5220 static void function_arg_record_value_3
5221 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
5222 static void function_arg_record_value_2
5223 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5224 static void function_arg_record_value_1
5225 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5226 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
5227 static rtx function_arg_union_value (int, enum machine_mode, int, int);
5229 /* A subroutine of function_arg_record_value. Traverse the structure
5230 recursively and determine how many registers will be required. */
5233 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
5234 struct function_arg_record_value_parms *parms,
5239 /* We need to compute how many registers are needed so we can
5240 allocate the PARALLEL but before we can do that we need to know
5241 whether there are any packed fields. The ABI obviously doesn't
5242 specify how structures are passed in this case, so they are
5243 defined to be passed in int regs if possible, otherwise memory,
5244 regardless of whether there are fp values present. */
5247 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5249 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5256 /* Compute how many registers we need. */
5257 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5259 if (TREE_CODE (field) == FIELD_DECL)
5261 HOST_WIDE_INT bitpos = startbitpos;
5263 if (DECL_SIZE (field) != 0)
5265 if (integer_zerop (DECL_SIZE (field)))
5268 if (host_integerp (bit_position (field), 1))
5269 bitpos += int_bit_position (field);
5272 /* ??? FIXME: else assume zero offset. */
5274 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5275 function_arg_record_value_1 (TREE_TYPE (field),
5279 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5280 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5285 if (parms->intoffset != -1)
5287 unsigned int startbit, endbit;
5288 int intslots, this_slotno;
5290 startbit = parms->intoffset & -BITS_PER_WORD;
5291 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5293 intslots = (endbit - startbit) / BITS_PER_WORD;
5294 this_slotno = parms->slotno + parms->intoffset
5297 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5299 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5300 /* We need to pass this field on the stack. */
5304 parms->nregs += intslots;
5305 parms->intoffset = -1;
5308 /* There's no need to check this_slotno < SPARC_FP_ARG MAX.
5309 If it wasn't true we wouldn't be here. */
5310 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5311 && DECL_MODE (field) == BLKmode)
5312 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5313 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5320 if (parms->intoffset == -1)
5321 parms->intoffset = bitpos;
5327 /* A subroutine of function_arg_record_value. Assign the bits of the
5328 structure between parms->intoffset and bitpos to integer registers. */
5331 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
5332 struct function_arg_record_value_parms *parms)
5334 enum machine_mode mode;
5336 unsigned int startbit, endbit;
5337 int this_slotno, intslots, intoffset;
5340 if (parms->intoffset == -1)
5343 intoffset = parms->intoffset;
5344 parms->intoffset = -1;
5346 startbit = intoffset & -BITS_PER_WORD;
5347 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5348 intslots = (endbit - startbit) / BITS_PER_WORD;
5349 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5351 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5355 /* If this is the trailing part of a word, only load that much into
5356 the register. Otherwise load the whole register. Note that in
5357 the latter case we may pick up unwanted bits. It's not a problem
5358 at the moment but may wish to revisit. */
5360 if (intoffset % BITS_PER_WORD != 0)
5361 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5366 intoffset /= BITS_PER_UNIT;
5369 regno = parms->regbase + this_slotno;
5370 reg = gen_rtx_REG (mode, regno);
5371 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5372 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5375 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5380 while (intslots > 0);
5383 /* A subroutine of function_arg_record_value. Traverse the structure
5384 recursively and assign bits to floating point registers. Track which
5385 bits in between need integer registers; invoke function_arg_record_value_3
5386 to make that happen. */
5389 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5390 struct function_arg_record_value_parms *parms,
5396 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5398 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5405 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5407 if (TREE_CODE (field) == FIELD_DECL)
5409 HOST_WIDE_INT bitpos = startbitpos;
5411 if (DECL_SIZE (field) != 0)
5413 if (integer_zerop (DECL_SIZE (field)))
5416 if (host_integerp (bit_position (field), 1))
5417 bitpos += int_bit_position (field);
5420 /* ??? FIXME: else assume zero offset. */
5422 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5423 function_arg_record_value_2 (TREE_TYPE (field),
5427 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5428 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5433 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5434 int regno, nregs, pos;
5435 enum machine_mode mode = DECL_MODE (field);
5438 function_arg_record_value_3 (bitpos, parms);
5440 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5443 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5444 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5446 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5448 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5454 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5455 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5457 reg = gen_rtx_REG (mode, regno);
5458 pos = bitpos / BITS_PER_UNIT;
5459 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5460 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5464 regno += GET_MODE_SIZE (mode) / 4;
5465 reg = gen_rtx_REG (mode, regno);
5466 pos += GET_MODE_SIZE (mode);
5467 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5468 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5474 if (parms->intoffset == -1)
5475 parms->intoffset = bitpos;
5481 /* Used by function_arg and sparc_function_value_1 to implement the complex
5482 conventions of the 64-bit ABI for passing and returning structures.
5483 Return an expression valid as a return value for the FUNCTION_ARG
5484 and TARGET_FUNCTION_VALUE.
5486 TYPE is the data type of the argument (as a tree).
5487 This is null for libcalls where that information may
5489 MODE is the argument's machine mode.
5490 SLOTNO is the index number of the argument's slot in the parameter array.
5491 NAMED is nonzero if this argument is a named parameter
5492 (otherwise it is an extra parameter matching an ellipsis).
5493 REGBASE is the regno of the base register for the parameter array. */
5496 function_arg_record_value (const_tree type, enum machine_mode mode,
5497 int slotno, int named, int regbase)
5499 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5500 struct function_arg_record_value_parms parms;
5503 parms.ret = NULL_RTX;
5504 parms.slotno = slotno;
5505 parms.named = named;
5506 parms.regbase = regbase;
5509 /* Compute how many registers we need. */
5511 parms.intoffset = 0;
5512 function_arg_record_value_1 (type, 0, &parms, false);
5514 /* Take into account pending integer fields. */
5515 if (parms.intoffset != -1)
5517 unsigned int startbit, endbit;
5518 int intslots, this_slotno;
5520 startbit = parms.intoffset & -BITS_PER_WORD;
5521 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5522 intslots = (endbit - startbit) / BITS_PER_WORD;
5523 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5525 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5527 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5528 /* We need to pass this field on the stack. */
5532 parms.nregs += intslots;
5534 nregs = parms.nregs;
5536 /* Allocate the vector and handle some annoying special cases. */
5539 /* ??? Empty structure has no value? Duh? */
5542 /* Though there's nothing really to store, return a word register
5543 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5544 leads to breakage due to the fact that there are zero bytes to
5546 return gen_rtx_REG (mode, regbase);
5550 /* ??? C++ has structures with no fields, and yet a size. Give up
5551 for now and pass everything back in integer registers. */
5552 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5554 if (nregs + slotno > SPARC_INT_ARG_MAX)
5555 nregs = SPARC_INT_ARG_MAX - slotno;
5557 gcc_assert (nregs != 0);
5559 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5561 /* If at least one field must be passed on the stack, generate
5562 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5563 also be passed on the stack. We can't do much better because the
5564 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5565 of structures for which the fields passed exclusively in registers
5566 are not at the beginning of the structure. */
5568 XVECEXP (parms.ret, 0, 0)
5569 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5571 /* Fill in the entries. */
5573 parms.intoffset = 0;
5574 function_arg_record_value_2 (type, 0, &parms, false);
5575 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5577 gcc_assert (parms.nregs == nregs);
5582 /* Used by function_arg and sparc_function_value_1 to implement the conventions
5583 of the 64-bit ABI for passing and returning unions.
5584 Return an expression valid as a return value for the FUNCTION_ARG
5585 and TARGET_FUNCTION_VALUE.
5587 SIZE is the size in bytes of the union.
5588 MODE is the argument's machine mode.
5589 REGNO is the hard register the union will be passed in. */
5592 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5595 int nwords = ROUND_ADVANCE (size), i;
5598 /* See comment in previous function for empty structures. */
5600 return gen_rtx_REG (mode, regno);
5602 if (slotno == SPARC_INT_ARG_MAX - 1)
5605 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5607 for (i = 0; i < nwords; i++)
5609 /* Unions are passed left-justified. */
5610 XVECEXP (regs, 0, i)
5611 = gen_rtx_EXPR_LIST (VOIDmode,
5612 gen_rtx_REG (word_mode, regno),
5613 GEN_INT (UNITS_PER_WORD * i));
5620 /* Used by function_arg and sparc_function_value_1 to implement the conventions
5621 for passing and returning large (BLKmode) vectors.
5622 Return an expression valid as a return value for the FUNCTION_ARG
5623 and TARGET_FUNCTION_VALUE.
5625 SIZE is the size in bytes of the vector (at least 8 bytes).
5626 REGNO is the FP hard register the vector will be passed in. */
5629 function_arg_vector_value (int size, int regno)
5631 int i, nregs = size / 8;
5634 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5636 for (i = 0; i < nregs; i++)
5638 XVECEXP (regs, 0, i)
5639 = gen_rtx_EXPR_LIST (VOIDmode,
5640 gen_rtx_REG (DImode, regno + 2*i),
5647 /* Determine where to put an argument to a function.
5648 Value is zero to push the argument on the stack,
5649 or a hard register in which to store the argument.
5651 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5652 the preceding args and about the function being called.
5653 MODE is the argument's machine mode.
5654 TYPE is the data type of the argument (as a tree).
5655 This is null for libcalls where that information may
5657 NAMED is true if this argument is a named parameter
5658 (otherwise it is an extra parameter matching an ellipsis).
5659 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
5660 TARGET_FUNCTION_INCOMING_ARG. */
5663 sparc_function_arg_1 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
5664 const_tree type, bool named, bool incoming_p)
5666 int regbase = (incoming_p
5667 ? SPARC_INCOMING_INT_ARG_FIRST
5668 : SPARC_OUTGOING_INT_ARG_FIRST);
5669 int slotno, regno, padding;
5670 enum mode_class mclass = GET_MODE_CLASS (mode);
5672 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5677 /* Vector types deserve special treatment because they are polymorphic wrt
5678 their mode, depending upon whether VIS instructions are enabled. */
5679 if (type && TREE_CODE (type) == VECTOR_TYPE)
5681 HOST_WIDE_INT size = int_size_in_bytes (type);
5682 gcc_assert ((TARGET_ARCH32 && size <= 8)
5683 || (TARGET_ARCH64 && size <= 16));
5685 if (mode == BLKmode)
5686 return function_arg_vector_value (size,
5687 SPARC_FP_ARG_FIRST + 2*slotno);
5689 mclass = MODE_FLOAT;
5693 return gen_rtx_REG (mode, regno);
5695 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5696 and are promoted to registers if possible. */
5697 if (type && TREE_CODE (type) == RECORD_TYPE)
5699 HOST_WIDE_INT size = int_size_in_bytes (type);
5700 gcc_assert (size <= 16);
5702 return function_arg_record_value (type, mode, slotno, named, regbase);
5705 /* Unions up to 16 bytes in size are passed in integer registers. */
5706 else if (type && TREE_CODE (type) == UNION_TYPE)
5708 HOST_WIDE_INT size = int_size_in_bytes (type);
5709 gcc_assert (size <= 16);
5711 return function_arg_union_value (size, mode, slotno, regno);
5714 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5715 but also have the slot allocated for them.
5716 If no prototype is in scope fp values in register slots get passed
5717 in two places, either fp regs and int regs or fp regs and memory. */
5718 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5719 && SPARC_FP_REG_P (regno))
5721 rtx reg = gen_rtx_REG (mode, regno);
5722 if (cum->prototype_p || cum->libcall_p)
5724 /* "* 2" because fp reg numbers are recorded in 4 byte
5727 /* ??? This will cause the value to be passed in the fp reg and
5728 in the stack. When a prototype exists we want to pass the
5729 value in the reg but reserve space on the stack. That's an
5730 optimization, and is deferred [for a bit]. */
5731 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5732 return gen_rtx_PARALLEL (mode,
5734 gen_rtx_EXPR_LIST (VOIDmode,
5735 NULL_RTX, const0_rtx),
5736 gen_rtx_EXPR_LIST (VOIDmode,
5740 /* ??? It seems that passing back a register even when past
5741 the area declared by REG_PARM_STACK_SPACE will allocate
5742 space appropriately, and will not copy the data onto the
5743 stack, exactly as we desire.
5745 This is due to locate_and_pad_parm being called in
5746 expand_call whenever reg_parm_stack_space > 0, which
5747 while beneficial to our example here, would seem to be
5748 in error from what had been intended. Ho hum... -- r~ */
5756 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5760 /* On incoming, we don't need to know that the value
5761 is passed in %f0 and %i0, and it confuses other parts
5762 causing needless spillage even on the simplest cases. */
5766 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5767 + (regno - SPARC_FP_ARG_FIRST) / 2);
5769 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5770 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5772 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5776 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5777 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5778 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5783 /* All other aggregate types are passed in an integer register in a mode
5784 corresponding to the size of the type. */
5785 else if (type && AGGREGATE_TYPE_P (type))
5787 HOST_WIDE_INT size = int_size_in_bytes (type);
5788 gcc_assert (size <= 16);
5790 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5793 return gen_rtx_REG (mode, regno);
5796 /* Handle the TARGET_FUNCTION_ARG target hook. */
5799 sparc_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5800 const_tree type, bool named)
5802 return sparc_function_arg_1 (cum, mode, type, named, false);
5805 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
5808 sparc_function_incoming_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5809 const_tree type, bool named)
5811 return sparc_function_arg_1 (cum, mode, type, named, true);
5814 /* For sparc64, objects requiring 16 byte alignment are passed that way. */
5817 sparc_function_arg_boundary (enum machine_mode mode, const_tree type)
5819 return ((TARGET_ARCH64
5820 && (GET_MODE_ALIGNMENT (mode) == 128
5821 || (type && TYPE_ALIGN (type) == 128)))
5826 /* For an arg passed partly in registers and partly in memory,
5827 this is the number of bytes of registers used.
5828 For args passed entirely in registers or entirely in memory, zero.
5830 Any arg that starts in the first 6 regs but won't entirely fit in them
5831 needs partial registers on v8. On v9, structures with integer
5832 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5833 values that begin in the last fp reg [where "last fp reg" varies with the
5834 mode] will be split between that reg and memory. */
5837 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5838 tree type, bool named)
5840 int slotno, regno, padding;
5842 /* We pass false for incoming_p here, it doesn't matter. */
5843 slotno = function_arg_slotno (cum, mode, type, named, false,
5851 if ((slotno + (mode == BLKmode
5852 ? ROUND_ADVANCE (int_size_in_bytes (type))
5853 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5854 > SPARC_INT_ARG_MAX)
5855 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5859 /* We are guaranteed by pass_by_reference that the size of the
5860 argument is not greater than 16 bytes, so we only need to return
5861 one word if the argument is partially passed in registers. */
5863 if (type && AGGREGATE_TYPE_P (type))
5865 int size = int_size_in_bytes (type);
5867 if (size > UNITS_PER_WORD
5868 && slotno == SPARC_INT_ARG_MAX - 1)
5869 return UNITS_PER_WORD;
5871 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5872 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5873 && ! (TARGET_FPU && named)))
5875 /* The complex types are passed as packed types. */
5876 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5877 && slotno == SPARC_INT_ARG_MAX - 1)
5878 return UNITS_PER_WORD;
5880 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5882 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5884 return UNITS_PER_WORD;
5891 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5892 Specify whether to pass the argument by reference. */
5895 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5896 enum machine_mode mode, const_tree type,
5897 bool named ATTRIBUTE_UNUSED)
5900 /* Original SPARC 32-bit ABI says that structures and unions,
5901 and quad-precision floats are passed by reference. For Pascal,
5902 also pass arrays by reference. All other base types are passed
5905 Extended ABI (as implemented by the Sun compiler) says that all
5906 complex floats are passed by reference. Pass complex integers
5907 in registers up to 8 bytes. More generally, enforce the 2-word
5908 cap for passing arguments in registers.
5910 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5911 integers are passed like floats of the same size, that is in
5912 registers up to 8 bytes. Pass all vector floats by reference
5913 like structure and unions. */
5914 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5916 /* Catch CDImode, TFmode, DCmode and TCmode. */
5917 || GET_MODE_SIZE (mode) > 8
5919 && TREE_CODE (type) == VECTOR_TYPE
5920 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5922 /* Original SPARC 64-bit ABI says that structures and unions
5923 smaller than 16 bytes are passed in registers, as well as
5924 all other base types.
5926 Extended ABI (as implemented by the Sun compiler) says that
5927 complex floats are passed in registers up to 16 bytes. Pass
5928 all complex integers in registers up to 16 bytes. More generally,
5929 enforce the 2-word cap for passing arguments in registers.
5931 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5932 integers are passed like floats of the same size, that is in
5933 registers (up to 16 bytes). Pass all vector floats like structure
5936 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5937 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5938 /* Catch CTImode and TCmode. */
5939 || GET_MODE_SIZE (mode) > 16);
5942 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
5943 Update the data in CUM to advance over an argument
5944 of mode MODE and data type TYPE.
5945 TYPE is null for libcalls where that information may not be available. */
5948 sparc_function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5949 const_tree type, bool named)
5953 /* We pass false for incoming_p here, it doesn't matter. */
5954 function_arg_slotno (cum, mode, type, named, false, ®no, &padding);
5956 /* If argument requires leading padding, add it. */
5957 cum->words += padding;
5961 cum->words += (mode != BLKmode
5962 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5963 : ROUND_ADVANCE (int_size_in_bytes (type)));
5967 if (type && AGGREGATE_TYPE_P (type))
5969 int size = int_size_in_bytes (type);
5973 else if (size <= 16)
5975 else /* passed by reference */
5980 cum->words += (mode != BLKmode
5981 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5982 : ROUND_ADVANCE (int_size_in_bytes (type)));
5987 /* Handle the FUNCTION_ARG_PADDING macro.
5988 For the 64 bit ABI structs are always stored left shifted in their
5992 function_arg_padding (enum machine_mode mode, const_tree type)
5994 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5997 /* Fall back to the default. */
5998 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
6001 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
6002 Specify whether to return the return value in memory. */
6005 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6008 /* Original SPARC 32-bit ABI says that structures and unions,
6009 and quad-precision floats are returned in memory. All other
6010 base types are returned in registers.
6012 Extended ABI (as implemented by the Sun compiler) says that
6013 all complex floats are returned in registers (8 FP registers
6014 at most for '_Complex long double'). Return all complex integers
6015 in registers (4 at most for '_Complex long long').
6017 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6018 integers are returned like floats of the same size, that is in
6019 registers up to 8 bytes and in memory otherwise. Return all
6020 vector floats in memory like structure and unions; note that
6021 they always have BLKmode like the latter. */
6022 return (TYPE_MODE (type) == BLKmode
6023 || TYPE_MODE (type) == TFmode
6024 || (TREE_CODE (type) == VECTOR_TYPE
6025 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6027 /* Original SPARC 64-bit ABI says that structures and unions
6028 smaller than 32 bytes are returned in registers, as well as
6029 all other base types.
6031 Extended ABI (as implemented by the Sun compiler) says that all
6032 complex floats are returned in registers (8 FP registers at most
6033 for '_Complex long double'). Return all complex integers in
6034 registers (4 at most for '_Complex TItype').
6036 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6037 integers are returned like floats of the same size, that is in
6038 registers. Return all vector floats like structure and unions;
6039 note that they always have BLKmode like the latter. */
6040 return (TYPE_MODE (type) == BLKmode
6041 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
6044 /* Handle the TARGET_STRUCT_VALUE target hook.
6045 Return where to find the structure return value address. */
6048 sparc_struct_value_rtx (tree fndecl, int incoming)
6057 mem = gen_frame_mem (Pmode, plus_constant (frame_pointer_rtx,
6058 STRUCT_VALUE_OFFSET));
6060 mem = gen_frame_mem (Pmode, plus_constant (stack_pointer_rtx,
6061 STRUCT_VALUE_OFFSET));
6063 /* Only follow the SPARC ABI for fixed-size structure returns.
6064 Variable size structure returns are handled per the normal
6065 procedures in GCC. This is enabled by -mstd-struct-return */
6067 && sparc_std_struct_return
6068 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
6069 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
6071 /* We must check and adjust the return address, as it is
6072 optional as to whether the return object is really
6074 rtx ret_reg = gen_rtx_REG (Pmode, 31);
6075 rtx scratch = gen_reg_rtx (SImode);
6076 rtx endlab = gen_label_rtx ();
6078 /* Calculate the return object size */
6079 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
6080 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
6081 /* Construct a temporary return value */
6083 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
6085 /* Implement SPARC 32-bit psABI callee return struct checking:
6087 Fetch the instruction where we will return to and see if
6088 it's an unimp instruction (the most significant 10 bits
6090 emit_move_insn (scratch, gen_rtx_MEM (SImode,
6091 plus_constant (ret_reg, 8)));
6092 /* Assume the size is valid and pre-adjust */
6093 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
6094 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
6096 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
6097 /* Write the address of the memory pointed to by temp_val into
6098 the memory pointed to by mem */
6099 emit_move_insn (mem, XEXP (temp_val, 0));
6100 emit_label (endlab);
6107 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
6108 For v9, function return values are subject to the same rules as arguments,
6109 except that up to 32 bytes may be returned in registers. */
6112 sparc_function_value_1 (const_tree type, enum machine_mode mode,
6115 /* Beware that the two values are swapped here wrt function_arg. */
6116 int regbase = (outgoing
6117 ? SPARC_INCOMING_INT_ARG_FIRST
6118 : SPARC_OUTGOING_INT_ARG_FIRST);
6119 enum mode_class mclass = GET_MODE_CLASS (mode);
6122 /* Vector types deserve special treatment because they are polymorphic wrt
6123 their mode, depending upon whether VIS instructions are enabled. */
6124 if (type && TREE_CODE (type) == VECTOR_TYPE)
6126 HOST_WIDE_INT size = int_size_in_bytes (type);
6127 gcc_assert ((TARGET_ARCH32 && size <= 8)
6128 || (TARGET_ARCH64 && size <= 32));
6130 if (mode == BLKmode)
6131 return function_arg_vector_value (size,
6132 SPARC_FP_ARG_FIRST);
6134 mclass = MODE_FLOAT;
6137 if (TARGET_ARCH64 && type)
6139 /* Structures up to 32 bytes in size are returned in registers. */
6140 if (TREE_CODE (type) == RECORD_TYPE)
6142 HOST_WIDE_INT size = int_size_in_bytes (type);
6143 gcc_assert (size <= 32);
6145 return function_arg_record_value (type, mode, 0, 1, regbase);
6148 /* Unions up to 32 bytes in size are returned in integer registers. */
6149 else if (TREE_CODE (type) == UNION_TYPE)
6151 HOST_WIDE_INT size = int_size_in_bytes (type);
6152 gcc_assert (size <= 32);
6154 return function_arg_union_value (size, mode, 0, regbase);
6157 /* Objects that require it are returned in FP registers. */
6158 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6161 /* All other aggregate types are returned in an integer register in a
6162 mode corresponding to the size of the type. */
6163 else if (AGGREGATE_TYPE_P (type))
6165 /* All other aggregate types are passed in an integer register
6166 in a mode corresponding to the size of the type. */
6167 HOST_WIDE_INT size = int_size_in_bytes (type);
6168 gcc_assert (size <= 32);
6170 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6172 /* ??? We probably should have made the same ABI change in
6173 3.4.0 as the one we made for unions. The latter was
6174 required by the SCD though, while the former is not
6175 specified, so we favored compatibility and efficiency.
6177 Now we're stuck for aggregates larger than 16 bytes,
6178 because OImode vanished in the meantime. Let's not
6179 try to be unduly clever, and simply follow the ABI
6180 for unions in that case. */
6181 if (mode == BLKmode)
6182 return function_arg_union_value (size, mode, 0, regbase);
6187 /* We should only have pointer and integer types at this point. This
6188 must match sparc_promote_function_mode. */
6189 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6193 /* We should only have pointer and integer types at this point. This must
6194 match sparc_promote_function_mode. */
6195 else if (TARGET_ARCH32
6196 && mclass == MODE_INT
6197 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6200 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
6201 regno = SPARC_FP_ARG_FIRST;
6205 return gen_rtx_REG (mode, regno);
6208 /* Handle TARGET_FUNCTION_VALUE.
6209 On the SPARC, the value is found in the first "output" register, but the
6210 called function leaves it in the first "input" register. */
6213 sparc_function_value (const_tree valtype,
6214 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
6217 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
6220 /* Handle TARGET_LIBCALL_VALUE. */
6223 sparc_libcall_value (enum machine_mode mode,
6224 const_rtx fun ATTRIBUTE_UNUSED)
6226 return sparc_function_value_1 (NULL_TREE, mode, false);
6229 /* Handle FUNCTION_VALUE_REGNO_P.
6230 On the SPARC, the first "output" reg is used for integer values, and the
6231 first floating point register is used for floating point values. */
6234 sparc_function_value_regno_p (const unsigned int regno)
6236 return (regno == 8 || regno == 32);
6239 /* Do what is necessary for `va_start'. We look at the current function
6240 to determine if stdarg or varargs is used and return the address of
6241 the first unnamed parameter. */
6244 sparc_builtin_saveregs (void)
6246 int first_reg = crtl->args.info.words;
6250 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
6251 emit_move_insn (gen_rtx_MEM (word_mode,
6252 gen_rtx_PLUS (Pmode,
6254 GEN_INT (FIRST_PARM_OFFSET (0)
6257 gen_rtx_REG (word_mode,
6258 SPARC_INCOMING_INT_ARG_FIRST + regno));
6260 address = gen_rtx_PLUS (Pmode,
6262 GEN_INT (FIRST_PARM_OFFSET (0)
6263 + UNITS_PER_WORD * first_reg));
6268 /* Implement `va_start' for stdarg. */
6271 sparc_va_start (tree valist, rtx nextarg)
6273 nextarg = expand_builtin_saveregs ();
6274 std_expand_builtin_va_start (valist, nextarg);
6277 /* Implement `va_arg' for stdarg. */
6280 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6283 HOST_WIDE_INT size, rsize, align;
6286 tree ptrtype = build_pointer_type (type);
6288 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6291 size = rsize = UNITS_PER_WORD;
6297 size = int_size_in_bytes (type);
6298 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6303 /* For SPARC64, objects requiring 16-byte alignment get it. */
6304 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
6305 align = 2 * UNITS_PER_WORD;
6307 /* SPARC-V9 ABI states that structures up to 16 bytes in size
6308 are left-justified in their slots. */
6309 if (AGGREGATE_TYPE_P (type))
6312 size = rsize = UNITS_PER_WORD;
6322 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
6323 size_int (align - 1));
6324 incr = fold_convert (sizetype, incr);
6325 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
6327 incr = fold_convert (ptr_type_node, incr);
6330 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
6333 if (BYTES_BIG_ENDIAN && size < rsize)
6334 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
6335 size_int (rsize - size));
6339 addr = fold_convert (build_pointer_type (ptrtype), addr);
6340 addr = build_va_arg_indirect_ref (addr);
6343 /* If the address isn't aligned properly for the type, we need a temporary.
6344 FIXME: This is inefficient, usually we can do this in registers. */
6345 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
6347 tree tmp = create_tmp_var (type, "va_arg_tmp");
6348 tree dest_addr = build_fold_addr_expr (tmp);
6349 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
6350 3, dest_addr, addr, size_int (rsize));
6351 TREE_ADDRESSABLE (tmp) = 1;
6352 gimplify_and_add (copy, pre_p);
6357 addr = fold_convert (ptrtype, addr);
6360 = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
6361 gimplify_assign (valist, incr, post_p);
6363 return build_va_arg_indirect_ref (addr);
6366 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
6367 Specify whether the vector mode is supported by the hardware. */
6370 sparc_vector_mode_supported_p (enum machine_mode mode)
6372 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
6375 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
6377 static enum machine_mode
6378 sparc_preferred_simd_mode (enum machine_mode mode)
6396 /* Return the string to output an unconditional branch to LABEL, which is
6397 the operand number of the label.
6399 DEST is the destination insn (i.e. the label), INSN is the source. */
6402 output_ubranch (rtx dest, int label, rtx insn)
6404 static char string[64];
6405 bool v9_form = false;
6408 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
6410 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6411 - INSN_ADDRESSES (INSN_UID (insn)));
6412 /* Leave some instructions for "slop". */
6413 if (delta >= -260000 && delta < 260000)
6418 strcpy (string, "ba%*,pt\t%%xcc, ");
6420 strcpy (string, "b%*\t");
6422 p = strchr (string, '\0');
6433 /* Return the string to output a conditional branch to LABEL, which is
6434 the operand number of the label. OP is the conditional expression.
6435 XEXP (OP, 0) is assumed to be a condition code register (integer or
6436 floating point) and its mode specifies what kind of comparison we made.
6438 DEST is the destination insn (i.e. the label), INSN is the source.
6440 REVERSED is nonzero if we should reverse the sense of the comparison.
6442 ANNUL is nonzero if we should generate an annulling branch. */
6445 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
6448 static char string[64];
6449 enum rtx_code code = GET_CODE (op);
6450 rtx cc_reg = XEXP (op, 0);
6451 enum machine_mode mode = GET_MODE (cc_reg);
6452 const char *labelno, *branch;
6453 int spaces = 8, far;
6456 /* v9 branches are limited to +-1MB. If it is too far away,
6469 fbne,a,pn %fcc2, .LC29
6477 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6480 /* Reversing an FP compare needs care: an ordered compare
6481 becomes an unordered compare and vice versa. */
6482 if (mode == CCFPmode || mode == CCFPEmode)
6483 code = reverse_condition_maybe_unordered (code);
6485 code = reverse_condition (code);
6488 /* Start by writing the branch condition. */
6489 if (mode == CCFPmode || mode == CCFPEmode)
6540 /* ??? !v9: FP branches cannot be preceded by another floating point
6541 insn. Because there is currently no concept of pre-delay slots,
6542 we can fix this only by always emitting a nop before a floating point branch. */
6547 strcpy (string, "nop\n\t");
6548 strcat (string, branch);
6561 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6573 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6594 strcpy (string, branch);
6596 spaces -= strlen (branch);
6597 p = strchr (string, '\0');
6599 /* Now add the annulling, the label, and a possible nop. */
6612 if (! far && insn && INSN_ADDRESSES_SET_P ())
6614 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6615 - INSN_ADDRESSES (INSN_UID (insn)));
6616 /* Leave some instructions for "slop". */
6617 if (delta < -260000 || delta >= 260000)
6621 if (mode == CCFPmode || mode == CCFPEmode)
6623 static char v9_fcc_labelno[] = "%%fccX, ";
6624 /* Set the char indicating the number of the fcc reg to use. */
6625 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6626 labelno = v9_fcc_labelno;
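/* E.g. a compare whose result is in %fcc2 patches the template to
   "%%fcc2, ", which is printed as "%fcc2, " in the final assembly.  */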
6629 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6633 else if (mode == CCXmode || mode == CCX_NOOVmode)
6635 labelno = "%%xcc, ";
6640 labelno = "%%icc, ";
6645 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6648 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6661 strcpy (p, labelno);
6662 p = strchr (p, '\0');
6665 strcpy (p, ".+12\n\t nop\n\tb\t");
6666 /* Skip the next insn if requested or
6667 if we know that it will be a nop. */
6668 if (annul || ! final_sequence)
6682 /* Emit a library call comparison between floating point X and Y.
6683 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
6684 Return the new operator to be used in the comparison sequence.
6686 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6687 values as arguments instead of the TFmode registers themselves;
6688 that is why we cannot call emit_float_lib_cmp. */
6691 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6694 rtx slot0, slot1, result, tem, tem2, libfunc;
6695 enum machine_mode mode;
6696 enum rtx_code new_comparison;
6701 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
6705 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
6709 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
6713 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
6717 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
6721 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
6732 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
6745 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6746 emit_move_insn (slot0, x);
6753 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6754 emit_move_insn (slot1, y);
6757 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
6758 emit_library_call (libfunc, LCT_NORMAL,
6760 XEXP (slot0, 0), Pmode,
6761 XEXP (slot1, 0), Pmode);
6766 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
6767 emit_library_call (libfunc, LCT_NORMAL,
6769 x, TFmode, y, TFmode);
6774 /* Immediately move the result of the libcall into a pseudo
6775 register so reload doesn't clobber the value if it needs
6776 the return register for a spill reg. */
6777 result = gen_reg_rtx (mode);
6778 emit_move_insn (result, hard_libcall_value (mode, libfunc));
6783 return gen_rtx_NE (VOIDmode, result, const0_rtx);
6786 new_comparison = (comparison == UNORDERED ? EQ : NE);
6787 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT (3));
6790 new_comparison = (comparison == UNGT ? GT : NE);
6791 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
6793 return gen_rtx_NE (VOIDmode, result, const2_rtx);
6795 tem = gen_reg_rtx (mode);
6797 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6799 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6800 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
6803 tem = gen_reg_rtx (mode);
6805 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6807 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6808 tem2 = gen_reg_rtx (mode);
6810 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6812 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6813 new_comparison = (comparison == UNEQ ? EQ : NE);
6814 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
6820 /* Generate an unsigned DImode to FP conversion. This is the same code
6821 optabs would emit if we didn't have TFmode patterns. */
6824 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6826 rtx neglab, donelab, i0, i1, f0, in, out;
6829 in = force_reg (DImode, operands[1]);
6830 neglab = gen_label_rtx ();
6831 donelab = gen_label_rtx ();
6832 i0 = gen_reg_rtx (DImode);
6833 i1 = gen_reg_rtx (DImode);
6834 f0 = gen_reg_rtx (mode);
6836 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6838 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6839 emit_jump_insn (gen_jump (donelab));
6842 emit_label (neglab);
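/* The operand has its sign bit set, so it cannot be converted as a
   signed value directly.  Halve it while preserving the low bit for
   rounding, i0 = (in >> 1) | (in & 1), convert, then double the
   result: e.g. 2^63 halves to 2^62, converts exactly, and the final
   addition restores 2^63.  */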
6844 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6845 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6846 emit_insn (gen_iordi3 (i0, i0, i1));
6847 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6848 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6850 emit_label (donelab);
6853 /* Generate an FP to unsigned DImode conversion. This is the same code
6854 optabs would emit if we didn't have TFmode patterns. */
6857 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6859 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6862 in = force_reg (mode, operands[1]);
6863 neglab = gen_label_rtx ();
6864 donelab = gen_label_rtx ();
6865 i0 = gen_reg_rtx (DImode);
6866 i1 = gen_reg_rtx (DImode);
6867 limit = gen_reg_rtx (mode);
6868 f0 = gen_reg_rtx (mode);
6870 emit_move_insn (limit,
6871 CONST_DOUBLE_FROM_REAL_VALUE (
6872 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6873 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6875 emit_insn (gen_rtx_SET (VOIDmode,
6877 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6878 emit_jump_insn (gen_jump (donelab));
6881 emit_label (neglab);
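/* The operand is >= 2^63 and thus does not fit in a signed DImode
   fix.  Subtract 2^63, convert, then add 2^63 back by XORing in the
   sign bit: conceptually 2^63 + 5 converts to 5 and the XOR yields
   0x8000000000000005.  */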
6883 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6884 emit_insn (gen_rtx_SET (VOIDmode,
6886 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6887 emit_insn (gen_movdi (i1, const1_rtx));
6888 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6889 emit_insn (gen_xordi3 (out, i0, i1));
6891 emit_label (donelab);
6894 /* Return the string to output a conditional branch to LABEL, testing
6895 register REG. LABEL is the operand number of the label; REG is the
6896 operand number of the reg. OP is the conditional expression. The mode
6897 of REG says what kind of comparison we made.
6899 DEST is the destination insn (i.e. the label), INSN is the source.
6901 REVERSED is nonzero if we should reverse the sense of the comparison.
6903 ANNUL is nonzero if we should generate an annulling branch. */
6906 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6907 int annul, rtx insn)
6909 static char string[64];
6910 enum rtx_code code = GET_CODE (op);
6911 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6916 /* Branches on a register are limited to +-128KB. If the target is too far away,
6929 brgez,a,pn %o1, .LC29
6935 ba,pt %xcc, .LC29 */
6937 far = get_attr_length (insn) >= 3;
6939 /* If not floating-point or if EQ or NE, we can just reverse the code. */
6941 code = reverse_condition (code);
6943 /* Only 64 bit versions of these instructions exist. */
6944 gcc_assert (mode == DImode);
6946 /* Start by writing the branch condition. */
6951 strcpy (string, "brnz");
6955 strcpy (string, "brz");
6959 strcpy (string, "brgez");
6963 strcpy (string, "brlz");
6967 strcpy (string, "brlez");
6971 strcpy (string, "brgz");
6978 p = strchr (string, '\0');
6980 /* Now add the annulling, reg, label, and nop. */
6987 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6990 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6995 *p = p < string + 8 ? '\t' : ' ';
7003 int veryfar = 1, delta;
7005 if (INSN_ADDRESSES_SET_P ())
7007 delta = (INSN_ADDRESSES (INSN_UID (dest))
7008 - INSN_ADDRESSES (INSN_UID (insn)));
7009 /* Leave some instructions for "slop". */
7010 if (delta >= -260000 && delta < 260000)
7014 strcpy (p, ".+12\n\t nop\n\t");
7015 /* Skip the next insn if requested or
7016 if we know that it will be a nop. */
7017 if (annul || ! final_sequence)
7027 strcpy (p, "ba,pt\t%%xcc, ");
7041 /* Return 1 if any register of the instruction is %l[0-7] or %o[0-7].
7042 Such instructions cannot be used in the delay slot of a return insn on V9.
7043 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
7047 epilogue_renumber (register rtx *where, int test)
7049 register const char *fmt;
7051 register enum rtx_code code;
7056 code = GET_CODE (*where);
7061 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
7063 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
7064 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
7072 /* Do not replace the frame pointer with the stack pointer because
7073 it can cause the delayed instruction to load below the stack.
7074 This occurs when instructions like:
7076 (set (reg/i:SI 24 %i0)
7077 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
7078 (const_int -20 [0xffffffec])) 0))
7080 are in the return delayed slot. */
7082 if (GET_CODE (XEXP (*where, 0)) == REG
7083 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
7084 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
7085 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
7090 if (SPARC_STACK_BIAS
7091 && GET_CODE (XEXP (*where, 0)) == REG
7092 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
7100 fmt = GET_RTX_FORMAT (code);
7102 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7107 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
7108 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
7111 else if (fmt[i] == 'e'
7112 && epilogue_renumber (&(XEXP (*where, i)), test))
7118 /* Leaf functions and non-leaf functions have different needs. */
7121 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
7124 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
7126 static const int *const reg_alloc_orders[] = {
7127 reg_leaf_alloc_order,
7128 reg_nonleaf_alloc_order};
7131 order_regs_for_local_alloc (void)
7133 static int last_order_nonleaf = 1;
7135 if (df_regs_ever_live_p (15) != last_order_nonleaf)
7137 last_order_nonleaf = !last_order_nonleaf;
7138 memcpy ((char *) reg_alloc_order,
7139 (const char *) reg_alloc_orders[last_order_nonleaf],
7140 FIRST_PSEUDO_REGISTER * sizeof (int));
7144 /* Return 1 if REG and MEM are legitimate enough to allow the various
7145 mem<-->reg splits to be run. */
7148 sparc_splitdi_legitimate (rtx reg, rtx mem)
7150 /* Punt if we are here by mistake. */
7151 gcc_assert (reload_completed);
7153 /* We must have an offsettable memory reference. */
7154 if (! offsettable_memref_p (mem))
7157 /* If we have legitimate args for ldd/std, we do not want
7158 the split to happen. */
7159 if ((REGNO (reg) % 2) == 0
7160 && mem_min_alignment (mem, 8))
7167 /* Return 1 if x and y are some kind of REG and they refer to
7168 different hard registers. This test is guaranteed to be
7169 run after reload. */
7172 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
7174 if (GET_CODE (x) != REG)
7176 if (GET_CODE (y) != REG)
7178 if (REGNO (x) == REGNO (y))
7183 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
7184 This makes them candidates for using ldd and std insns.
7186 Note reg1 and reg2 *must* be hard registers. */
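/* For instance, the pair %o0/%o1 (regnos 8 and 9) qualifies on V8,
   while %o1/%o2 does not because %o1 is odd-numbered.  */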
7189 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
7191 /* We might have been passed a SUBREG. */
7192 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
7195 if (REGNO (reg1) % 2 != 0)
7198 /* Integer ldd is deprecated in SPARC V9. */
7199 if (TARGET_V9 && REGNO (reg1) < 32)
7202 return (REGNO (reg1) == REGNO (reg2) - 1);
7205 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in an ldd or std insn.
7208 This can only happen when addr1 and addr2, the addresses in mem1
7209 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
7210 addr1 must also be aligned on a 64-bit boundary.
7212 Also, if dependent_reg_rtx is not null, it should not be used to
7213 compute the address for mem1, i.e. we cannot optimize a sequence
7225 But, note that the transformation from:
7230 is perfectly fine. Thus, the peephole2 patterns always pass us
7231 the destination register of the first load, never the second one.
7233 For stores we don't have a similar problem, so dependent_reg_rtx is
7237 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
7241 HOST_WIDE_INT offset1;
7243 /* The mems cannot be volatile. */
7244 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
7247 /* MEM1 should be aligned on a 64-bit boundary. */
7248 if (MEM_ALIGN (mem1) < 64)
7251 addr1 = XEXP (mem1, 0);
7252 addr2 = XEXP (mem2, 0);
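/* A concrete example of an acceptable pair: ADDR1 == %reg+8 and
   ADDR2 == %reg+12, i.e. the same base register, a first offset that
   is a multiple of 8 and a second offset exactly 4 larger.  A pair
   such as %reg+4 / %reg+8 fails the alignment test below.  */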
7254 /* Extract a register number and offset (if used) from the first addr. */
7255 if (GET_CODE (addr1) == PLUS)
7257 /* If not a REG, return zero. */
7258 if (GET_CODE (XEXP (addr1, 0)) != REG)
7262 reg1 = REGNO (XEXP (addr1, 0));
7263 /* The offset must be constant! */
7264 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
7266 offset1 = INTVAL (XEXP (addr1, 1));
7269 else if (GET_CODE (addr1) != REG)
7273 reg1 = REGNO (addr1);
7274 /* This was a simple (mem (reg)) expression. Offset is 0. */
7278 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
7279 if (GET_CODE (addr2) != PLUS)
7282 if (GET_CODE (XEXP (addr2, 0)) != REG
7283 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
7286 if (reg1 != REGNO (XEXP (addr2, 0)))
7289 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
7292 /* The first offset must be evenly divisible by 8 to ensure the
7293 address is 64-bit aligned. */
7294 if (offset1 % 8 != 0)
7297 /* The offset for the second addr must be 4 more than the first addr. */
7298 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
7301 /* All the tests passed. addr1 and addr2 are valid for ldd and std
7306 /* Return 1 if reg is a pseudo, or is the first register in
7307 a hard register pair. This makes it suitable for use in
7308 ldd and std insns. */
7311 register_ok_for_ldd (rtx reg)
7313 /* We might have been passed a SUBREG. */
7317 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
7318 return (REGNO (reg) % 2 == 0);
7323 /* Return 1 if OP is a memory whose address is known to be
7324 aligned to 8-byte boundary, or a pseudo during reload.
7325 This makes it suitable for use in ldd and std insns. */
7328 memory_ok_for_ldd (rtx op)
7332 /* In 64-bit mode, we assume that the address is word-aligned. */
7333 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
7336 if ((reload_in_progress || reload_completed)
7337 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
7340 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
7342 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
7351 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
7354 sparc_print_operand_punct_valid_p (unsigned char code)
7367 /* Implement TARGET_PRINT_OPERAND.
7368 Print operand X (an rtx) in assembler syntax to file FILE.
7369 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
7370 For `%' followed by punctuation, CODE is the punctuation and X is null. */
7373 sparc_print_operand (FILE *file, rtx x, int code)
7378 /* Output an insn in a delay slot. */
7380 sparc_indent_opcode = 1;
7382 fputs ("\n\t nop", file);
7385 /* Output an annul flag if there's nothing for the delay slot and we
7386 are optimizing. This is always used with '(' below.
7387 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
7388 this is a dbx bug. So, we only do this when optimizing.
7389 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
7390 Always emit a nop in case the next instruction is a branch. */
7391 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
7395 /* Output a 'nop' if there's nothing for the delay slot and we are
7396 not optimizing. This is always used with '*' above. */
7397 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
7398 fputs ("\n\t nop", file);
7399 else if (final_sequence)
7400 sparc_indent_opcode = 1;
7403 /* Output the right displacement from the saved PC on function return.
7404 The caller may have placed an "unimp" insn immediately after the call
7405 so we have to account for it. This insn is used in the 32-bit ABI
7406 when calling a function that returns a non zero-sized structure. The
7407 64-bit ABI doesn't have it. Be careful to have this test be the same
7408 as that for the call. The exception is when sparc_std_struct_return
7409 is enabled, the psABI is followed exactly and the adjustment is made
7410 by the code in sparc_struct_value_rtx. The call emitted is the same
7411 when sparc_std_struct_return is enabled. */
7413 && cfun->returns_struct
7414 && !sparc_std_struct_return
7415 && DECL_SIZE (DECL_RESULT (current_function_decl))
7416 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
7418 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
7424 /* Output the Embedded Medium/Anywhere code model base register. */
7425 fputs (EMBMEDANY_BASE_REG, file);
7428 /* Print some local dynamic TLS name. */
7429 assemble_name (file, get_some_local_dynamic_name ());
7433 /* Adjust the operand to take into account a RESTORE operation. */
7434 if (GET_CODE (x) == CONST_INT)
7436 else if (GET_CODE (x) != REG)
7437 output_operand_lossage ("invalid %%Y operand");
7438 else if (REGNO (x) < 8)
7439 fputs (reg_names[REGNO (x)], file);
7440 else if (REGNO (x) >= 24 && REGNO (x) < 32)
7441 fputs (reg_names[REGNO (x)-16], file);
7443 output_operand_lossage ("invalid %%Y operand");
7446 /* Print out the low order register name of a register pair. */
7447 if (WORDS_BIG_ENDIAN)
7448 fputs (reg_names[REGNO (x)+1], file);
7450 fputs (reg_names[REGNO (x)], file);
7453 /* Print out the high order register name of a register pair. */
7454 if (WORDS_BIG_ENDIAN)
7455 fputs (reg_names[REGNO (x)], file);
7457 fputs (reg_names[REGNO (x)+1], file);
7460 /* Print out the second register name of a register pair or quad.
7461 I.e., R (%o0) => %o1. */
7462 fputs (reg_names[REGNO (x)+1], file);
7465 /* Print out the third register name of a register quad.
7466 I.e., S (%o0) => %o2. */
7467 fputs (reg_names[REGNO (x)+2], file);
7470 /* Print out the fourth register name of a register quad.
7471 I.e., T (%o0) => %o3. */
7472 fputs (reg_names[REGNO (x)+3], file);
7475 /* Print a condition code register. */
7476 if (REGNO (x) == SPARC_ICC_REG)
7478 /* We don't handle CC[X]_NOOVmode because they're not supposed to occur here. */
7480 if (GET_MODE (x) == CCmode)
7481 fputs ("%icc", file);
7482 else if (GET_MODE (x) == CCXmode)
7483 fputs ("%xcc", file);
7488 /* %fccN register */
7489 fputs (reg_names[REGNO (x)], file);
7492 /* Print the operand's address only. */
7493 output_address (XEXP (x, 0));
7496 /* In this case we need a register. Use %g0 if the
7497 operand is const0_rtx. */
7499 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7501 fputs ("%g0", file);
7508 switch (GET_CODE (x))
7510 case IOR: fputs ("or", file); break;
7511 case AND: fputs ("and", file); break;
7512 case XOR: fputs ("xor", file); break;
7513 default: output_operand_lossage ("invalid %%A operand");
7518 switch (GET_CODE (x))
7520 case IOR: fputs ("orn", file); break;
7521 case AND: fputs ("andn", file); break;
7522 case XOR: fputs ("xnor", file); break;
7523 default: output_operand_lossage ("invalid %%B operand");
7527 /* These are used by the conditional move instructions. */
7531 enum rtx_code rc = GET_CODE (x);
7535 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7536 if (mode == CCFPmode || mode == CCFPEmode)
7537 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7539 rc = reverse_condition (GET_CODE (x));
7543 case NE: fputs ("ne", file); break;
7544 case EQ: fputs ("e", file); break;
7545 case GE: fputs ("ge", file); break;
7546 case GT: fputs ("g", file); break;
7547 case LE: fputs ("le", file); break;
7548 case LT: fputs ("l", file); break;
7549 case GEU: fputs ("geu", file); break;
7550 case GTU: fputs ("gu", file); break;
7551 case LEU: fputs ("leu", file); break;
7552 case LTU: fputs ("lu", file); break;
7553 case LTGT: fputs ("lg", file); break;
7554 case UNORDERED: fputs ("u", file); break;
7555 case ORDERED: fputs ("o", file); break;
7556 case UNLT: fputs ("ul", file); break;
7557 case UNLE: fputs ("ule", file); break;
7558 case UNGT: fputs ("ug", file); break;
7559 case UNGE: fputs ("uge", file); break;
7560 case UNEQ: fputs ("ue", file); break;
7561 default: output_operand_lossage (code == 'c'
7562 ? "invalid %%c operand"
7563 : "invalid %%C operand");
7568 /* These are used by the movr instruction pattern. */
7572 enum rtx_code rc = (code == 'd'
7573 ? reverse_condition (GET_CODE (x))
7577 case NE: fputs ("ne", file); break;
7578 case EQ: fputs ("e", file); break;
7579 case GE: fputs ("gez", file); break;
7580 case LT: fputs ("lz", file); break;
7581 case LE: fputs ("lez", file); break;
7582 case GT: fputs ("gz", file); break;
7583 default: output_operand_lossage (code == 'd'
7584 ? "invalid %%d operand"
7585 : "invalid %%D operand");
7592 /* Print a sign-extended character. */
7593 int i = trunc_int_for_mode (INTVAL (x), QImode);
7594 fprintf (file, "%d", i);
7599 /* Operand must be a MEM; write its address. */
7600 if (GET_CODE (x) != MEM)
7601 output_operand_lossage ("invalid %%f operand");
7602 output_address (XEXP (x, 0));
7607 /* Print a sign-extended 32-bit value. */
7609 if (GET_CODE (x) == CONST_INT)
7611 else if (GET_CODE (x) == CONST_DOUBLE)
7612 i = CONST_DOUBLE_LOW (x);
7615 output_operand_lossage ("invalid %%s operand");
7618 i = trunc_int_for_mode (i, SImode);
7619 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7624 /* Do nothing special. */
7628 /* Undocumented flag. */
7629 output_operand_lossage ("invalid operand output code");
7632 if (GET_CODE (x) == REG)
7633 fputs (reg_names[REGNO (x)], file);
7634 else if (GET_CODE (x) == MEM)
7637 /* Poor Sun assembler doesn't understand absolute addressing. */
7638 if (CONSTANT_P (XEXP (x, 0)))
7639 fputs ("%g0+", file);
7640 output_address (XEXP (x, 0));
7643 else if (GET_CODE (x) == HIGH)
7645 fputs ("%hi(", file);
7646 output_addr_const (file, XEXP (x, 0));
7649 else if (GET_CODE (x) == LO_SUM)
7651 sparc_print_operand (file, XEXP (x, 0), 0);
7652 if (TARGET_CM_MEDMID)
7653 fputs ("+%l44(", file);
7655 fputs ("+%lo(", file);
7656 output_addr_const (file, XEXP (x, 1));
7659 else if (GET_CODE (x) == CONST_DOUBLE
7660 && (GET_MODE (x) == VOIDmode
7661 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7663 if (CONST_DOUBLE_HIGH (x) == 0)
7664 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7665 else if (CONST_DOUBLE_HIGH (x) == -1
7666 && CONST_DOUBLE_LOW (x) < 0)
7667 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7669 output_operand_lossage ("long long constant not a valid immediate operand");
7671 else if (GET_CODE (x) == CONST_DOUBLE)
7672 output_operand_lossage ("floating point constant not a valid immediate operand");
7673 else { output_addr_const (file, x); }
7676 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
7679 sparc_print_operand_address (FILE *file, rtx x)
7681 register rtx base, index = 0;
7683 register rtx addr = x;
7686 fputs (reg_names[REGNO (addr)], file);
7687 else if (GET_CODE (addr) == PLUS)
7689 if (CONST_INT_P (XEXP (addr, 0)))
7690 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
7691 else if (CONST_INT_P (XEXP (addr, 1)))
7692 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
7694 base = XEXP (addr, 0), index = XEXP (addr, 1);
7695 if (GET_CODE (base) == LO_SUM)
7697 gcc_assert (USE_AS_OFFSETABLE_LO10
7699 && ! TARGET_CM_MEDMID);
7700 output_operand (XEXP (base, 0), 0);
7701 fputs ("+%lo(", file);
7702 output_address (XEXP (base, 1));
7703 fprintf (file, ")+%d", offset);
7707 fputs (reg_names[REGNO (base)], file);
7709 fprintf (file, "%+d", offset);
7710 else if (REG_P (index))
7711 fprintf (file, "+%s", reg_names[REGNO (index)]);
7712 else if (GET_CODE (index) == SYMBOL_REF
7713 || GET_CODE (index) == LABEL_REF
7714 || GET_CODE (index) == CONST)
7715 fputc ('+', file), output_addr_const (file, index);
7716 else gcc_unreachable ();
7719 else if (GET_CODE (addr) == MINUS
7720 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
7722 output_addr_const (file, XEXP (addr, 0));
7724 output_addr_const (file, XEXP (addr, 1));
7725 fputs ("-.)", file);
7727 else if (GET_CODE (addr) == LO_SUM)
7729 output_operand (XEXP (addr, 0), 0);
7730 if (TARGET_CM_MEDMID)
7731 fputs ("+%l44(", file);
7733 fputs ("+%lo(", file);
7734 output_address (XEXP (addr, 1));
7738 && GET_CODE (addr) == CONST
7739 && GET_CODE (XEXP (addr, 0)) == MINUS
7740 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
7741 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
7742 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
7744 addr = XEXP (addr, 0);
7745 output_addr_const (file, XEXP (addr, 0));
7746 /* Group the args of the second CONST in parentheses. */
7748 /* Skip past the second CONST--it does nothing for us. */
7749 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
7750 /* Close the parenthesis. */
7755 output_addr_const (file, addr);
7759 /* Target hook for assembling integer objects. The sparc version has
7760 special handling for aligned DI-mode objects. */
7763 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7765 /* ??? We only output .xword's for symbols and only then in environments
7766 where the assembler can handle them. */
7767 if (aligned_p && size == 8
7768 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7772 assemble_integer_with_op ("\t.xword\t", x);
7777 assemble_aligned_integer (4, const0_rtx);
7778 assemble_aligned_integer (4, x);
7782 return default_assemble_integer (x, size, aligned_p);
7785 /* Return the value of a code used in the .proc pseudo-op that says
7786 what kind of result this function returns. For non-C types, we pick
7787 the closest C type. */
7789 #ifndef SHORT_TYPE_SIZE
7790 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7793 #ifndef INT_TYPE_SIZE
7794 #define INT_TYPE_SIZE BITS_PER_WORD
7797 #ifndef LONG_TYPE_SIZE
7798 #define LONG_TYPE_SIZE BITS_PER_WORD
7801 #ifndef LONG_LONG_TYPE_SIZE
7802 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7805 #ifndef FLOAT_TYPE_SIZE
7806 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7809 #ifndef DOUBLE_TYPE_SIZE
7810 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7813 #ifndef LONG_DOUBLE_TYPE_SIZE
7814 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7818 sparc_type_code (register tree type)
7820 register unsigned long qualifiers = 0;
7821 register unsigned shift;
7823 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7824 setting more, since some assemblers will give an error for this. Also,
7825 we must be careful to avoid shifts of 32 bits or more to avoid getting
7826 unpredictable results. */
7828 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7830 switch (TREE_CODE (type))
7836 qualifiers |= (3 << shift);
7841 qualifiers |= (2 << shift);
7845 case REFERENCE_TYPE:
7847 qualifiers |= (1 << shift);
7851 return (qualifiers | 8);
7854 case QUAL_UNION_TYPE:
7855 return (qualifiers | 9);
7858 return (qualifiers | 10);
7861 return (qualifiers | 16);
7864 /* If this is a range type, consider it to be the underlying type. */
7866 if (TREE_TYPE (type) != 0)
7869 /* Carefully distinguish all the standard types of C,
7870 without messing up if the language is not C. We do this by
7871 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7872 look at both the names and the above fields, but that's redundant.
7873 Any type whose size is between two C types will be considered
7874 to be the wider of the two types. Also, we do not have a
7875 special code to use for "long long", so anything wider than
7876 long is treated the same. Note that we can't distinguish
7877 between "int" and "long" in this code if they are the same
7878 size, but that's fine, since neither can the assembler. */
7880 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7881 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7883 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7884 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7886 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7887 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7890 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7893 /* If this is a range type, consider it to be the underlying type. */
7895 if (TREE_TYPE (type) != 0)
7898 /* Carefully distinguish all the standard types of C,
7899 without messing up if the language is not C. */
7901 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7902 return (qualifiers | 6);
7905 return (qualifiers | 7);
7907 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7908 /* ??? We need to distinguish between double and float complex types,
7909 but I don't know how yet because I can't reach this code from
7910 existing front-ends. */
7911 return (qualifiers | 7); /* Who knows? */
7914 case BOOLEAN_TYPE: /* Boolean truth value type. */
7920 gcc_unreachable (); /* Not a type! */
7927 /* Nested function support. */
7929 /* Emit RTL insns to initialize the variable parts of a trampoline.
7930 FNADDR is an RTX for the address of the function's pure code.
7931 CXT is an RTX for the static chain value for the function.
7933 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7934 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7935 (to store insns). This is a bit excessive. Perhaps a different
7936 mechanism would be better here.
7938 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7941 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
7943 /* SPARC 32-bit trampoline:
7946 sethi %hi(static), %g2
7948 or %g2, %lo(static), %g2
7950 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7951 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
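Each target address is split the usual SPARC way: the high 22 bits go
into the sethi immediate (address >> 10) and the low 10 bits into the
or/jmpl immediate (address & 0x3ff).  The constants 0x03000000,
0x05000000, 0x81c06000 and 0x8410a000 below are those four opcodes
with zeroed immediate fields.  */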
7955 (adjust_address (m_tramp, SImode, 0),
7956 expand_binop (SImode, ior_optab,
7957 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
7958 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7959 NULL_RTX, 1, OPTAB_DIRECT));
7962 (adjust_address (m_tramp, SImode, 4),
7963 expand_binop (SImode, ior_optab,
7964 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
7965 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7966 NULL_RTX, 1, OPTAB_DIRECT));
7969 (adjust_address (m_tramp, SImode, 8),
7970 expand_binop (SImode, ior_optab,
7971 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7972 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7973 NULL_RTX, 1, OPTAB_DIRECT));
7976 (adjust_address (m_tramp, SImode, 12),
7977 expand_binop (SImode, ior_optab,
7978 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7979 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7980 NULL_RTX, 1, OPTAB_DIRECT));
7982 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7983 aligned on a 16-byte boundary so one flush clears it all. */
7984 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
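/* Other cores may flush fewer than 16 bytes per line (an assumption
   made here), so issue a second flush at offset 8 to cover the rest
   of the trampoline.  */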
7985 if (sparc_cpu != PROCESSOR_ULTRASPARC
7986 && sparc_cpu != PROCESSOR_ULTRASPARC3
7987 && sparc_cpu != PROCESSOR_NIAGARA
7988 && sparc_cpu != PROCESSOR_NIAGARA2)
7989 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
7991 /* Call __enable_execute_stack after writing onto the stack to make sure
7992 the stack address is accessible. */
7993 #ifdef ENABLE_EXECUTE_STACK
7994 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7995 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8000 /* The 64-bit version is simpler because it makes more sense to load the
8001 values as "immediate" data out of the trampoline. It's also easier since
8002 we can read the PC without clobbering a register. */
8005 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8007 /* SPARC 64-bit trampoline:
8016 emit_move_insn (adjust_address (m_tramp, SImode, 0),
8017 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
8018 emit_move_insn (adjust_address (m_tramp, SImode, 4),
8019 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
8020 emit_move_insn (adjust_address (m_tramp, SImode, 8),
8021 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
8022 emit_move_insn (adjust_address (m_tramp, SImode, 12),
8023 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
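/* An informal decode of the four instruction words just stored (not
   authoritative):

	rd	%pc, %g1
	ldx	[%g1+24], %g5
	jmp	%g5
	 ldx	[%g1+16], %g5

   The jump target is sampled before the delay-slot load replaces %g5
   with the static chain word stored at offset 16 below.  */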
8024 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
8025 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
8026 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
8028 if (sparc_cpu != PROCESSOR_ULTRASPARC
8029 && sparc_cpu != PROCESSOR_ULTRASPARC3
8030 && sparc_cpu != PROCESSOR_NIAGARA
8031 && sparc_cpu != PROCESSOR_NIAGARA2)
8032 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
8034 /* Call __enable_execute_stack after writing onto the stack to make sure
8035 the stack address is accessible. */
8036 #ifdef ENABLE_EXECUTE_STACK
8037 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8038 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8042 /* Worker for TARGET_TRAMPOLINE_INIT. */
8045 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
8047 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
8048 cxt = force_reg (Pmode, cxt);
8050 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
8052 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
8055 /* Adjust the cost of a scheduling dependency. Return the new cost of
8056 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
8059 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8061 enum attr_type insn_type;
8063 if (! recog_memoized (insn))
8066 insn_type = get_attr_type (insn);
8068 if (REG_NOTE_KIND (link) == 0)
8070 /* Data dependency; DEP_INSN writes a register that INSN reads some cycles later. */
8073 /* If a load, then the dependence must be on the memory address;
8074 add an extra "cycle". Note that the cost could be two cycles
8075 if the reg was written late in an instruction group; we cannot tell
8077 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
8080 /* Get the delay only if the address of the store is the dependence. */
8081 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
8083 rtx pat = PATTERN (insn);
8084 rtx dep_pat = PATTERN (dep_insn);
8086 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8087 return cost; /* This should not happen! */
8089 /* The dependency between the two instructions was on the data that
8090 is being stored. Assume that this implies that the address of the
8091 store is not dependent. */
8092 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8095 return cost + 3; /* An approximation. */
8098 /* A shift instruction cannot receive its data from an instruction
8099 in the same cycle; add a one cycle penalty. */
8100 if (insn_type == TYPE_SHIFT)
8101 return cost + 3; /* Split before cascade into shift. */
8105 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
8106 INSN writes some cycles later. */
8108 /* These are only significant for the fpu unit; writing a fp reg before
8109 the fpu has finished with it stalls the processor. */
8111 /* Reusing an integer register causes no problems. */
8112 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8120 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8122 enum attr_type insn_type, dep_type;
8123 rtx pat = PATTERN (insn);
8124 rtx dep_pat = PATTERN (dep_insn);
8126 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
8129 insn_type = get_attr_type (insn);
8130 dep_type = get_attr_type (dep_insn);
8132 switch (REG_NOTE_KIND (link))
8135 /* Data dependency; DEP_INSN writes a register that INSN reads some cycles later. */
8142 /* Get the delay iff the address of the store is the dependence. */
8143 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8146 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8153 /* If a load, then the dependence must be on the memory address. If
8154 the addresses aren't equal, then it might be a false dependency. */
8155 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
8157 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
8158 || GET_CODE (SET_DEST (dep_pat)) != MEM
8159 || GET_CODE (SET_SRC (pat)) != MEM
8160 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
8161 XEXP (SET_SRC (pat), 0)))
8169 /* Compare to branch latency is 0. There is no benefit from
8170 separating compare and branch. */
8171 if (dep_type == TYPE_COMPARE)
8173 /* Floating point compare to branch latency is less than
8174 compare to conditional move. */
8175 if (dep_type == TYPE_FPCMP)
8184 /* Anti-dependencies only penalize the fpu unit. */
8185 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8197 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
8201 case PROCESSOR_SUPERSPARC:
8202 cost = supersparc_adjust_cost (insn, link, dep, cost);
8204 case PROCESSOR_HYPERSPARC:
8205 case PROCESSOR_SPARCLITE86X:
8206 cost = hypersparc_adjust_cost (insn, link, dep, cost);
8215 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
8216 int sched_verbose ATTRIBUTE_UNUSED,
8217 int max_ready ATTRIBUTE_UNUSED)
8221 sparc_use_sched_lookahead (void)
8223 if (sparc_cpu == PROCESSOR_NIAGARA
8224 || sparc_cpu == PROCESSOR_NIAGARA2)
8226 if (sparc_cpu == PROCESSOR_ULTRASPARC
8227 || sparc_cpu == PROCESSOR_ULTRASPARC3)
8229 if ((1 << sparc_cpu) &
8230 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
8231 (1 << PROCESSOR_SPARCLITE86X)))
8237 sparc_issue_rate (void)
8241 case PROCESSOR_NIAGARA:
8242 case PROCESSOR_NIAGARA2:
8246 /* Assume V9 processors are capable of at least dual-issue. */
8248 case PROCESSOR_SUPERSPARC:
8250 case PROCESSOR_HYPERSPARC:
8251 case PROCESSOR_SPARCLITE86X:
8253 case PROCESSOR_ULTRASPARC:
8254 case PROCESSOR_ULTRASPARC3:
8260 set_extends (rtx insn)
8262 register rtx pat = PATTERN (insn);
8264 switch (GET_CODE (SET_SRC (pat)))
8266 /* Load and some shift instructions zero extend. */
8269 /* sethi clears the high bits. */
8271 /* LO_SUM is used with sethi; sethi cleared the high
8272 bits and the values used with lo_sum are positive. */
8274 /* Store flag stores 0 or 1. */
8284 rtx op0 = XEXP (SET_SRC (pat), 0);
8285 rtx op1 = XEXP (SET_SRC (pat), 1);
8286 if (GET_CODE (op1) == CONST_INT)
8287 return INTVAL (op1) >= 0;
8288 if (GET_CODE (op0) != REG)
8290 if (sparc_check_64 (op0, insn) == 1)
8292 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8297 rtx op0 = XEXP (SET_SRC (pat), 0);
8298 rtx op1 = XEXP (SET_SRC (pat), 1);
8299 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
8301 if (GET_CODE (op1) == CONST_INT)
8302 return INTVAL (op1) >= 0;
8303 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8306 return GET_MODE (SET_SRC (pat)) == SImode;
8307 /* Positive integers leave the high bits zero. */
8309 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
8311 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
8314 return - (GET_MODE (SET_SRC (pat)) == SImode);
8316 return sparc_check_64 (SET_SRC (pat), insn);
8322 /* We _ought_ to have only one kind per function, but... */
8323 static GTY(()) rtx sparc_addr_diff_list;
8324 static GTY(()) rtx sparc_addr_list;
8327 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
8329 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
8331 sparc_addr_diff_list
8332 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
8334 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
8338 sparc_output_addr_vec (rtx vec)
8340 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8341 int idx, vlen = XVECLEN (body, 0);
8343 #ifdef ASM_OUTPUT_ADDR_VEC_START
8344 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8347 #ifdef ASM_OUTPUT_CASE_LABEL
8348 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8351 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8354 for (idx = 0; idx < vlen; idx++)
8356 ASM_OUTPUT_ADDR_VEC_ELT
8357 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
8360 #ifdef ASM_OUTPUT_ADDR_VEC_END
8361 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8366 sparc_output_addr_diff_vec (rtx vec)
8368 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8369 rtx base = XEXP (XEXP (body, 0), 0);
8370 int idx, vlen = XVECLEN (body, 1);
8372 #ifdef ASM_OUTPUT_ADDR_VEC_START
8373 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8376 #ifdef ASM_OUTPUT_CASE_LABEL
8377 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8380 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8383 for (idx = 0; idx < vlen; idx++)
8385 ASM_OUTPUT_ADDR_DIFF_ELT
8388 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
8389 CODE_LABEL_NUMBER (base));
8392 #ifdef ASM_OUTPUT_ADDR_VEC_END
8393 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8398 sparc_output_deferred_case_vectors (void)
8403 if (sparc_addr_list == NULL_RTX
8404 && sparc_addr_diff_list == NULL_RTX)
8407 /* Align to cache line in the function's code section. */
8408 switch_to_section (current_function_section ());
8410 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
8412 ASM_OUTPUT_ALIGN (asm_out_file, align);
8414 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
8415 sparc_output_addr_vec (XEXP (t, 0));
8416 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
8417 sparc_output_addr_diff_vec (XEXP (t, 0));
8419 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
8422 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
8423 unknown. Return 1 if the high bits are zero, -1 if the register is sign extended. */
8426 sparc_check_64 (rtx x, rtx insn)
8428 /* If a register is set only once it is safe to ignore insns this
8429 code does not know how to handle. The loop will either recognize
8430 the single set and return the correct value or fail to recognize it and return 0. */
8435 gcc_assert (GET_CODE (x) == REG);
8437 if (GET_MODE (x) == DImode)
8438 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
8440 if (flag_expensive_optimizations
8441 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
8447 insn = get_last_insn_anywhere ();
8452 while ((insn = PREV_INSN (insn)))
8454 switch (GET_CODE (insn))
8467 rtx pat = PATTERN (insn);
8468 if (GET_CODE (pat) != SET)
8470 if (rtx_equal_p (x, SET_DEST (pat)))
8471 return set_extends (insn);
8472 if (y && rtx_equal_p (y, SET_DEST (pat)))
8473 return set_extends (insn);
8474 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
8482 /* Returns assembly code to perform a DImode shift using
8483 a 64-bit global or out register on SPARC-V8+. */
8485 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
8487 static char asm_code[60];
8489 /* The scratch register is only required when the destination
8490 register is not a 64-bit global or out register. */
8491 if (which_alternative != 2)
8492 operands[3] = operands[0];
8494 /* We can only shift by constants <= 63. */
8495 if (GET_CODE (operands[2]) == CONST_INT)
8496 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
8498 if (GET_CODE (operands[1]) == CONST_INT)
8500 output_asm_insn ("mov\t%1, %3", operands);
8504 output_asm_insn ("sllx\t%H1, 32, %3", operands);
8505 if (sparc_check_64 (operands[1], insn) <= 0)
8506 output_asm_insn ("srl\t%L1, 0, %L1", operands);
8507 output_asm_insn ("or\t%L1, %3, %3", operands);
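/* At this point the scratch register %3 holds the full 64-bit value:
   sllx moved the high word into the upper half and the (zero-extended
   if necessary) low word was or'd into the lower half.  */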
8510 strcpy (asm_code, opcode);
8512 if (which_alternative != 2)
8513 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
8515 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
8518 /* Output rtl to increment the profiler label LABELNO
8519 for profiling a function entry. */
8522 sparc_profile_hook (int labelno)
8527 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
8528 if (NO_PROFILE_COUNTERS)
8530 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
8534 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8535 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
8536 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
8540 #ifdef TARGET_SOLARIS
8541 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
8544 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
8545 tree decl ATTRIBUTE_UNUSED)
8547 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
8549 solaris_elf_asm_comdat_section (name, flags, decl);
8553 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
8555 if (!(flags & SECTION_DEBUG))
8556 fputs (",#alloc", asm_out_file);
8557 if (flags & SECTION_WRITE)
8558 fputs (",#write", asm_out_file);
8559 if (flags & SECTION_TLS)
8560 fputs (",#tls", asm_out_file);
8561 if (flags & SECTION_CODE)
8562 fputs (",#execinstr", asm_out_file);
8564 /* ??? Handle SECTION_BSS. */
8566 fputc ('\n', asm_out_file);
8568 #endif /* TARGET_SOLARIS */
8570 /* We do not allow indirect calls to be optimized into sibling calls.
8572 We cannot use sibling calls when delayed branches are disabled
8573 because they will likely require the call delay slot to be filled.
8575 Also, on SPARC 32-bit we cannot emit a sibling call when the
8576 current function returns a structure. This is because the "unimp
8577 after call" convention would cause the callee to return to the
8578 wrong place. The generic code already disallows cases where the
8579 function being called returns a structure.
8581 It may seem strange how this last case could occur. Usually there
8582 is code after the call which jumps to epilogue code which dumps the
8583 return value into the struct return area. That ought to invalidate
8584 the sibling call, right? Well, in the C++ case we can end up passing
8585 the pointer to the struct return area to a constructor (which returns
8586 void) and then nothing else happens. Such a sibling call would look
8587 valid without the added check here.
8589 VxWorks PIC PLT entries require the global pointer to be initialized
8590 on entry. We therefore can't emit sibling calls to them. */
8592 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8595 && flag_delayed_branch
8596 && (TARGET_ARCH64 || ! cfun->returns_struct)
8597 && !(TARGET_VXWORKS_RTP
8599 && !targetm.binds_local_p (decl)));
8602 /* libfunc renaming. */
8605 sparc_init_libfuncs (void)
8609 /* Use the subroutines that Sun's library provides for integer
8610 multiply and divide. The `*' prevents an underscore from
8611 being prepended by the compiler. .umul is a little faster than .mul. */
8613 set_optab_libfunc (smul_optab, SImode, "*.umul");
8614 set_optab_libfunc (sdiv_optab, SImode, "*.div");
8615 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
8616 set_optab_libfunc (smod_optab, SImode, "*.rem");
8617 set_optab_libfunc (umod_optab, SImode, "*.urem");
8619 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
8620 set_optab_libfunc (add_optab, TFmode, "_Q_add");
8621 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
8622 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
8623 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
8624 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
8626 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
8627 is because with soft-float, the SFmode and DFmode sqrt
8628 instructions will be absent, and the compiler will notice and
8629 try to use the TFmode sqrt instruction for calls to the
8630 builtin function sqrt, but this fails. */
8632 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
8634 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
8635 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
8636 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
8637 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
8638 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
8639 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
8641 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
8642 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
8643 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
8644 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
8646 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
8647 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
8648 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
8649 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
8651 if (DITF_CONVERSION_LIBFUNCS)
8653 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
8654 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
8655 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
8656 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
8659 if (SUN_CONVERSION_LIBFUNCS)
8661 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
8662 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
8663 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
8664 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
8669 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
8670 do not exist in the library. Make sure the compiler does not
8671 emit calls to them by accident. (It should always use the
8672 hardware instructions.) */
8673 set_optab_libfunc (smul_optab, SImode, 0);
8674 set_optab_libfunc (sdiv_optab, SImode, 0);
8675 set_optab_libfunc (udiv_optab, SImode, 0);
8676 set_optab_libfunc (smod_optab, SImode, 0);
8677 set_optab_libfunc (umod_optab, SImode, 0);
8679 if (SUN_INTEGER_MULTIPLY_64)
8681 set_optab_libfunc (smul_optab, DImode, "__mul64");
8682 set_optab_libfunc (sdiv_optab, DImode, "__div64");
8683 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
8684 set_optab_libfunc (smod_optab, DImode, "__rem64");
8685 set_optab_libfunc (umod_optab, DImode, "__urem64");
8688 if (SUN_CONVERSION_LIBFUNCS)
8690 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
8691 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
8692 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
8693 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
8698 #define def_builtin(NAME, CODE, TYPE) \
8699 add_builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
8702 /* Implement the TARGET_INIT_BUILTINS target hook.
8703 Create builtin functions for special SPARC instructions. */
8706 sparc_init_builtins (void)
8709 sparc_vis_init_builtins ();
8712 /* Create builtin functions for VIS 1.0 instructions. */
8715 sparc_vis_init_builtins (void)
8717 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
8718 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
8719 tree v4hi = build_vector_type (intHI_type_node, 4);
8720 tree v2hi = build_vector_type (intHI_type_node, 2);
8721 tree v2si = build_vector_type (intSI_type_node, 2);
8723 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
8724 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
8725 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
8726 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
8727 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
8728 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
8729 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
8730 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
8731 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
8732 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
8733 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
8734 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
8735 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
8737 intDI_type_node, 0);
8738 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
8740 intDI_type_node, 0);
8741 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
8743 intSI_type_node, 0);
8744 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
8746 intDI_type_node, 0);
8748 /* Packing and expanding vectors. */
8749 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
8750 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
8751 v8qi_ftype_v2si_v8qi);
8752 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
8754 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
8755 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
8756 v8qi_ftype_v4qi_v4qi);
8758 /* Multiplications. */
8759 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
8760 v4hi_ftype_v4qi_v4hi);
8761 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
8762 v4hi_ftype_v4qi_v2hi);
8763 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
8764 v4hi_ftype_v4qi_v2hi);
8765 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
8766 v4hi_ftype_v8qi_v4hi);
8767 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8768 v4hi_ftype_v8qi_v4hi);
8769 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8770 v2si_ftype_v4qi_v2hi);
8771 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8772 v2si_ftype_v4qi_v2hi);
8774 /* Data aligning. */
8775 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8776 v4hi_ftype_v4hi_v4hi);
8777 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8778 v8qi_ftype_v8qi_v8qi);
8779 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8780 v2si_ftype_v2si_v2si);
8781 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8784 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8787 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8790 /* Pixel distance. */
8791 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8792 di_ftype_v8qi_v8qi_di);
8795 /* Handle TARGET_EXPAND_BUILTIN target hook.
8796 Expand builtin functions for SPARC intrinsics. */
8799 sparc_expand_builtin (tree exp, rtx target,
8800 rtx subtarget ATTRIBUTE_UNUSED,
8801 enum machine_mode tmode ATTRIBUTE_UNUSED,
8802 int ignore ATTRIBUTE_UNUSED)
8805 call_expr_arg_iterator iter;
8806 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8807 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8809 enum machine_mode mode[4];
8812 mode[0] = insn_data[icode].operand[0].mode;
8814 || GET_MODE (target) != mode[0]
8815 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
8816 op[0] = gen_reg_rtx (mode[0]);
8820 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8823 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8824 op[arg_count] = expand_normal (arg);
8826 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8828 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8834 pat = GEN_FCN (icode) (op[0], op[1]);
8837 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8840 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8855 sparc_vis_mul8x16 (int e8, int e16)
8857 return (e8 * e16 + 128) / 256;
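/* The "+ 128" above rounds the scaled product to nearest rather than
   truncating: e.g. sparc_vis_mul8x16 (255, 256) is (65280 + 128) / 256
   = 255.  */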
8860 /* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
8861 by FNCODE. All of the elements in ELTS0 and ELTS1 lists must be integer
8862 constants. A tree list with the results of the multiplications is returned,
8863 and each element in the list is of INNER_TYPE. */
8866 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8868 tree n_elts = NULL_TREE;
8873 case CODE_FOR_fmul8x16_vis:
8874 for (; elts0 && elts1;
8875 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8878 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8879 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8880 n_elts = tree_cons (NULL_TREE,
8881 build_int_cst (inner_type, val),
8886 case CODE_FOR_fmul8x16au_vis:
8887 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8889 for (; elts0; elts0 = TREE_CHAIN (elts0))
8892 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8894 n_elts = tree_cons (NULL_TREE,
8895 build_int_cst (inner_type, val),
8900 case CODE_FOR_fmul8x16al_vis:
8901 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8903 for (; elts0; elts0 = TREE_CHAIN (elts0))
8906 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8908 n_elts = tree_cons (NULL_TREE,
8909 build_int_cst (inner_type, val),
8918 return nreverse (n_elts);
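
/* Note the scale selection above: the AU variant reads the first (upper)
   16-bit element of ELTS1, the AL variant the second.  With a scale of 256,
   sparc_vis_mul8x16 (e, 256) = (256 * e + 128) / 256 = e, so multiplying
   by 256 acts as the identity on each 8-bit element.  */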
/* Handle TARGET_FOLD_BUILTIN target hook.
   Fold builtin functions for SPARC intrinsics.  If IGNORE is true the
   result of the function call is ignored.  NULL_TREE is returned if the
   function could not be folded.  */

static tree
sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
                    tree *args, bool ignore)
{
  tree arg0, arg1, arg2;
  tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
  enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);

  if (ignore
      && icode != CODE_FOR_alignaddrsi_vis
      && icode != CODE_FOR_alignaddrdi_vis)
    return build_zero_cst (rtype);
  switch (icode)
    {
    case CODE_FOR_fexpand_vis:
      arg0 = args[0];
      STRIP_NOPS (arg0);

      if (TREE_CODE (arg0) == VECTOR_CST)
        {
          tree inner_type = TREE_TYPE (rtype);
          tree elts = TREE_VECTOR_CST_ELTS (arg0);
          tree n_elts = NULL_TREE;

          for (; elts; elts = TREE_CHAIN (elts))
            {
              unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
              n_elts = tree_cons (NULL_TREE,
                                  build_int_cst (inner_type, val),
                                  n_elts);
            }
          return build_vector (rtype, nreverse (n_elts));
        }
      break;
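
      /* E.g. folding __builtin_vis_fexpand on the constant { 1, 2, 3, 4 }
         yields { 16, 32, 48, 64 }: each 8-bit element is shifted left by 4
         into its 16-bit lane.  */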
    case CODE_FOR_fmul8x16_vis:
    case CODE_FOR_fmul8x16au_vis:
    case CODE_FOR_fmul8x16al_vis:
      arg0 = args[0];
      arg1 = args[1];
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);

      if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
        {
          tree inner_type = TREE_TYPE (rtype);
          tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
          tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
          tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
                                                  elts1);

          return build_vector (rtype, n_elts);
        }
      break;
    case CODE_FOR_fpmerge_vis:
      arg0 = args[0];
      arg1 = args[1];
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);

      if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
        {
          tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
          tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
          tree n_elts = NULL_TREE;

          for (; elts0 && elts1;
               elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
            {
              n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
              n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
            }

          return build_vector (rtype, nreverse (n_elts));
        }
      break;
    case CODE_FOR_pdist_vis:
      arg0 = args[0];
      arg1 = args[1];
      arg2 = args[2];
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);
      STRIP_NOPS (arg2);

      if (TREE_CODE (arg0) == VECTOR_CST
          && TREE_CODE (arg1) == VECTOR_CST
          && TREE_CODE (arg2) == INTEGER_CST)
        {
          int overflow = 0;
          unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
          HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
          tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
          tree elts1 = TREE_VECTOR_CST_ELTS (arg1);

          for (; elts0 && elts1;
               elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
            {
              unsigned HOST_WIDE_INT
                low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
              HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
              HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));

              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              overflow |= neg_double (low1, high1, &l, &h);
              overflow |= add_double (low0, high0, l, h, &l, &h);
              if (h < 0)
                overflow |= neg_double (l, h, &l, &h);

              overflow |= add_double (low, high, l, h, &low, &high);
            }

          gcc_assert (overflow == 0);

          return build_int_cst_wide (rtype, low, high);
        }
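
      /* E.g. folding __builtin_vis_pdist on vectors whose first elements
         are 1, 5 and 3, 2 contributes |1 - 3| + |5 - 2| = 5 to the
         accumulator ARG2; the neg_double/add_double sequence above forms
         each absolute difference in double-word arithmetic.  */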
    default:
      break;
    }

  return NULL_TREE;
}

/* ??? This duplicates information provided to the compiler by the
   ??? scheduler description.  Some day, teach genautomata to output
   ??? the latencies and then CSE will just use that.  */
static bool
sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
                 bool speed ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
        {
          *total = 0;
          return true;
        }
      /* FALLTHRU */
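
      /* The bounds above are the 13-bit signed immediate range of SPARC
         arithmetic instructions (SIMM13): e.g. 4095 is encodable directly
         and thus free, while 0x12345 falls through and must eventually be
         synthesized with a sethi/or pair.  */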
    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode
          && ((CONST_DOUBLE_HIGH (x) == 0
               && CONST_DOUBLE_LOW (x) < 0x1000)
              || (CONST_DOUBLE_HIGH (x) == -1
                  && CONST_DOUBLE_LOW (x) < 0
                  && CONST_DOUBLE_LOW (x) >= -0x1000)))
        *total = 0;
      else
        *total = 8;
      return true;
    case MEM:
      /* If outer-code was a sign or zero extension, a cost
         of COSTS_N_INSNS (1) was already added in.  This is
         why we are subtracting it back out.  */
      if (outer_code == ZERO_EXTEND)
        {
          *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
        }
      else if (outer_code == SIGN_EXTEND)
        {
          *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
        }
      else if (float_mode_p)
        {
          *total = sparc_costs->float_load;
        }
      else
        {
          *total = sparc_costs->int_load;
        }

      return true;
    case PLUS:
    case MINUS:
      if (float_mode_p)
        *total = sparc_costs->float_plusminus;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (float_mode_p)
        *total = sparc_costs->float_mul;
      else if (! TARGET_HARD_MUL)
        *total = COSTS_N_INSNS (25);
      else
        {
          int bit_cost;

          bit_cost = 0;
          if (sparc_costs->int_mul_bit_factor)
            {
              int nbits;

              if (GET_CODE (XEXP (x, 1)) == CONST_INT)
                {
                  unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
                  for (nbits = 0; value != 0; value &= value - 1)
                    nbits++;
                }
              else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
                       && GET_MODE (XEXP (x, 1)) == VOIDmode)
                {
                  rtx x1 = XEXP (x, 1);
                  unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
                  unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);

                  for (nbits = 0; value1 != 0; value1 &= value1 - 1)
                    nbits++;
                  for (; value2 != 0; value2 &= value2 - 1)
                    nbits++;
                }
              else
                nbits = 7;

              if (nbits < 3)
                nbits = 3;
              bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
              bit_cost = COSTS_N_INSNS (bit_cost);
            }
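
          /* Worked example: with an int_mul_bit_factor of 2, a multiply
             by 0x1FF (nine set bits) gets nbits = 9 and therefore
             bit_cost = COSTS_N_INSNS ((9 - 3) / 2) = COSTS_N_INSNS (3),
             while a power of two is clamped to nbits = 3 and adds no
             extra cost.  */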
          if (mode == DImode)
            *total = sparc_costs->int_mulX + bit_cost;
          else
            *total = sparc_costs->int_mul + bit_cost;
        }
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
      return false;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (float_mode_p)
        {
          if (mode == DFmode)
            *total = sparc_costs->float_div_df;
          else
            *total = sparc_costs->float_div_sf;
        }
      else
        {
          if (mode == DImode)
            *total = sparc_costs->int_divX;
          else
            *total = sparc_costs->int_div;
        }
      return false;

    case NEG:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */
    case ABS:
    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
      *total = sparc_costs->float_move;
      return false;

    case SQRT:
      if (mode == DFmode)
        *total = sparc_costs->float_sqrt_df;
      else
        *total = sparc_costs->float_sqrt_sf;
      return false;

    case COMPARE:
      if (float_mode_p)
        *total = sparc_costs->float_cmp;
      else
        *total = COSTS_N_INSNS (1);
      return false;
    case IF_THEN_ELSE:
      if (float_mode_p)
        *total = sparc_costs->float_cmove;
      else
        *total = sparc_costs->int_cmove;
      return false;

    case IOR:
      /* Handle the NAND vector patterns.  */
      if (sparc_vector_mode_supported_p (GET_MODE (x))
          && GET_CODE (XEXP (x, 0)) == NOT
          && GET_CODE (XEXP (x, 1)) == NOT)
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      else
        return false;

    default:
      return false;
    }
}
/* Return true if CLASS is either GENERAL_REGS or I64_REGS.  */

static inline bool
general_or_i64_p (reg_class_t rclass)
{
  return (rclass == GENERAL_REGS || rclass == I64_REGS);
}
/* Implement TARGET_REGISTER_MOVE_COST.  */

static int
sparc_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                          reg_class_t from, reg_class_t to)
{
  if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
      || (general_or_i64_p (from) && FP_REG_CLASS_P (to))
      || from == FPCC_REGS
      || to == FPCC_REGS)
    {
      if (sparc_cpu == PROCESSOR_ULTRASPARC
          || sparc_cpu == PROCESSOR_ULTRASPARC3
          || sparc_cpu == PROCESSOR_NIAGARA
          || sparc_cpu == PROCESSOR_NIAGARA2)
        return 12;

      return 6;
    }

  return 2;
}
/* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
   This is achieved by means of a manual dynamic stack space allocation in
   the current frame.  We make the assumption that SEQ doesn't contain any
   function calls, with the possible exception of calls to the GOT helper.  */

static void
emit_and_preserve (rtx seq, rtx reg, rtx reg2)
{
  /* We must preserve the lowest 16 words for the register save area.  */
  HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
  /* We really need only 2 words of fresh stack space.  */
  HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);

  rtx slot
    = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
                                             SPARC_STACK_BIAS + offset));

  emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
  emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
  if (reg2)
    emit_insn (gen_rtx_SET (VOIDmode,
                            adjust_address (slot, word_mode, UNITS_PER_WORD),
                            reg2));
  emit_insn (seq);
  if (reg2)
    emit_insn (gen_rtx_SET (VOIDmode,
                            reg2,
                            adjust_address (slot, word_mode, UNITS_PER_WORD)));
  emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
  emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
}
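
/* Sizing example, assuming the usual word sizes: in 32-bit mode
   UNITS_PER_WORD is 4, so OFFSET is 64 and SIZE is SPARC_STACK_ALIGN (72)
   bytes; in 64-bit mode they are 128 and SPARC_STACK_ALIGN (144), with
   SPARC_STACK_BIAS additionally displacing the save slot on V9.  */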
/* Output the assembler code for a thunk function.  THUNK_DECL is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at address
   (*THIS + VCALL_OFFSET) should be additionally added to THIS.  */
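
/* Equivalently, the thunk behaves like the following pseudo-C, where the
   additions are on byte addresses (illustration only):

     this += DELTA;
     if (VCALL_OFFSET)
       this += *(intptr_t *) (*(char **) this + VCALL_OFFSET);
     tail-call FUNCTION (this, ...);
*/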
static void
sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
                       HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                       tree function)
{
  rtx this_rtx, insn, funexp;
  unsigned int int_arg_first;

  reload_completed = 1;
  epilogue_completed = 1;

  emit_note (NOTE_INSN_PROLOGUE_END);

  if (flag_delayed_branch)
    {
      /* We will emit a regular sibcall below, so we need to instruct
         output_sibcall that we are in a leaf function.  */
      sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;

      /* This will cause final.c to invoke leaf_renumber_regs so we
         must behave as if we were in a not-yet-leafified function.  */
      int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
    }
  else
    {
      /* We will emit the sibcall manually below, so we will need to
         manually spill non-leaf registers.  */
      sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;

      /* We really are in a leaf function.  */
      int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
    }
  /* Find the "this" pointer.  Normally in %o0, but in ARCH64 if the function
     returns a structure, the structure return pointer is there instead.  */
  if (TARGET_ARCH64
      && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
  else
    this_rtx = gen_rtx_REG (Pmode, int_arg_first);

  /* Add DELTA.  When possible use a plain add, otherwise load it into
     a register first.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);

      if (! SPARC_SIMM13_P (delta))
        {
          rtx scratch = gen_rtx_REG (Pmode, 1);
          emit_move_insn (scratch, delta_rtx);
          delta_rtx = scratch;
        }

      /* THIS_RTX += DELTA.  */
      emit_insn (gen_add2_insn (this_rtx, delta_rtx));
    }
  /* Add the word at address (*THIS_RTX + VCALL_OFFSET).  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx scratch = gen_rtx_REG (Pmode, 1);

      gcc_assert (vcall_offset < 0);

      /* SCRATCH = *THIS_RTX.  */
      emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));

      /* Prepare for adding VCALL_OFFSET.  The difficulty is that we
         may not have any available scratch register at this point.  */
      if (SPARC_SIMM13_P (vcall_offset))
        ;
      /* This is the case if ARCH64 (unless -ffixed-g5 is passed).  */
      else if (! fixed_regs[5]
               /* The below sequence is made up of at least 2 insns,
                  while the default method may need only one.  */
               && vcall_offset < -8192)
        {
          rtx scratch2 = gen_rtx_REG (Pmode, 5);
          emit_move_insn (scratch2, vcall_offset_rtx);
          vcall_offset_rtx = scratch2;
        }
      else
        {
          rtx increment = GEN_INT (-4096);

          /* VCALL_OFFSET is a negative number whose typical range can be
             estimated as -32768..0 in 32-bit mode.  In almost all cases
             it is therefore cheaper to emit multiple add insns than
             spilling and loading the constant into a register (at least
             3 insns).  */
          while (! SPARC_SIMM13_P (vcall_offset))
            {
              emit_insn (gen_add2_insn (scratch, increment));
              vcall_offset += 4096;
            }

          vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
        }

      /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET).  */
      emit_move_insn (scratch, gen_rtx_MEM (Pmode,
                                            gen_rtx_PLUS (Pmode,
                                                          scratch,
                                                          vcall_offset_rtx)));

      /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET).  */
      emit_insn (gen_add2_insn (this_rtx, scratch));
    }
  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);

  if (flag_delayed_branch)
    {
      funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
      insn = emit_call_insn (gen_sibcall (funexp));
      SIBLING_CALL_P (insn) = 1;
    }
  else
    {
      /* The hoops we have to jump through in order to generate a sibcall
         without using delay slots...  */
      rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);

      if (flag_pic)
        {
          spill_reg = gen_rtx_REG (word_mode, 15);  /* %o7 */
          start_sequence ();
          /* Delay emitting the GOT helper function because it needs to
             change the section and we are emitting assembly code.  */
          load_got_register ();  /* clobbers %o7 */
          scratch = sparc_legitimize_pic_address (funexp, scratch);
          seq = get_insns ();
          end_sequence ();
          emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
        }
      else if (TARGET_ARCH32)
        {
          emit_insn (gen_rtx_SET (VOIDmode,
                                  scratch,
                                  gen_rtx_HIGH (SImode, funexp)));
          emit_insn (gen_rtx_SET (VOIDmode,
                                  scratch,
                                  gen_rtx_LO_SUM (SImode, scratch, funexp)));
        }
      else  /* TARGET_ARCH64 */
        {
          switch (sparc_cmodel)
            {
            case CM_MEDLOW:
            case CM_MEDMID:
              /* The destination can serve as a temporary.  */
              sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
              break;

            case CM_MEDANY:
            case CM_EMBMEDANY:
              /* The destination cannot serve as a temporary.  */
              spill_reg = gen_rtx_REG (DImode, 15);  /* %o7 */
              start_sequence ();
              sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
              seq = get_insns ();
              end_sequence ();
              emit_and_preserve (seq, spill_reg, 0);
              break;

            default:
              gcc_unreachable ();
            }
        }

      emit_jump_insn (gen_indirect_jump (scratch));
    }

  emit_barrier ();
  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worthwhile.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_alloc ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}
/* Return true if sparc_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
                           HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
                           HOST_WIDE_INT vcall_offset,
                           const_tree function ATTRIBUTE_UNUSED)
{
  /* Bound the loop used in the default method above.  */
  return (vcall_offset >= -32768 || ! fixed_regs[5]);
}
/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
sparc_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in local-dynamic base patterns.  */

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
        && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}
static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (x
      && GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}
/* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
   This is called from dwarf2out.c to emit call frame instructions
   for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs.  */

static void
sparc_dwarf_handle_frame_unspec (const char *label,
                                 rtx pattern ATTRIBUTE_UNUSED,
                                 int index ATTRIBUTE_UNUSED)
{
  gcc_assert (index == UNSPECV_SAVEW);
  dwarf2out_window_save (label);
}
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void
sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.word\t%r_tls_dtpoff32(", file);
      break;
    case 8:
      fputs ("\t.xword\t%r_tls_dtpoff64(", file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs (")", file);
}
/* Do whatever processing is required at the end of a file.  */

static void
sparc_file_end (void)
{
  /* If we need to emit the special GOT helper function, do so now.  */
  if (got_helper_rtx)
    {
      const char *name = XSTR (got_helper_rtx, 0);
      const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
#ifdef DWARF2_UNWIND_INFO
      bool do_cfi;
#endif

      if (USE_HIDDEN_LINKONCE)
        {
          tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                                  get_identifier (name),
                                  build_function_type_list (void_type_node,
                                                            NULL_TREE));
          DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
                                           NULL_TREE, void_type_node);
          TREE_STATIC (decl) = 1;
          make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
          DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
          DECL_VISIBILITY_SPECIFIED (decl) = 1;
          resolve_unique_section (decl, 0, flag_function_sections);
          allocate_struct_function (decl, true);
          cfun->is_thunk = 1;
          current_function_decl = decl;
          init_varasm_status ();
          assemble_start_function (decl, name);
        }
      else
        {
          const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
          switch_to_section (text_section);
          if (align > 0)
            ASM_OUTPUT_ALIGN (asm_out_file, align);
          ASM_OUTPUT_LABEL (asm_out_file, name);
        }
#ifdef DWARF2_UNWIND_INFO
      do_cfi = dwarf2out_do_cfi_asm ();
      if (do_cfi)
        fprintf (asm_out_file, "\t.cfi_startproc\n");
#endif
      if (flag_delayed_branch)
        fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
                 reg_name, reg_name);
      else
        fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
                 reg_name, reg_name);
#ifdef DWARF2_UNWIND_INFO
      if (do_cfi)
        fprintf (asm_out_file, "\t.cfi_endproc\n");
#endif
    }
  if (NEED_INDICATE_EXEC_STACK)
    file_end_indicate_exec_stack ();
}
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
sparc_mangle_type (const_tree type)
{
  if (!TARGET_64BIT
      && TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* Expand code to perform an 8-bit or 16-bit compare and swap by doing a
   32-bit compare and swap on the word containing the byte or half-word.  */

void
sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
{
  rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
  rtx addr = gen_reg_rtx (Pmode);
  rtx off = gen_reg_rtx (SImode);
  rtx oldv = gen_reg_rtx (SImode);
  rtx newv = gen_reg_rtx (SImode);
  rtx oldvalue = gen_reg_rtx (SImode);
  rtx newvalue = gen_reg_rtx (SImode);
  rtx res = gen_reg_rtx (SImode);
  rtx resv = gen_reg_rtx (SImode);
  rtx memsi, val, mask, end_label, loop_label, cc;
  emit_insn (gen_rtx_SET (VOIDmode, addr,
                          gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));

  if (Pmode != SImode)
    addr1 = gen_lowpart (SImode, addr1);
  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_AND (SImode, addr1, GEN_INT (3))));

  memsi = gen_rtx_MEM (SImode, addr);
  set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
  MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);

  val = force_reg (SImode, memsi);

  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_XOR (SImode, off,
                                       GEN_INT (GET_MODE (mem) == QImode
                                                ? 3 : 2))));

  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));

  if (GET_MODE (mem) == QImode)
    mask = force_reg (SImode, GEN_INT (0xff));
  else
    mask = force_reg (SImode, GEN_INT (0xffff));

  emit_insn (gen_rtx_SET (VOIDmode, mask,
                          gen_rtx_ASHIFT (SImode, mask, off)));

  emit_insn (gen_rtx_SET (VOIDmode, val,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       val)));

  oldval = gen_lowpart (SImode, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_ASHIFT (SImode, oldval, off)));

  newval = gen_lowpart_common (SImode, newval);
  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_ASHIFT (SImode, newval, off)));

  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_AND (SImode, oldv, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_AND (SImode, newv, mask)));

  end_label = gen_label_rtx ();
  loop_label = gen_label_rtx ();
  emit_label (loop_label);

  emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
                          gen_rtx_IOR (SImode, oldv, val)));

  emit_insn (gen_rtx_SET (VOIDmode, newvalue,
                          gen_rtx_IOR (SImode, newv, val)));

  emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));

  emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);

  emit_insn (gen_rtx_SET (VOIDmode, resv,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       res)));

  cc = gen_compare_reg_1 (NE, resv, val);
  emit_insn (gen_rtx_SET (VOIDmode, val, resv));

  /* Use cbranchcc4 to separate the compare and branch!  */
  emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
                                  cc, const0_rtx, loop_label));

  emit_label (end_label);

  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_AND (SImode, res, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_LSHIFTRT (SImode, res, off)));

  emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
}
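
/* Worked example of the address arithmetic above, assuming big-endian
   memory order: for a QImode access at A = 0x1002, ADDR = A & -4 = 0x1000
   and OFF = ((A & 3) ^ 3) << 3 = 8, so the byte occupies bits 15:8 of the
   containing word and MASK becomes 0xff << 8.  */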
/* Implement TARGET_FRAME_POINTER_REQUIRED.  */

static bool
sparc_frame_pointer_required (void)
{
  return !(leaf_function_p () && only_leaf_regs_used ());
}
/* The way this is structured, we can't eliminate SFP in favor of SP
   if the frame pointer is required: we want to use the SFP->HFP elimination
   in that case.  But the test in update_eliminables doesn't know we are
   assuming below that we only do the former elimination.  */

static bool
sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == HARD_FRAME_POINTER_REGNUM
          || !targetm.frame_pointer_required ());
}
/* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
   they won't be allocated.  */

static void
sparc_conditional_register_usage (void)
{
  if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    {
      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g5, then honor it.  */
  if (TARGET_ARCH32 && fixed_regs[5])
    fixed_regs[5] = 1;
  else if (TARGET_ARCH64 && fixed_regs[5] == 2)
    fixed_regs[5] = 0;
  if (! TARGET_V9)
    {
      int regno;
      for (regno = SPARC_FIRST_V9_FP_REG;
           regno <= SPARC_LAST_V9_FP_REG;
           regno++)
        fixed_regs[regno] = 1;
      /* %fcc0 is used by v8 and v9.  */
      for (regno = SPARC_FIRST_V9_FCC_REG + 1;
           regno <= SPARC_LAST_V9_FCC_REG;
           regno++)
        fixed_regs[regno] = 1;
    }
  if (! TARGET_FPU)
    {
      int regno;
      for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
        fixed_regs[regno] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g2, then honor it.
     Likewise with g3 and g4.  */
  if (fixed_regs[2] == 2)
    fixed_regs[2] = ! TARGET_APP_REGS;
  if (fixed_regs[3] == 2)
    fixed_regs[3] = ! TARGET_APP_REGS;
  if (TARGET_ARCH32 && fixed_regs[4] == 2)
    fixed_regs[4] = ! TARGET_APP_REGS;
  else if (TARGET_CM_EMBMEDANY)
    fixed_regs[4] = 1;
  else if (fixed_regs[4] == 2)
    fixed_regs[4] = 0;
}
/* Implement TARGET_PREFERRED_RELOAD_CLASS:

   - We can't load constants into FP registers.
   - We can't load FP constants into integer registers when soft-float,
     because there is no soft-float pattern with a r/F constraint.
   - We can't load FP constants into integer registers for TFmode unless
     it is 0.0L, because there is no movtf pattern with a r/F constraint.
   - Try and reload integer constants (symbolic or otherwise) back into
     registers directly, rather than having them dumped to memory.  */

static reg_class_t
sparc_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (CONSTANT_P (x))
    {
      if (FP_REG_CLASS_P (rclass)
          || rclass == GENERAL_OR_FP_REGS
          || rclass == GENERAL_OR_EXTRA_FP_REGS
          || (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT && ! TARGET_FPU)
          || (GET_MODE (x) == TFmode && ! const_zero_operand (x, TFmode)))
        return NO_REGS;

      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
        return GENERAL_REGS;
    }

  return rclass;
}

#include "gt-sparc.h"