1 /* Subroutines for insn-output.c for SPARC.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com)
64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
at Cygnus Support.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
32 #include "hard-reg-set.h"
34 #include "insn-config.h"
35 #include "insn-codes.h"
36 #include "conditions.h"
38 #include "insn-attr.h"
49 #include "target-def.h"
50 #include "cfglayout.h"
51 #include "tree-gimple.h"
52 #include "langhooks.h"
58 struct processor_costs cypress_costs = {
59 COSTS_N_INSNS (2), /* int load */
60 COSTS_N_INSNS (2), /* int signed load */
61 COSTS_N_INSNS (2), /* int zeroed load */
62 COSTS_N_INSNS (2), /* float load */
63 COSTS_N_INSNS (5), /* fmov, fneg, fabs */
64 COSTS_N_INSNS (5), /* fadd, fsub */
65 COSTS_N_INSNS (1), /* fcmp */
66 COSTS_N_INSNS (1), /* fmov, fmovr */
67 COSTS_N_INSNS (7), /* fmul */
68 COSTS_N_INSNS (37), /* fdivs */
69 COSTS_N_INSNS (37), /* fdivd */
70 COSTS_N_INSNS (63), /* fsqrts */
71 COSTS_N_INSNS (63), /* fsqrtd */
72 COSTS_N_INSNS (1), /* imul */
73 COSTS_N_INSNS (1), /* imulX */
74 0, /* imul bit factor */
75 COSTS_N_INSNS (1), /* idiv */
76 COSTS_N_INSNS (1), /* idivX */
77 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
82 struct processor_costs supersparc_costs = {
83 COSTS_N_INSNS (1), /* int load */
84 COSTS_N_INSNS (1), /* int signed load */
85 COSTS_N_INSNS (1), /* int zeroed load */
86 COSTS_N_INSNS (0), /* float load */
87 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
88 COSTS_N_INSNS (3), /* fadd, fsub */
89 COSTS_N_INSNS (3), /* fcmp */
90 COSTS_N_INSNS (1), /* fmov, fmovr */
91 COSTS_N_INSNS (3), /* fmul */
92 COSTS_N_INSNS (6), /* fdivs */
93 COSTS_N_INSNS (9), /* fdivd */
94 COSTS_N_INSNS (12), /* fsqrts */
95 COSTS_N_INSNS (12), /* fsqrtd */
96 COSTS_N_INSNS (4), /* imul */
97 COSTS_N_INSNS (4), /* imulX */
98 0, /* imul bit factor */
99 COSTS_N_INSNS (4), /* idiv */
100 COSTS_N_INSNS (4), /* idivX */
101 COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};
106 struct processor_costs hypersparc_costs = {
107 COSTS_N_INSNS (1), /* int load */
108 COSTS_N_INSNS (1), /* int signed load */
109 COSTS_N_INSNS (1), /* int zeroed load */
110 COSTS_N_INSNS (1), /* float load */
111 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
112 COSTS_N_INSNS (1), /* fadd, fsub */
113 COSTS_N_INSNS (1), /* fcmp */
114 COSTS_N_INSNS (1), /* fmov, fmovr */
115 COSTS_N_INSNS (1), /* fmul */
116 COSTS_N_INSNS (8), /* fdivs */
117 COSTS_N_INSNS (12), /* fdivd */
118 COSTS_N_INSNS (17), /* fsqrts */
119 COSTS_N_INSNS (17), /* fsqrtd */
120 COSTS_N_INSNS (17), /* imul */
121 COSTS_N_INSNS (17), /* imulX */
122 0, /* imul bit factor */
123 COSTS_N_INSNS (17), /* idiv */
124 COSTS_N_INSNS (17), /* idivX */
125 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
130 struct processor_costs sparclet_costs = {
131 COSTS_N_INSNS (3), /* int load */
132 COSTS_N_INSNS (3), /* int signed load */
133 COSTS_N_INSNS (1), /* int zeroed load */
134 COSTS_N_INSNS (1), /* float load */
135 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
136 COSTS_N_INSNS (1), /* fadd, fsub */
137 COSTS_N_INSNS (1), /* fcmp */
138 COSTS_N_INSNS (1), /* fmov, fmovr */
139 COSTS_N_INSNS (1), /* fmul */
140 COSTS_N_INSNS (1), /* fdivs */
141 COSTS_N_INSNS (1), /* fdivd */
142 COSTS_N_INSNS (1), /* fsqrts */
143 COSTS_N_INSNS (1), /* fsqrtd */
144 COSTS_N_INSNS (5), /* imul */
145 COSTS_N_INSNS (5), /* imulX */
146 0, /* imul bit factor */
147 COSTS_N_INSNS (5), /* idiv */
148 COSTS_N_INSNS (5), /* idivX */
149 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
154 struct processor_costs ultrasparc_costs = {
155 COSTS_N_INSNS (2), /* int load */
156 COSTS_N_INSNS (3), /* int signed load */
157 COSTS_N_INSNS (2), /* int zeroed load */
158 COSTS_N_INSNS (2), /* float load */
159 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
160 COSTS_N_INSNS (4), /* fadd, fsub */
161 COSTS_N_INSNS (1), /* fcmp */
162 COSTS_N_INSNS (2), /* fmov, fmovr */
163 COSTS_N_INSNS (4), /* fmul */
164 COSTS_N_INSNS (13), /* fdivs */
165 COSTS_N_INSNS (23), /* fdivd */
166 COSTS_N_INSNS (13), /* fsqrts */
167 COSTS_N_INSNS (23), /* fsqrtd */
168 COSTS_N_INSNS (4), /* imul */
169 COSTS_N_INSNS (4), /* imulX */
170 2, /* imul bit factor */
171 COSTS_N_INSNS (37), /* idiv */
172 COSTS_N_INSNS (68), /* idivX */
173 COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};
178 struct processor_costs ultrasparc3_costs = {
179 COSTS_N_INSNS (2), /* int load */
180 COSTS_N_INSNS (3), /* int signed load */
181 COSTS_N_INSNS (3), /* int zeroed load */
182 COSTS_N_INSNS (2), /* float load */
183 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
184 COSTS_N_INSNS (4), /* fadd, fsub */
185 COSTS_N_INSNS (5), /* fcmp */
186 COSTS_N_INSNS (3), /* fmov, fmovr */
187 COSTS_N_INSNS (4), /* fmul */
188 COSTS_N_INSNS (17), /* fdivs */
189 COSTS_N_INSNS (20), /* fdivd */
190 COSTS_N_INSNS (20), /* fsqrts */
191 COSTS_N_INSNS (29), /* fsqrtd */
192 COSTS_N_INSNS (6), /* imul */
193 COSTS_N_INSNS (6), /* imulX */
194 0, /* imul bit factor */
195 COSTS_N_INSNS (40), /* idiv */
196 COSTS_N_INSNS (71), /* idivX */
197 COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};
202 struct processor_costs niagara_costs = {
203 COSTS_N_INSNS (3), /* int load */
204 COSTS_N_INSNS (3), /* int signed load */
205 COSTS_N_INSNS (3), /* int zeroed load */
206 COSTS_N_INSNS (9), /* float load */
207 COSTS_N_INSNS (8), /* fmov, fneg, fabs */
208 COSTS_N_INSNS (8), /* fadd, fsub */
209 COSTS_N_INSNS (26), /* fcmp */
210 COSTS_N_INSNS (8), /* fmov, fmovr */
211 COSTS_N_INSNS (29), /* fmul */
212 COSTS_N_INSNS (54), /* fdivs */
213 COSTS_N_INSNS (83), /* fdivd */
214 COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
215 COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
216 COSTS_N_INSNS (11), /* imul */
217 COSTS_N_INSNS (11), /* imulX */
218 0, /* imul bit factor */
219 COSTS_N_INSNS (72), /* idiv */
220 COSTS_N_INSNS (72), /* idivX */
221 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
226 struct processor_costs niagara2_costs = {
227 COSTS_N_INSNS (3), /* int load */
228 COSTS_N_INSNS (3), /* int signed load */
229 COSTS_N_INSNS (3), /* int zeroed load */
230 COSTS_N_INSNS (3), /* float load */
231 COSTS_N_INSNS (6), /* fmov, fneg, fabs */
232 COSTS_N_INSNS (6), /* fadd, fsub */
233 COSTS_N_INSNS (6), /* fcmp */
234 COSTS_N_INSNS (6), /* fmov, fmovr */
235 COSTS_N_INSNS (6), /* fmul */
236 COSTS_N_INSNS (19), /* fdivs */
237 COSTS_N_INSNS (33), /* fdivd */
238 COSTS_N_INSNS (19), /* fsqrts */
239 COSTS_N_INSNS (33), /* fsqrtd */
240 COSTS_N_INSNS (5), /* imul */
241 COSTS_N_INSNS (5), /* imulX */
242 0, /* imul bit factor */
243 COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
244 COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
245 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
249 const struct processor_costs *sparc_costs = &cypress_costs;
251 #ifdef HAVE_AS_RELAX_OPTION
252 /* If 'as' and 'ld' are relaxing tail call insns into branch always, use
253 "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
254 With sethi/jmp, neither 'as' nor 'ld' has an easy way how to find out if
255 somebody does not branch between the sethi and jmp. */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
262 /* Global variables for machine-dependent things. */
264 /* Size of frame. Need to know this to emit return insns from leaf procedures.
265 ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
266 reload pass. This is important as the value is later used for scheduling
267 (to see what can go in a delay slot).
268 APPARENT_FSIZE is the size of the stack less the register save area and less
269 the outgoing argument area. It is used when saving call preserved regs. */
270 static HOST_WIDE_INT apparent_fsize;
271 static HOST_WIDE_INT actual_fsize;
273 /* Number of live general or floating point registers needed to be
274 saved (as 4-byte quantities). */
275 static int num_gfregs;
277 /* The alias set for prologue/epilogue register save/restore. */
278 static GTY(()) alias_set_type sparc_sr_alias_set;
280 /* The alias set for the structure return value. */
281 static GTY(()) alias_set_type struct_value_alias_set;
283 /* Save the operands last given to a compare for use when we
284 generate a scc or bcc insn. */
285 rtx sparc_compare_op0, sparc_compare_op1, sparc_compare_emitted;
287 /* Vector to say how input registers are mapped to output registers.
288 HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
289 eliminate it. You must use -fomit-frame-pointer to get that. */
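/* For illustration: with this mapping the incoming window registers are
   folded onto the outgoing ones, so %i0 (hard reg 24) becomes %o0 (hard
   reg 8) and %i7 (31) becomes %o7 (15), while %fp (30) stays at -1 because
   the frame pointer cannot be remapped.  */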
290 char leaf_reg_remap[] =
291 { 0, 1, 2, 3, 4, 5, 6, 7,
292 -1, -1, -1, -1, -1, -1, 14, -1,
293 -1, -1, -1, -1, -1, -1, -1, -1,
294 8, 9, 10, 11, 12, 13, -1, 15,
296 32, 33, 34, 35, 36, 37, 38, 39,
297 40, 41, 42, 43, 44, 45, 46, 47,
298 48, 49, 50, 51, 52, 53, 54, 55,
299 56, 57, 58, 59, 60, 61, 62, 63,
300 64, 65, 66, 67, 68, 69, 70, 71,
301 72, 73, 74, 75, 76, 77, 78, 79,
302 80, 81, 82, 83, 84, 85, 86, 87,
303 88, 89, 90, 91, 92, 93, 94, 95,
304 96, 97, 98, 99, 100};
306 /* Vector, indexed by hard register number, which contains 1
307 for a register that is allowable in a candidate for leaf
308 function treatment. */
309 char sparc_leaf_regs[] =
310 { 1, 1, 1, 1, 1, 1, 1, 1,
311 0, 0, 0, 0, 0, 0, 1, 0,
312 0, 0, 0, 0, 0, 0, 0, 0,
313 1, 1, 1, 1, 1, 1, 0, 1,
314 1, 1, 1, 1, 1, 1, 1, 1,
315 1, 1, 1, 1, 1, 1, 1, 1,
316 1, 1, 1, 1, 1, 1, 1, 1,
317 1, 1, 1, 1, 1, 1, 1, 1,
318 1, 1, 1, 1, 1, 1, 1, 1,
319 1, 1, 1, 1, 1, 1, 1, 1,
320 1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};
struct machine_function GTY(())
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  bool leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};
339 #define sparc_leaf_function_p cfun->machine->leaf_function_p
340 #define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
342 /* Register we pretend to think the frame pointer is allocated to.
343 Normally, this is %fp, but if we are in a leaf procedure, this
344 is %sp+"something". We record "something" separately as it may
345 be too big for reg+constant addressing. */
346 static rtx frame_base_reg;
347 static HOST_WIDE_INT frame_base_offset;
349 /* 1 if the next opcode is to be specially indented. */
350 int sparc_indent_opcode = 0;
352 static bool sparc_handle_option (size_t, const char *, int);
353 static void sparc_init_modes (void);
354 static void scan_record_type (tree, int *, int *, int *);
355 static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
356 tree, int, int, int *, int *);
358 static int supersparc_adjust_cost (rtx, rtx, rtx, int);
359 static int hypersparc_adjust_cost (rtx, rtx, rtx, int);
361 static void sparc_output_addr_vec (rtx);
362 static void sparc_output_addr_diff_vec (rtx);
363 static void sparc_output_deferred_case_vectors (void);
364 static rtx sparc_builtin_saveregs (void);
365 static int epilogue_renumber (rtx *, int);
366 static bool sparc_assemble_integer (rtx, unsigned int, int);
367 static int set_extends (rtx);
368 static void emit_pic_helper (void);
369 static void load_pic_register (bool);
370 static int save_or_restore_regs (int, int, rtx, int, int);
371 static void emit_save_or_restore_regs (int);
372 static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
373 static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
374 #ifdef OBJECT_FORMAT_ELF
static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
#endif
378 static int sparc_adjust_cost (rtx, rtx, rtx, int);
379 static int sparc_issue_rate (void);
380 static void sparc_sched_init (FILE *, int, int);
381 static int sparc_use_sched_lookahead (void);
383 static void emit_soft_tfmode_libcall (const char *, int, rtx *);
384 static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
385 static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
386 static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
387 static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
389 static bool sparc_function_ok_for_sibcall (tree, tree);
390 static void sparc_init_libfuncs (void);
391 static void sparc_init_builtins (void);
392 static void sparc_vis_init_builtins (void);
393 static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
394 static tree sparc_fold_builtin (tree, tree, bool);
395 static int sparc_vis_mul8x16 (int, int);
396 static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
397 static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
398 HOST_WIDE_INT, tree);
399 static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
400 HOST_WIDE_INT, const_tree);
401 static struct machine_function * sparc_init_machine_status (void);
402 static bool sparc_cannot_force_const_mem (rtx);
403 static rtx sparc_tls_get_addr (void);
404 static rtx sparc_tls_got (void);
405 static const char *get_some_local_dynamic_name (void);
406 static int get_some_local_dynamic_name_1 (rtx *, void *);
407 static bool sparc_rtx_costs (rtx, int, int, int *);
408 static bool sparc_promote_prototypes (const_tree);
409 static rtx sparc_struct_value_rtx (tree, int);
410 static bool sparc_return_in_memory (const_tree, const_tree);
411 static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
412 static void sparc_va_start (tree, rtx);
413 static tree sparc_gimplify_va_arg (tree, tree, tree *, tree *);
414 static bool sparc_vector_mode_supported_p (enum machine_mode);
415 static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
416 enum machine_mode, const_tree, bool);
417 static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
418 enum machine_mode, tree, bool);
419 static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
420 static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
421 static void sparc_file_end (void);
422 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
425 #ifdef SUBTARGET_ATTRIBUTE_TABLE
const struct attribute_spec sparc_attribute_table[];
#endif
429 /* Option handling. */
432 enum cmodel sparc_cmodel;
434 char sparc_hard_reg_printed[8];
struct sparc_cpu_select sparc_select[] =
{
  /* switch      name,       tune  arch */
  { (char *)0,   "default",  1,    1 },
  { (char *)0,   "-mcpu=",   1,    1 },
  { (char *)0,   "-mtune=",  1,    0 },
  { 0, 0, 0, 0 }
};
445 /* CPU type. This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx. */
446 enum processor_type sparc_cpu;
/* Whether an FPU option was specified.  */
static bool fpu_option_set = false;
451 /* Initialize the GCC target structure. */
453 /* The sparc default is to use .half rather than .short for aligned
454 HI objects. Use .word instead of .long on non-ELF systems. */
455 #undef TARGET_ASM_ALIGNED_HI_OP
456 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
457 #ifndef OBJECT_FORMAT_ELF
458 #undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#endif
462 #undef TARGET_ASM_UNALIGNED_HI_OP
463 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
464 #undef TARGET_ASM_UNALIGNED_SI_OP
465 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
466 #undef TARGET_ASM_UNALIGNED_DI_OP
467 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
469 /* The target hook has to handle DI-mode values. */
470 #undef TARGET_ASM_INTEGER
471 #define TARGET_ASM_INTEGER sparc_assemble_integer
473 #undef TARGET_ASM_FUNCTION_PROLOGUE
474 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
475 #undef TARGET_ASM_FUNCTION_EPILOGUE
476 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
478 #undef TARGET_SCHED_ADJUST_COST
479 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
480 #undef TARGET_SCHED_ISSUE_RATE
481 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
482 #undef TARGET_SCHED_INIT
483 #define TARGET_SCHED_INIT sparc_sched_init
484 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
485 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
487 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
488 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
490 #undef TARGET_INIT_LIBFUNCS
491 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
492 #undef TARGET_INIT_BUILTINS
493 #define TARGET_INIT_BUILTINS sparc_init_builtins
495 #undef TARGET_EXPAND_BUILTIN
496 #define TARGET_EXPAND_BUILTIN sparc_expand_builtin
497 #undef TARGET_FOLD_BUILTIN
498 #define TARGET_FOLD_BUILTIN sparc_fold_builtin
501 #undef TARGET_HAVE_TLS
502 #define TARGET_HAVE_TLS true
505 #undef TARGET_CANNOT_FORCE_CONST_MEM
506 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
508 #undef TARGET_ASM_OUTPUT_MI_THUNK
509 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
510 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
511 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
513 #undef TARGET_RTX_COSTS
514 #define TARGET_RTX_COSTS sparc_rtx_costs
515 #undef TARGET_ADDRESS_COST
516 #define TARGET_ADDRESS_COST hook_int_rtx_0
518 /* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
519 no-op for TARGET_ARCH32 this is ok. Otherwise we'd need to add a runtime
520 test for this value. */
521 #undef TARGET_PROMOTE_FUNCTION_ARGS
522 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
524 /* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
525 no-op for TARGET_ARCH32 this is ok. Otherwise we'd need to add a runtime
526 test for this value. */
527 #undef TARGET_PROMOTE_FUNCTION_RETURN
528 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
530 #undef TARGET_PROMOTE_PROTOTYPES
531 #define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes
533 #undef TARGET_STRUCT_VALUE_RTX
534 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
535 #undef TARGET_RETURN_IN_MEMORY
536 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
537 #undef TARGET_MUST_PASS_IN_STACK
538 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
539 #undef TARGET_PASS_BY_REFERENCE
540 #define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
541 #undef TARGET_ARG_PARTIAL_BYTES
542 #define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
544 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
545 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
546 #undef TARGET_STRICT_ARGUMENT_NAMING
547 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
549 #undef TARGET_EXPAND_BUILTIN_VA_START
550 #define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
551 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
552 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
554 #undef TARGET_VECTOR_MODE_SUPPORTED_P
555 #define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
557 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
558 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec
560 #ifdef SUBTARGET_INSERT_ATTRIBUTES
561 #undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif
565 #ifdef SUBTARGET_ATTRIBUTE_TABLE
566 #undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif
570 #undef TARGET_RELAXED_ORDERING
571 #define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING
573 #undef TARGET_DEFAULT_TARGET_FLAGS
574 #define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
575 #undef TARGET_HANDLE_OPTION
576 #define TARGET_HANDLE_OPTION sparc_handle_option
579 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
580 #define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
583 #undef TARGET_ASM_FILE_END
584 #define TARGET_ASM_FILE_END sparc_file_end
586 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
587 #undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif
591 struct gcc_target targetm = TARGET_INITIALIZER;
/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mfpu:
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}
/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
624 static struct code_model {
    const char *const name;
    const int value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { 0, 0 }
  };
635 const struct code_model *cmodel;
636 /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=. */
  static struct cpu_default {
    const int cpu;
    const char *const name;
640 } const cpu_default[] = {
641 /* There must be one entry here for each TARGET_CPU value. */
642 { TARGET_CPU_sparc, "cypress" },
643 { TARGET_CPU_sparclet, "tsc701" },
644 { TARGET_CPU_sparclite, "f930" },
645 { TARGET_CPU_v8, "v8" },
646 { TARGET_CPU_hypersparc, "hypersparc" },
647 { TARGET_CPU_sparclite86x, "sparclite86x" },
648 { TARGET_CPU_supersparc, "supersparc" },
649 { TARGET_CPU_v9, "v9" },
650 { TARGET_CPU_ultrasparc, "ultrasparc" },
651 { TARGET_CPU_ultrasparc3, "ultrasparc3" },
652 { TARGET_CPU_niagara, "niagara" },
    { TARGET_CPU_niagara2, "niagara2" },
    { 0, 0 }
  };
656 const struct cpu_default *def;
657 /* Table of values for -m{cpu,tune}=. */
658 static struct cpu_table {
659 const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
664 { "v7", PROCESSOR_V7, MASK_ISA, 0 },
665 { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
666 { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
667 /* TI TMS390Z55 supersparc */
668 { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
669 { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
670 /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
671 The Fujitsu MB86934 is the recent sparclite chip, with an fpu. */
672 { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
673 { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
674 { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
677 { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
679 { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
680 { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
681 /* TI ultrasparc I, II, IIi */
682 { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
    /* Although insns using %y are deprecated, it is a clear win on current
       ultrasparcs.  */
685 |MASK_DEPRECATED_V8_INSNS},
686 /* TI ultrasparc III */
687 /* ??? Check if %y issue still holds true in ultra3. */
688 { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
690 { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    { "niagara2", PROCESSOR_NIAGARA2, MASK_ISA, MASK_V9},
    { 0, (enum processor_type) 0, 0, 0 }
  };
694 const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;
698 #ifndef SPARC_BI_ARCH
699 /* Check for unsupported architecture size. */
700 if (! TARGET_64BIT != DEFAULT_ARCH32_P)
701 error ("%s is not supported by this configuration",
           DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif
705 /* We force all 64bit archs to use 128 bit long double */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }
712 /* Code model selection. */
713 sparc_cmodel = SPARC_DEFAULT_CMODEL;
#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif
  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
        {
          for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
            if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
              break;
          if (cmodel->name == NULL)
            error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
          else
            sparc_cmodel = cmodel->value;
        }
      else
        error ("-mcmodel= is not supported on 32 bit systems");
    }
736 fpu = target_flags & MASK_FPU; /* save current -mfpu status */
738 /* Set the default CPU. */
739 for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
742 gcc_assert (def->name);
743 sparc_select[0].string = def->name;
745 for (sel = &sparc_select[0]; sel->name; ++sel)
749 for (cpu = &cpu_table[0]; cpu->name; ++cpu)
750 if (! strcmp (sel->string, cpu->name))
753 sparc_cpu = cpu->processor;
757 target_flags &= ~cpu->disable;
758 target_flags |= cpu->enable;
764 error ("bad value (%s) for %s switch", sel->string, sel->name);
768 /* If -mfpu or -mno-fpu was explicitly used, don't override with
769 the processor default. */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;
773 /* Don't allow -mvis if FPU is disabled. */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;
  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }
786 /* Use the deprecated v8 insns for sparc64 in 32 bit mode. */
787 if (TARGET_V9 && TARGET_ARCH32)
788 target_flags |= MASK_DEPRECATED_V8_INSNS;
790 /* V8PLUS requires V9, makes no sense in 64 bit mode. */
791 if (! TARGET_V9 || TARGET_ARCH64)
792 target_flags &= ~MASK_V8PLUS;
794 /* Don't use stack biasing in 32 bit mode. */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;
798 /* Supply a default value for align_functions. */
799 if (align_functions == 0
800 && (sparc_cpu == PROCESSOR_ULTRASPARC
801 || sparc_cpu == PROCESSOR_ULTRASPARC3
802 || sparc_cpu == PROCESSOR_NIAGARA
803 || sparc_cpu == PROCESSOR_NIAGARA2))
804 align_functions = 32;
806 /* Validate PCC_STRUCT_RETURN. */
807 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
808 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
810 /* Only use .uaxword when compiling for a 64-bit target. */
  if (! TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;
  /* Do various machine dependent initializations.  */
  sparc_init_modes ();
817 /* Acquire unique alias sets for our private stuff. */
818 sparc_sr_alias_set = new_alias_set ();
819 struct_value_alias_set = new_alias_set ();
821 /* Set up function hooks. */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    }
860 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
861 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
865 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
866 set_param_value ("simultaneous-prefetches",
867 ((sparc_cpu == PROCESSOR_ULTRASPARC
868 || sparc_cpu == PROCESSOR_NIAGARA
869 || sparc_cpu == PROCESSOR_NIAGARA2)
871 : (sparc_cpu == PROCESSOR_ULTRASPARC3
873 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
874 set_param_value ("l1-cache-line-size",
875 ((sparc_cpu == PROCESSOR_ULTRASPARC
876 || sparc_cpu == PROCESSOR_ULTRASPARC3
877 || sparc_cpu == PROCESSOR_NIAGARA
878 || sparc_cpu == PROCESSOR_NIAGARA2)
882 #ifdef SUBTARGET_ATTRIBUTE_TABLE
883 /* Table of valid machine attributes. */
const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
892 /* Miscellaneous utilities. */
894 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
895 or branch on register contents instructions. */
898 v9_regcmp_p (enum rtx_code code)
900 return (code == EQ || code == NE || code == GE || code == LT
901 || code == LE || code == GT);
904 /* Nonzero if OP is a floating point constant which can
905 be loaded into an integer register using a single
906 sethi instruction. */
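/* For illustration: the SFmode constant 1.5 has the single-precision bit
   image 0x3fc00000.  It is too large for a signed 13-bit immediate, but its
   low ten bits are clear, so a single sethi can load it.  */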
911 if (GET_CODE (op) == CONST_DOUBLE)
916 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
917 REAL_VALUE_TO_TARGET_SINGLE (r, i);
918 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
924 /* Nonzero if OP is a floating point constant which can
925 be loaded into an integer register using a single
931 if (GET_CODE (op) == CONST_DOUBLE)
936 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
937 REAL_VALUE_TO_TARGET_SINGLE (r, i);
938 return SPARC_SIMM13_P (i);
944 /* Nonzero if OP is a floating point constant which can
945 be loaded into an integer register using a high/losum
946 instruction sequence. */
949 fp_high_losum_p (rtx op)
951 /* The constraints calling this should only be in
952 SFmode move insns, so any constant which cannot
953 be moved using a single insn will do. */
954 if (GET_CODE (op) == CONST_DOUBLE)
959 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
960 REAL_VALUE_TO_TARGET_SINGLE (r, i);
961 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
967 /* Expand a move instruction. Return true if all work is done. */
970 sparc_expand_move (enum machine_mode mode, rtx *operands)
972 /* Handle sets of MEM first. */
973 if (GET_CODE (operands[0]) == MEM)
975 /* 0 is a register (or a pair of registers) on SPARC. */
976 if (register_or_zero_operand (operands[1], mode))
979 if (!reload_in_progress)
981 operands[0] = validize_mem (operands[0]);
982 operands[1] = force_reg (mode, operands[1]);
986 /* Fixup TLS cases. */
988 && CONSTANT_P (operands[1])
989 && GET_CODE (operands[1]) != HIGH
990 && sparc_tls_referenced_p (operands [1]))
992 rtx sym = operands[1];
995 if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
997 addend = XEXP (XEXP (sym, 0), 1);
998 sym = XEXP (XEXP (sym, 0), 0);
1001 gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));
1003 sym = legitimize_tls_address (sym);
1006 sym = gen_rtx_PLUS (mode, sym, addend);
1007 sym = force_operand (sym, operands[0]);
1012 /* Fixup PIC cases. */
1013 if (flag_pic && CONSTANT_P (operands[1]))
1015 if (pic_address_needs_scratch (operands[1]))
1016 operands[1] = legitimize_pic_address (operands[1], mode, 0);
1018 /* VxWorks does not impose a fixed gap between segments; the run-time
1019 gap can be different from the object-file gap. We therefore can't
1020 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
1021 are absolutely sure that X is in the same segment as the GOT.
1022 Unfortunately, the flexibility of linker scripts means that we
1023 can't be sure of that in general, so assume that _G_O_T_-relative
1024 accesses are never valid on VxWorks. */
1025 if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
1029 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
1035 gcc_assert (TARGET_ARCH64);
1036 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
1041 if (symbolic_operand (operands[1], mode))
1043 operands[1] = legitimize_pic_address (operands[1],
1045 (reload_in_progress ?
1052 /* If we are trying to toss an integer constant into FP registers,
1053 or loading a FP or vector constant, force it into memory. */
1054 if (CONSTANT_P (operands[1])
1055 && REG_P (operands[0])
1056 && (SPARC_FP_REG_P (REGNO (operands[0]))
1057 || SCALAR_FLOAT_MODE_P (mode)
1058 || VECTOR_MODE_P (mode)))
1060 /* emit_group_store will send such bogosity to us when it is
1061 not storing directly into memory. So fix this up to avoid
1062 crashes in output_constant_pool. */
1063 if (operands [1] == const0_rtx)
1064 operands[1] = CONST0_RTX (mode);
1066 /* We can clear FP registers if TARGET_VIS, and always other regs. */
1067 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
1068 && const_zero_operand (operands[1], mode))
1071 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
1072 /* We are able to build any SF constant in integer registers
1073 with at most 2 instructions. */
1075 /* And any DF constant in integer registers. */
1077 && (reload_completed || reload_in_progress))))
1080 operands[1] = force_const_mem (mode, operands[1]);
1081 if (!reload_in_progress)
1082 operands[1] = validize_mem (operands[1]);
1086 /* Accept non-constants and valid constants unmodified. */
1087 if (!CONSTANT_P (operands[1])
1088 || GET_CODE (operands[1]) == HIGH
1089 || input_operand (operands[1], mode))
1095 /* All QImode constants require only one insn, so proceed. */
1100 sparc_emit_set_const32 (operands[0], operands[1]);
1104 /* input_operand should have filtered out 32-bit mode. */
1105 sparc_emit_set_const64 (operands[0], operands[1]);
1115 /* Load OP1, a 32-bit constant, into OP0, a register.
1116 We know it can't be done in one insn when we get
1117 here, the move expander guarantees this. */
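/* Worked example (illustrative): for op1 == (const_int 0x12345678) the
   CONST_INT path below first sets TEMP to 0x12345400 (the constant with its
   low ten bits cleared) and then emits op0 = TEMP | 0x278.  */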
1120 sparc_emit_set_const32 (rtx op0, rtx op1)
1122 enum machine_mode mode = GET_MODE (op0);
1125 if (reload_in_progress || reload_completed)
1128 temp = gen_reg_rtx (mode);
1130 if (GET_CODE (op1) == CONST_INT)
1132 gcc_assert (!small_int_operand (op1, mode)
1133 && !const_high_operand (op1, mode));
1135 /* Emit them as real moves instead of a HIGH/LO_SUM,
1136 this way CSE can see everything and reuse intermediate
1137 values if it wants. */
1138 emit_insn (gen_rtx_SET (VOIDmode, temp,
1139 GEN_INT (INTVAL (op1)
1140 & ~(HOST_WIDE_INT)0x3ff)));
1142 emit_insn (gen_rtx_SET (VOIDmode,
1144 gen_rtx_IOR (mode, temp,
1145 GEN_INT (INTVAL (op1) & 0x3ff))));
1149 /* A symbol, emit in the traditional way. */
1150 emit_insn (gen_rtx_SET (VOIDmode, temp,
1151 gen_rtx_HIGH (mode, op1)));
1152 emit_insn (gen_rtx_SET (VOIDmode,
1153 op0, gen_rtx_LO_SUM (mode, temp, op1)));
1157 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
1158 If TEMP is nonzero, we are forbidden to use any other scratch
1159 registers. Otherwise, we are allowed to generate them as needed.
1161 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
1162 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
1165 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
1167 rtx temp1, temp2, temp3, temp4, temp5;
1170 if (temp && GET_MODE (temp) == TImode)
1173 temp = gen_rtx_REG (DImode, REGNO (temp));
1176 /* SPARC-V9 code-model support. */
1177 switch (sparc_cmodel)
1180 /* The range spanned by all instructions in the object is less
1181 than 2^31 bytes (2GB) and the distance from any instruction
1182 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1183 than 2^31 bytes (2GB).
1185 The executable must be in the low 4TB of the virtual address
1188 sethi %hi(symbol), %temp1
1189 or %temp1, %lo(symbol), %reg */
1191 temp1 = temp; /* op0 is allowed. */
1193 temp1 = gen_reg_rtx (DImode);
1195 emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
1196 emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
1200 /* The range spanned by all instructions in the object is less
1201 than 2^31 bytes (2GB) and the distance from any instruction
1202 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1203 than 2^31 bytes (2GB).
1205 The executable must be in the low 16TB of the virtual address
1208 sethi %h44(symbol), %temp1
1209 or %temp1, %m44(symbol), %temp2
1210 sllx %temp2, 12, %temp3
1211 or %temp3, %l44(symbol), %reg */
1216 temp3 = temp; /* op0 is allowed. */
1220 temp1 = gen_reg_rtx (DImode);
1221 temp2 = gen_reg_rtx (DImode);
1222 temp3 = gen_reg_rtx (DImode);
1225 emit_insn (gen_seth44 (temp1, op1));
1226 emit_insn (gen_setm44 (temp2, temp1, op1));
1227 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1228 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
1229 emit_insn (gen_setl44 (op0, temp3, op1));
1233 /* The range spanned by all instructions in the object is less
1234 than 2^31 bytes (2GB) and the distance from any instruction
1235 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1236 than 2^31 bytes (2GB).
1238 The executable can be placed anywhere in the virtual address
1241 sethi %hh(symbol), %temp1
1242 sethi %lm(symbol), %temp2
1243 or %temp1, %hm(symbol), %temp3
1244 sllx %temp3, 32, %temp4
1245 or %temp4, %temp2, %temp5
1246 or %temp5, %lo(symbol), %reg */
1249 /* It is possible that one of the registers we got for operands[2]
1250 might coincide with that of operands[0] (which is why we made
1251 it TImode). Pick the other one to use as our scratch. */
1252 if (rtx_equal_p (temp, op0))
1254 gcc_assert (ti_temp);
1255 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1258 temp2 = temp; /* op0 is _not_ allowed, see above. */
1265 temp1 = gen_reg_rtx (DImode);
1266 temp2 = gen_reg_rtx (DImode);
1267 temp3 = gen_reg_rtx (DImode);
1268 temp4 = gen_reg_rtx (DImode);
1269 temp5 = gen_reg_rtx (DImode);
1272 emit_insn (gen_sethh (temp1, op1));
1273 emit_insn (gen_setlm (temp2, op1));
1274 emit_insn (gen_sethm (temp3, temp1, op1));
1275 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1276 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1277 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1278 gen_rtx_PLUS (DImode, temp4, temp2)));
1279 emit_insn (gen_setlo (op0, temp5, op1));
1283 /* Old old old backwards compatibility kruft here.
1284 Essentially it is MEDLOW with a fixed 64-bit
1285 virtual base added to all data segment addresses.
1286 Text-segment stuff is computed like MEDANY, we can't
1287 reuse the code above because the relocation knobs
1290 Data segment: sethi %hi(symbol), %temp1
1291 add %temp1, EMBMEDANY_BASE_REG, %temp2
1292 or %temp2, %lo(symbol), %reg */
1293 if (data_segment_operand (op1, GET_MODE (op1)))
1297 temp1 = temp; /* op0 is allowed. */
1302 temp1 = gen_reg_rtx (DImode);
1303 temp2 = gen_reg_rtx (DImode);
1306 emit_insn (gen_embmedany_sethi (temp1, op1));
1307 emit_insn (gen_embmedany_brsum (temp2, temp1));
1308 emit_insn (gen_embmedany_losum (op0, temp2, op1));
1311 /* Text segment: sethi %uhi(symbol), %temp1
1312 sethi %hi(symbol), %temp2
1313 or %temp1, %ulo(symbol), %temp3
1314 sllx %temp3, 32, %temp4
1315 or %temp4, %temp2, %temp5
1316 or %temp5, %lo(symbol), %reg */
1321 /* It is possible that one of the registers we got for operands[2]
1322 might coincide with that of operands[0] (which is why we made
1323 it TImode). Pick the other one to use as our scratch. */
1324 if (rtx_equal_p (temp, op0))
1326 gcc_assert (ti_temp);
1327 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1330 temp2 = temp; /* op0 is _not_ allowed, see above. */
1337 temp1 = gen_reg_rtx (DImode);
1338 temp2 = gen_reg_rtx (DImode);
1339 temp3 = gen_reg_rtx (DImode);
1340 temp4 = gen_reg_rtx (DImode);
1341 temp5 = gen_reg_rtx (DImode);
1344 emit_insn (gen_embmedany_textuhi (temp1, op1));
1345 emit_insn (gen_embmedany_texthi (temp2, op1));
1346 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
1347 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1348 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1349 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1350 gen_rtx_PLUS (DImode, temp4, temp2)));
1351 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
#if HOST_BITS_PER_WIDE_INT == 32
static void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
1367 /* These avoid problems when cross compiling. If we do not
1368 go through all this hair then the optimizer will see
1369 invalid REG_EQUAL notes or in some cases none at all. */
1370 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
1371 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
1372 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
1373 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
1375 /* The optimizer is not to assume anything about exactly
1376 which bits are set for a HIGH, they are unspecified.
1377 Unfortunately this leads to many missed optimizations
1378 during CSE. We mask out the non-HIGH bits, and matches
1379 a plain movdi, to alleviate this problem. */
1381 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
1383 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
1387 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
1389 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
1393 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
1395 return gen_rtx_IOR (DImode, src, GEN_INT (val));
1399 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
1401 return gen_rtx_XOR (DImode, src, GEN_INT (val));
1404 /* Worker routines for 64-bit constant formation on arch64.
1405 One of the key things to be doing in these emissions is
1406 to create as many temp REGs as possible. This makes it
1407 possible for half-built constants to be used later when
1408 such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */
1412 static void sparc_emit_set_const64_quick1 (rtx, rtx,
1413 unsigned HOST_WIDE_INT, int);
1416 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
1417 unsigned HOST_WIDE_INT low_bits, int is_neg)
1419 unsigned HOST_WIDE_INT high_bits;
  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;
1426 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1429 emit_insn (gen_rtx_SET (VOIDmode, op0,
1430 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1434 /* If we are XOR'ing with -1, then we should emit a one's complement
1435 instead. This way the combiner will notice logical operations
1436 such as ANDN later on and substitute. */
1437 if ((low_bits & 0x3ff) == 0x3ff)
1439 emit_insn (gen_rtx_SET (VOIDmode, op0,
1440 gen_rtx_NOT (DImode, temp)));
1444 emit_insn (gen_rtx_SET (VOIDmode, op0,
1445 gen_safe_XOR64 (temp,
1446 (-(HOST_WIDE_INT)0x400
1447 | (low_bits & 0x3ff)))));
1452 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1453 unsigned HOST_WIDE_INT, int);
1456 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1457 unsigned HOST_WIDE_INT high_bits,
1458 unsigned HOST_WIDE_INT low_immediate,
1463 if ((high_bits & 0xfffffc00) != 0)
1465 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1466 if ((high_bits & ~0xfffffc00) != 0)
1467 emit_insn (gen_rtx_SET (VOIDmode, op0,
1468 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1474 emit_insn (gen_safe_SET64 (temp, high_bits));
1478 /* Now shift it up into place. */
1479 emit_insn (gen_rtx_SET (VOIDmode, op0,
1480 gen_rtx_ASHIFT (DImode, temp2,
1481 GEN_INT (shift_count))));
1483 /* If there is a low immediate part piece, finish up by
1484 putting that in as well. */
1485 if (low_immediate != 0)
1486 emit_insn (gen_rtx_SET (VOIDmode, op0,
1487 gen_safe_OR64 (op0, low_immediate)));
1490 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1491 unsigned HOST_WIDE_INT);
1493 /* Full 64-bit constant decomposition. Even though this is the
1494 'worst' case, we still optimize a few things away. */
1496 sparc_emit_set_const64_longway (rtx op0, rtx temp,
1497 unsigned HOST_WIDE_INT high_bits,
1498 unsigned HOST_WIDE_INT low_bits)
1502 if (reload_in_progress || reload_completed)
1505 sub_temp = gen_reg_rtx (DImode);
1507 if ((high_bits & 0xfffffc00) != 0)
1509 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1510 if ((high_bits & ~0xfffffc00) != 0)
1511 emit_insn (gen_rtx_SET (VOIDmode,
1513 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1519 emit_insn (gen_safe_SET64 (temp, high_bits));
1523 if (!reload_in_progress && !reload_completed)
1525 rtx temp2 = gen_reg_rtx (DImode);
1526 rtx temp3 = gen_reg_rtx (DImode);
1527 rtx temp4 = gen_reg_rtx (DImode);
1529 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1530 gen_rtx_ASHIFT (DImode, sub_temp,
1533 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
1534 if ((low_bits & ~0xfffffc00) != 0)
1536 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1537 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
1538 emit_insn (gen_rtx_SET (VOIDmode, op0,
1539 gen_rtx_PLUS (DImode, temp4, temp3)));
1543 emit_insn (gen_rtx_SET (VOIDmode, op0,
1544 gen_rtx_PLUS (DImode, temp4, temp2)));
1549 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
1550 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
1551 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
1554 /* We are in the middle of reload, so this is really
1555 painful. However we do still make an attempt to
1556 avoid emitting truly stupid code. */
1557 if (low1 != const0_rtx)
1559 emit_insn (gen_rtx_SET (VOIDmode, op0,
1560 gen_rtx_ASHIFT (DImode, sub_temp,
1561 GEN_INT (to_shift))));
1562 emit_insn (gen_rtx_SET (VOIDmode, op0,
1563 gen_rtx_IOR (DImode, op0, low1)));
1571 if (low2 != const0_rtx)
1573 emit_insn (gen_rtx_SET (VOIDmode, op0,
1574 gen_rtx_ASHIFT (DImode, sub_temp,
1575 GEN_INT (to_shift))));
1576 emit_insn (gen_rtx_SET (VOIDmode, op0,
1577 gen_rtx_IOR (DImode, op0, low2)));
1585 emit_insn (gen_rtx_SET (VOIDmode, op0,
1586 gen_rtx_ASHIFT (DImode, sub_temp,
1587 GEN_INT (to_shift))));
1588 if (low3 != const0_rtx)
1589 emit_insn (gen_rtx_SET (VOIDmode, op0,
1590 gen_rtx_IOR (DImode, op0, low3)));
1595 /* Analyze a 64-bit constant for certain properties. */
1596 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
1597 unsigned HOST_WIDE_INT,
1598 int *, int *, int *);
1601 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
1602 unsigned HOST_WIDE_INT low_bits,
1603 int *hbsp, int *lbsp, int *abbasp)
1605 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
1608 lowest_bit_set = highest_bit_set = -1;
1612 if ((lowest_bit_set == -1)
1613 && ((low_bits >> i) & 1))
1615 if ((highest_bit_set == -1)
1616 && ((high_bits >> (32 - i - 1)) & 1))
1617 highest_bit_set = (64 - i - 1);
1620 && ((highest_bit_set == -1)
1621 || (lowest_bit_set == -1)));
1627 if ((lowest_bit_set == -1)
1628 && ((high_bits >> i) & 1))
1629 lowest_bit_set = i + 32;
1630 if ((highest_bit_set == -1)
1631 && ((low_bits >> (32 - i - 1)) & 1))
1632 highest_bit_set = 32 - i - 1;
1635 && ((highest_bit_set == -1)
1636 || (lowest_bit_set == -1)));
1638 /* If there are no bits set this should have gone out
1639 as one instruction! */
1640 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
1641 all_bits_between_are_set = 1;
1642 for (i = lowest_bit_set; i <= highest_bit_set; i++)
1646 if ((low_bits & (1 << i)) != 0)
1651 if ((high_bits & (1 << (i - 32))) != 0)
1654 all_bits_between_are_set = 0;
1657 *hbsp = highest_bit_set;
1658 *lbsp = lowest_bit_set;
1659 *abbasp = all_bits_between_are_set;
1662 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
1665 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
1666 unsigned HOST_WIDE_INT low_bits)
1668 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
1671 || high_bits == 0xffffffff)
1674 analyze_64bit_constant (high_bits, low_bits,
1675 &highest_bit_set, &lowest_bit_set,
1676 &all_bits_between_are_set);
1678 if ((highest_bit_set == 63
1679 || lowest_bit_set == 0)
1680 && all_bits_between_are_set != 0)
1683 if ((highest_bit_set - lowest_bit_set) < 21)
1689 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
1690 unsigned HOST_WIDE_INT,
1693 static unsigned HOST_WIDE_INT
1694 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
1695 unsigned HOST_WIDE_INT low_bits,
1696 int lowest_bit_set, int shift)
1698 HOST_WIDE_INT hi, lo;
  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
1714 /* Here we are sure to be arch64 and this is an integer constant
1715 being loaded into a register. Emit the most efficient
1716 insn sequence possible. Detection of all the 1-insn cases
1717 has been done already. */
1719 sparc_emit_set_const64 (rtx op0, rtx op1)
1721 unsigned HOST_WIDE_INT high_bits, low_bits;
1722 int lowest_bit_set, highest_bit_set;
1723 int all_bits_between_are_set;
1726 /* Sanity check that we know what we are working with. */
1727 gcc_assert (TARGET_ARCH64
1728 && (GET_CODE (op0) == SUBREG
1729 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1731 if (reload_in_progress || reload_completed)
1734 if (GET_CODE (op1) != CONST_INT)
1736 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1741 temp = gen_reg_rtx (DImode);
1743 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1744 low_bits = (INTVAL (op1) & 0xffffffff);
1746 /* low_bits bits 0 --> 31
1747 high_bits bits 32 --> 63 */
1749 analyze_64bit_constant (high_bits, low_bits,
1750 &highest_bit_set, &lowest_bit_set,
1751 &all_bits_between_are_set);
1753 /* First try for a 2-insn sequence. */
  /* These situations are preferred because the optimizer can
   * do more things with them:
   * 1) mov  -1, %reg
   *    sllx %reg, shift, %reg
   * 2) mov  -1, %reg
   *    srlx %reg, shift, %reg
   * 3) mov  some_small_const, %reg
   *    sllx %reg, shift, %reg
   */
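  /* Worked example (illustrative): the constant 0x0000030000000000 has
     lowest_bit_set == 40 and highest_bit_set == 41, so the code below picks
     the_const == 3 and shift == 40, giving "mov 3, %reg; sllx %reg, 40, %reg".  */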
1764 if (((highest_bit_set == 63
1765 || lowest_bit_set == 0)
1766 && all_bits_between_are_set != 0)
1767 || ((highest_bit_set - lowest_bit_set) < 12))
1769 HOST_WIDE_INT the_const = -1;
1770 int shift = lowest_bit_set;
1772 if ((highest_bit_set != 63
1773 && lowest_bit_set != 0)
1774 || all_bits_between_are_set == 0)
1777 create_simple_focus_bits (high_bits, low_bits,
1780 else if (lowest_bit_set == 0)
1781 shift = -(63 - highest_bit_set);
1783 gcc_assert (SPARC_SIMM13_P (the_const));
1784 gcc_assert (shift != 0);
1786 emit_insn (gen_safe_SET64 (temp, the_const));
1788 emit_insn (gen_rtx_SET (VOIDmode,
1790 gen_rtx_ASHIFT (DImode,
1794 emit_insn (gen_rtx_SET (VOIDmode,
1796 gen_rtx_LSHIFTRT (DImode,
1798 GEN_INT (-shift))));
1802 /* Now a range of 22 or less bits set somewhere.
1803 * 1) sethi %hi(focus_bits), %reg
1804 * sllx %reg, shift, %reg
1805 * 2) sethi %hi(focus_bits), %reg
1806 * srlx %reg, shift, %reg
1808 if ((highest_bit_set - lowest_bit_set) < 21)
1810 unsigned HOST_WIDE_INT focus_bits =
1811 create_simple_focus_bits (high_bits, low_bits,
1812 lowest_bit_set, 10);
1814 gcc_assert (SPARC_SETHI_P (focus_bits));
1815 gcc_assert (lowest_bit_set != 10);
1817 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1819 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1820 if (lowest_bit_set < 10)
1821 emit_insn (gen_rtx_SET (VOIDmode,
1823 gen_rtx_LSHIFTRT (DImode, temp,
1824 GEN_INT (10 - lowest_bit_set))));
1825 else if (lowest_bit_set > 10)
1826 emit_insn (gen_rtx_SET (VOIDmode,
1828 gen_rtx_ASHIFT (DImode, temp,
1829 GEN_INT (lowest_bit_set - 10))));
1833 /* 1) sethi %hi(low_bits), %reg
1834 * or %reg, %lo(low_bits), %reg
1835 * 2) sethi %hi(~low_bits), %reg
1836 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1839 || high_bits == 0xffffffff)
1841 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1842 (high_bits == 0xffffffff));
1846 /* Now, try 3-insn sequences. */
1848 /* 1) sethi %hi(high_bits), %reg
1849 * or %reg, %lo(high_bits), %reg
1850 * sllx %reg, 32, %reg
1854 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1858 /* We may be able to do something quick
1859 when the constant is negated, so try that. */
1860 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1861 (~low_bits) & 0xfffffc00))
1863 /* NOTE: The trailing bits get XOR'd so we need the
1864 non-negated bits, not the negated ones. */
1865 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1867 if ((((~high_bits) & 0xffffffff) == 0
1868 && ((~low_bits) & 0x80000000) == 0)
1869 || (((~high_bits) & 0xffffffff) == 0xffffffff
1870 && ((~low_bits) & 0x80000000) != 0))
1872 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1874 if ((SPARC_SETHI_P (fast_int)
1875 && (~high_bits & 0xffffffff) == 0)
1876 || SPARC_SIMM13_P (fast_int))
1877 emit_insn (gen_safe_SET64 (temp, fast_int));
1879 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1884 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1885 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
1886 sparc_emit_set_const64 (temp, negated_const);
1889 /* If we are XOR'ing with -1, then we should emit a one's complement
1890 instead. This way the combiner will notice logical operations
1891 such as ANDN later on and substitute. */
1892 if (trailing_bits == 0x3ff)
1894 emit_insn (gen_rtx_SET (VOIDmode, op0,
1895 gen_rtx_NOT (DImode, temp)));
1899 emit_insn (gen_rtx_SET (VOIDmode,
1901 gen_safe_XOR64 (temp,
1902 (-0x400 | trailing_bits))));
1907 /* 1) sethi %hi(xxx), %reg
1908 * or %reg, %lo(xxx), %reg
1909 * sllx %reg, yyy, %reg
1911 * ??? This is just a generalized version of the low_bits==0
1912 * thing above, FIXME...
1914 if ((highest_bit_set - lowest_bit_set) < 32)
1916 unsigned HOST_WIDE_INT focus_bits =
1917 create_simple_focus_bits (high_bits, low_bits,
1920 /* We can't get here in this state. */
1921 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1923 /* So what we know is that the set bits straddle the
1924 middle of the 64-bit word. */
1925 sparc_emit_set_const64_quick2 (op0, temp,
1931 /* 1) sethi %hi(high_bits), %reg
1932 * or %reg, %lo(high_bits), %reg
1933 * sllx %reg, 32, %reg
1934 * or %reg, low_bits, %reg
1936 if (SPARC_SIMM13_P(low_bits)
1937 && ((int)low_bits > 0))
1939 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
1943 /* The easiest way when all else fails, is full decomposition. */
#if 0
  printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
          high_bits, low_bits, ~high_bits, ~low_bits);
#endif
1948 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1950 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
1952 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1953 return the mode to be used for the comparison. For floating-point,
1954 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1955 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1956 processing is needed. */
1959 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1961 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1987 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
1988 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
1990 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1991 return CCX_NOOVmode;
1997 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2004 /* X and Y are two things to compare using CODE. Emit the compare insn and
2005 return the rtx for the cc reg in the proper mode. */
2008 gen_compare_reg (enum rtx_code code)
2010 rtx x = sparc_compare_op0;
2011 rtx y = sparc_compare_op1;
2012 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
2015 if (sparc_compare_emitted != NULL_RTX)
2017 cc_reg = sparc_compare_emitted;
2018 sparc_compare_emitted = NULL_RTX;
2022 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2023 fcc regs (cse can't tell they're really call clobbered regs and will
2024 remove a duplicate comparison even if there is an intervening function
2025 call - it will then try to reload the cc reg via an int reg which is why
2026 we need the movcc patterns). It is possible to provide the movcc
2027 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2028 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2029 to tell cse that CCFPE mode registers (even pseudos) are call
2032 /* ??? This is an experiment. Rather than making changes to cse which may
2033 or may not be easy/clean, we do our own cse. This is possible because
2034 we will generate hard registers. Cse knows they're call clobbered (it
2035 doesn't know the same thing about pseudos). If we guess wrong, no big
2036 deal, but if we win, great! */
2038 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2039 #if 1 /* experiment */
2042 /* We cycle through the registers to ensure they're all exercised. */
2043 static int next_fcc_reg = 0;
2044 /* Previous x,y for each fcc reg. */
2045 static rtx prev_args[4][2];
2047 /* Scan prev_args for x,y. */
2048 for (reg = 0; reg < 4; reg++)
2049 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2054 prev_args[reg][0] = x;
2055 prev_args[reg][1] = y;
2056 next_fcc_reg = (next_fcc_reg + 1) & 3;
2058 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2061 cc_reg = gen_reg_rtx (mode);
2062 #endif /* ! experiment */
2063 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2064 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2066 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2068 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
2069 gen_rtx_COMPARE (mode, x, y)));
2074 /* This function is used for v9 only.
2075 CODE is the code for an Scc's comparison.
2076 OPERANDS[0] is the target of the Scc insn.
2077 OPERANDS[1] is the value we compare against const0_rtx (which hasn't
2078 been generated yet).
2080 This function is needed to turn
2083 	(gt (reg:CCX 100 %icc) (const_int 0))
   into
2087 	(gt:DI (reg:CCX 100 %icc) (const_int 0))

2090 I.e., the instruction recognizer needs to see the mode of the comparison to
2091 find the right instruction. We could use "gt:DI" right in the
2092 define_expand, but leaving it out allows us to handle DI, SI, etc.
2094 We refer to the global sparc compare operands sparc_compare_op0 and
2095 sparc_compare_op1. */
2098 gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
2101 && (GET_MODE (sparc_compare_op0) == DImode
2102 || GET_MODE (operands[0]) == DImode))
2105 /* Try to use the movrCC insns. */
2107 && GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) == MODE_INT
2108 && sparc_compare_op1 == const0_rtx
2109 && v9_regcmp_p (compare_code))
2111 rtx op0 = sparc_compare_op0;
2114 /* Special case for op0 != 0. This can be done with one instruction if
2115 operands[0] == sparc_compare_op0. */
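      /* A sketch of the single-instruction output this case aims for,
	 assuming the value and the result have both ended up in %o0
	 (the concrete register is of course the allocator's choice):

		movrnz	%o0, 1, %o0

	 i.e. one V9 register-conditional move.  */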
2117 if (compare_code == NE
2118 && GET_MODE (operands[0]) == DImode
2119 && rtx_equal_p (op0, operands[0]))
2121 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2122 gen_rtx_IF_THEN_ELSE (DImode,
2123 gen_rtx_fmt_ee (compare_code, DImode,
2130 if (reg_overlap_mentioned_p (operands[0], op0))
2132 /* Handle the case where operands[0] == sparc_compare_op0.
2133 We "early clobber" the result. */
2134 op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2135 emit_move_insn (op0, sparc_compare_op0);
2138 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2139 if (GET_MODE (op0) != DImode)
2141 temp = gen_reg_rtx (DImode);
2142 convert_move (temp, op0, 0);
2146 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2147 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2148 gen_rtx_fmt_ee (compare_code, DImode,
2156 operands[1] = gen_compare_reg (compare_code);
2158 switch (GET_MODE (operands[1]))
2168 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2169 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2170 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2171 gen_rtx_fmt_ee (compare_code,
2172 GET_MODE (operands[1]),
2173 operands[1], const0_rtx),
2174 const1_rtx, operands[0])));
2179 /* Emit a conditional jump insn for the v9 architecture using comparison code
2180 CODE and jump target LABEL.
2181 This function exists to take advantage of the v9 brxx insns. */
2184 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2186 gcc_assert (sparc_compare_emitted == NULL_RTX);
2187 emit_jump_insn (gen_rtx_SET (VOIDmode,
2189 gen_rtx_IF_THEN_ELSE (VOIDmode,
2190 gen_rtx_fmt_ee (code, GET_MODE (op0),
2192 gen_rtx_LABEL_REF (VOIDmode, label),
2196 /* Generate a DFmode part of a hard TFmode register.
2197 REG is the TFmode hard register, LOW is 1 for the
2198 low 64 bits of the register and 0 otherwise.  */
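/* For example (purely illustrative): on this word-big-endian target, asking
   for the low half (LOW == 1) of a TFmode value held in %f0 (regno 32)
   yields %f2, i.e. regno 34, because a DFmode value spans two 32-bit float
   registers; for a TFmode value held in 64-bit integer registers under
   TARGET_ARCH64 the adjustment is a single regno instead.  */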
2201 gen_df_reg (rtx reg, int low)
2203 int regno = REGNO (reg);
2205 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2206 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2207 return gen_rtx_REG (DFmode, regno);
2210 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2211 Unlike normal calls, TFmode operands are passed by reference. It is
2212 assumed that no more than 3 operands are required. */
2215 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2217 rtx ret_slot = NULL, arg[3], func_sym;
2220 /* We only expect to be called for conversions, unary, and binary ops. */
2221 gcc_assert (nargs == 2 || nargs == 3);
2223 for (i = 0; i < nargs; ++i)
2225 rtx this_arg = operands[i];
2228 /* TFmode arguments and return values are passed by reference. */
2229 if (GET_MODE (this_arg) == TFmode)
2231 int force_stack_temp;
2233 force_stack_temp = 0;
2234 if (TARGET_BUGGY_QP_LIB && i == 0)
2235 force_stack_temp = 1;
2237 if (GET_CODE (this_arg) == MEM
2238 && ! force_stack_temp)
2239 this_arg = XEXP (this_arg, 0);
2240 else if (CONSTANT_P (this_arg)
2241 && ! force_stack_temp)
2243 this_slot = force_const_mem (TFmode, this_arg);
2244 this_arg = XEXP (this_slot, 0);
2248 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2250 /* Operand 0 is the return value. We'll copy it out later. */
2252 emit_move_insn (this_slot, this_arg);
2254 ret_slot = this_slot;
2256 this_arg = XEXP (this_slot, 0);
2263 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2265 if (GET_MODE (operands[0]) == TFmode)
2268 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2269 arg[0], GET_MODE (arg[0]),
2270 arg[1], GET_MODE (arg[1]));
2272 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2273 arg[0], GET_MODE (arg[0]),
2274 arg[1], GET_MODE (arg[1]),
2275 arg[2], GET_MODE (arg[2]));
2278 emit_move_insn (operands[0], ret_slot);
2284 gcc_assert (nargs == 2);
2286 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2287 GET_MODE (operands[0]), 1,
2288 arg[1], GET_MODE (arg[1]));
2290 if (ret != operands[0])
2291 emit_move_insn (operands[0], ret);
2295 /* Expand soft-float TFmode calls to the SPARC ABI routines.  */
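/* For instance, a TFmode addition is expected to become a call to the
   SPARC quad-float support routine _Qp_add under TARGET_ARCH64 or _Q_add
   under TARGET_ARCH32, with the TFmode operands passed by reference as
   described above (names quoted only to illustrate the scheme; the actual
   routine is selected from CODE below).  */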
2298 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2320 emit_soft_tfmode_libcall (func, 3, operands);
2324 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2328 gcc_assert (code == SQRT);
2331 emit_soft_tfmode_libcall (func, 2, operands);
2335 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2342 switch (GET_MODE (operands[1]))
2355 case FLOAT_TRUNCATE:
2356 switch (GET_MODE (operands[0]))
2370 switch (GET_MODE (operands[1]))
2383 case UNSIGNED_FLOAT:
2384 switch (GET_MODE (operands[1]))
2398 switch (GET_MODE (operands[0]))
2412 switch (GET_MODE (operands[0]))
2429 emit_soft_tfmode_libcall (func, 2, operands);
2432 /* Expand a hard-float TFmode operation.  All arguments must be in registers.  */
2436 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2440 if (GET_RTX_CLASS (code) == RTX_UNARY)
2442 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2443 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2447 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2448 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2449 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2450 operands[1], operands[2]);
2453 if (register_operand (operands[0], VOIDmode))
2456 dest = gen_reg_rtx (GET_MODE (operands[0]));
2458 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2460 if (dest != operands[0])
2461 emit_move_insn (operands[0], dest);
2465 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2467 if (TARGET_HARD_QUAD)
2468 emit_hard_tfmode_operation (code, operands);
2470 emit_soft_tfmode_binop (code, operands);
2474 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2476 if (TARGET_HARD_QUAD)
2477 emit_hard_tfmode_operation (code, operands);
2479 emit_soft_tfmode_unop (code, operands);
2483 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2485 if (TARGET_HARD_QUAD)
2486 emit_hard_tfmode_operation (code, operands);
2488 emit_soft_tfmode_cvt (code, operands);
2491 /* Return nonzero if a branch/jump/call instruction will be emitting
2492 a nop into its delay slot.  */
2495 empty_delay_slot (rtx insn)
2499 /* If no previous instruction (should not happen), return true. */
2500 if (PREV_INSN (insn) == NULL)
2503 seq = NEXT_INSN (PREV_INSN (insn));
2504 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2510 /* Return nonzero if TRIAL can go into the call delay slot. */
2513 tls_call_delay (rtx trial)
2518 /* Binutils allows
	  call __tls_get_addr, %tgd_call (foo)
2519 add %l7, %o0, %o0, %tgd_add (foo)
2520 while Sun as/ld does not. */
2521 if (TARGET_GNU_TLS || !TARGET_TLS)
2524 pat = PATTERN (trial);
2526 /* We must reject tgd_add{32|64}, i.e.
2527 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2528 and tldm_add{32|64}, i.e.
2529 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2531 if (GET_CODE (pat) == SET
2532 && GET_CODE (SET_SRC (pat)) == PLUS)
2534 rtx unspec = XEXP (SET_SRC (pat), 1);
2536 if (GET_CODE (unspec) == UNSPEC
2537 && (XINT (unspec, 1) == UNSPEC_TLSGD
2538 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2545 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2546 instruction. RETURN_P is true if the v9 variant 'return' is to be
2547 considered in the test too.
2549 TRIAL must be a SET whose destination is a REG appropriate for the
2550 'restore' instruction or, if RETURN_P is true, for the 'return' instruction.  */
2554 eligible_for_restore_insn (rtx trial, bool return_p)
2556 rtx pat = PATTERN (trial);
2557 rtx src = SET_SRC (pat);
2559 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2560 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2561 && arith_operand (src, GET_MODE (src)))
2564 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2566 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2569 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2570 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2571 && arith_double_operand (src, GET_MODE (src)))
2572 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2574 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2575 else if (! TARGET_FPU && register_operand (src, SFmode))
2578 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2579 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2582 /* If we have the 'return' instruction, anything that does not use
2583 local or output registers and can go into a delay slot wins. */
2584 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2585 && (get_attr_in_uncond_branch_delay (trial)
2586 == IN_UNCOND_BRANCH_DELAY_TRUE))
2589 /* The 'restore src1,src2,dest' pattern for SImode. */
2590 else if (GET_CODE (src) == PLUS
2591 && register_operand (XEXP (src, 0), SImode)
2592 && arith_operand (XEXP (src, 1), SImode))
2595 /* The 'restore src1,src2,dest' pattern for DImode. */
2596 else if (GET_CODE (src) == PLUS
2597 && register_operand (XEXP (src, 0), DImode)
2598 && arith_double_operand (XEXP (src, 1), DImode))
2601 /* The 'restore src1,%lo(src2),dest' pattern. */
2602 else if (GET_CODE (src) == LO_SUM
2603 && ! TARGET_CM_MEDMID
2604 && ((register_operand (XEXP (src, 0), SImode)
2605 && immediate_operand (XEXP (src, 1), SImode))
2607 && register_operand (XEXP (src, 0), DImode)
2608 && immediate_operand (XEXP (src, 1), DImode))))
2611 /* The 'restore src,src,dest' pattern. */
2612 else if (GET_CODE (src) == ASHIFT
2613 && (register_operand (XEXP (src, 0), SImode)
2614 || register_operand (XEXP (src, 0), DImode))
2615 && XEXP (src, 1) == const1_rtx)
2621 /* Return nonzero if TRIAL can go into the function return's
2625 eligible_for_return_delay (rtx trial)
2629 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2632 if (get_attr_length (trial) != 1)
2635 /* If there are any call-saved registers, we should scan TRIAL to check
2636 that it does not reference any of them.  For now just make it easy.  */
2640 /* If the function uses __builtin_eh_return, the eh_return machinery
2641 occupies the delay slot. */
2642 if (current_function_calls_eh_return)
2645 /* In the case of a true leaf function, anything can go into the slot. */
2646 if (sparc_leaf_function_p)
2647 return get_attr_in_uncond_branch_delay (trial)
2648 == IN_UNCOND_BRANCH_DELAY_TRUE;
2650 pat = PATTERN (trial);
2652 /* Otherwise, only operations which can be done in tandem with
2653 a `restore' or `return' insn can go into the delay slot. */
2654 if (GET_CODE (SET_DEST (pat)) != REG
2655 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2658 /* If this instruction sets up a floating point register and we have a return
2659 instruction, it can probably go in.  But restore will not work with FP regs.  */
2661 if (REGNO (SET_DEST (pat)) >= 32)
2663 && ! epilogue_renumber (&pat, 1)
2664 && (get_attr_in_uncond_branch_delay (trial)
2665 == IN_UNCOND_BRANCH_DELAY_TRUE));
2667 return eligible_for_restore_insn (trial, true);
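/* For reference, the kind of epilogue the two functions above make possible
   is

	ret
	 restore %i0, %i1, %o0

   where the final addition is folded into the 'restore' sitting in the
   return's delay slot (an illustrative sketch only).  */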
2670 /* Return nonzero if TRIAL can go into the sibling call's
2674 eligible_for_sibcall_delay (rtx trial)
2678 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2681 if (get_attr_length (trial) != 1)
2684 pat = PATTERN (trial);
2686 if (sparc_leaf_function_p)
2688 /* If the tail call is done using the call instruction,
2689 we have to restore %o7 in the delay slot. */
2690 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2693 /* %g1 is used to build the function address */
2694 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2700 /* Otherwise, only operations which can be done in tandem with
2701 a `restore' insn can go into the delay slot. */
2702 if (GET_CODE (SET_DEST (pat)) != REG
2703 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2704 || REGNO (SET_DEST (pat)) >= 32)
2707 /* If it mentions %o7, it can't go in, because sibcall will clobber it
   before it is needed.  */
2709 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2712 return eligible_for_restore_insn (trial, false);
2716 short_branch (int uid1, int uid2)
2718 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2720 /* Leave a few words of "slop". */
2721 if (delta >= -1023 && delta <= 1022)
2727 /* Return nonzero if REG is not used after INSN.
2728 We assume REG is a reload reg, and therefore does
2729 not live past labels or calls or jumps. */
2731 reg_unused_after (rtx reg, rtx insn)
2733 enum rtx_code code, prev_code = UNKNOWN;
2735 while ((insn = NEXT_INSN (insn)))
2737 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2740 code = GET_CODE (insn);
2741 if (GET_CODE (insn) == CODE_LABEL)
2746 rtx set = single_set (insn);
2747 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2750 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2752 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2760 /* Determine if it's legal to put X into the constant pool. This
2761 is not possible if X contains the address of a symbol that is
2762 not constant (TLS) or not known at final link time (PIC). */
2765 sparc_cannot_force_const_mem (rtx x)
2767 switch (GET_CODE (x))
2772 /* Accept all non-symbolic constants. */
2776 /* Labels are OK iff we are non-PIC. */
2777 return flag_pic != 0;
2780 /* 'Naked' TLS symbol references are never OK,
2781 non-TLS symbols are OK iff we are non-PIC. */
2782 if (SYMBOL_REF_TLS_MODEL (x))
2785 return flag_pic != 0;
2788 return sparc_cannot_force_const_mem (XEXP (x, 0));
2791 return sparc_cannot_force_const_mem (XEXP (x, 0))
2792 || sparc_cannot_force_const_mem (XEXP (x, 1));
2801 static GTY(()) char pic_helper_symbol_name[256];
2802 static GTY(()) rtx pic_helper_symbol;
2803 static GTY(()) bool pic_helper_emitted_p = false;
2804 static GTY(()) rtx global_offset_table;
2806 /* Ensure that we are not using patterns that are not OK with PIC. */
2814 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2815 && (GET_CODE (recog_data.operand[i]) != CONST
2816 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2817 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2818 == global_offset_table)
2819 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2827 /* Return true if X is an address which needs a temporary register when
2828 reloaded while generating PIC code. */
2831 pic_address_needs_scratch (rtx x)
2833 /* An address which is a symbol plus a non-SMALL_INT constant needs a temp reg.  */
2834 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2835 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2836 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2837 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2843 /* Determine if a given RTX is a valid constant. We already know this
2844 satisfies CONSTANT_P. */
2847 legitimate_constant_p (rtx x)
2851 switch (GET_CODE (x))
2854 /* TLS symbols are not constant. */
2855 if (SYMBOL_REF_TLS_MODEL (x))
2860 inner = XEXP (x, 0);
2862 /* Offsets of TLS symbols are never valid.
2863 Discourage CSE from creating them. */
2864 if (GET_CODE (inner) == PLUS
2865 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
2870 if (GET_MODE (x) == VOIDmode)
2873 /* Floating point constants are generally not ok.
2874 The only exception is 0.0 in VIS. */
2876 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
2877 && const_zero_operand (x, GET_MODE (x)))
2883 /* Vector constants are generally not ok.
2884 The only exception is 0 in VIS. */
2886 && const_zero_operand (x, GET_MODE (x)))
2898 /* Determine if a given RTX is a valid constant address. */
2901 constant_address_p (rtx x)
2903 switch (GET_CODE (x))
2911 if (flag_pic && pic_address_needs_scratch (x))
2913 return legitimate_constant_p (x);
2916 return !flag_pic && legitimate_constant_p (x);
2923 /* Nonzero if the constant value X is a legitimate general operand
2924 when generating PIC code. It is given that flag_pic is on and
2925 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
2928 legitimate_pic_operand_p (rtx x)
2930 if (pic_address_needs_scratch (x))
2932 if (SPARC_SYMBOL_REF_TLS_P (x)
2933 || (GET_CODE (x) == CONST
2934 && GET_CODE (XEXP (x, 0)) == PLUS
2935 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
2940 /* Return nonzero if ADDR is a valid memory address.
2941 STRICT specifies whether strict register checking applies. */
2944 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
2946 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
2948 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
2950 else if (GET_CODE (addr) == PLUS)
2952 rs1 = XEXP (addr, 0);
2953 rs2 = XEXP (addr, 1);
2955 /* Canonicalize.  REG comes first; if there are no REGs,
2956 LO_SUM comes first.  */
2958 && GET_CODE (rs1) != SUBREG
2960 || GET_CODE (rs2) == SUBREG
2961 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
2963 rs1 = XEXP (addr, 1);
2964 rs2 = XEXP (addr, 0);
2968 && rs1 == pic_offset_table_rtx
2970 && GET_CODE (rs2) != SUBREG
2971 && GET_CODE (rs2) != LO_SUM
2972 && GET_CODE (rs2) != MEM
2973 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
2974 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
2975 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
2977 || GET_CODE (rs1) == SUBREG)
2978 && RTX_OK_FOR_OFFSET_P (rs2)))
2983 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
2984 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
2986 /* We prohibit REG + REG for TFmode when there are no quad move insns
2987 and we consequently need to split. We do this because REG+REG
2988 is not an offsettable address. If we get the situation in reload
2989 where source and destination of a movtf pattern are both MEMs with
2990 REG+REG address, then only one of them gets converted to an
2991 offsettable address. */
2993 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
2996 /* We prohibit REG + REG for DFmode/DImode on ARCH32 when not optimizing,
2997 because then mem_min_alignment is likely to be zero after reload and the
2998 forced split would lack a matching splitter pattern.  */
3000 if (TARGET_ARCH32 && !optimize
3001 && (mode == DFmode || mode == DImode))
3004 else if (USE_AS_OFFSETABLE_LO10
3005 && GET_CODE (rs1) == LO_SUM
3007 && ! TARGET_CM_MEDMID
3008 && RTX_OK_FOR_OLO10_P (rs2))
3011 imm1 = XEXP (rs1, 1);
3012 rs1 = XEXP (rs1, 0);
3013 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3017 else if (GET_CODE (addr) == LO_SUM)
3019 rs1 = XEXP (addr, 0);
3020 imm1 = XEXP (addr, 1);
3022 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3025 /* We can't allow TFmode in 32-bit mode, because an offset greater
3026 than the alignment (8) may cause the LO_SUM to overflow. */
3027 if (mode == TFmode && TARGET_ARCH32)
3030 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3035 if (GET_CODE (rs1) == SUBREG)
3036 rs1 = SUBREG_REG (rs1);
3042 if (GET_CODE (rs2) == SUBREG)
3043 rs2 = SUBREG_REG (rs2);
3050 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3051 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3056 if ((REGNO (rs1) >= 32
3057 && REGNO (rs1) != FRAME_POINTER_REGNUM
3058 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3060 && (REGNO (rs2) >= 32
3061 && REGNO (rs2) != FRAME_POINTER_REGNUM
3062 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3068 /* Construct the SYMBOL_REF for the __tls_get_addr function.  */
3070 static GTY(()) rtx sparc_tls_symbol;
3073 sparc_tls_get_addr (void)
3075 if (!sparc_tls_symbol)
3076 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3078 return sparc_tls_symbol;
3082 sparc_tls_got (void)
3087 current_function_uses_pic_offset_table = 1;
3088 return pic_offset_table_rtx;
3091 if (!global_offset_table)
3092 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3093 temp = gen_reg_rtx (Pmode);
3094 emit_move_insn (temp, global_offset_table);
3098 /* Return 1 if *X is a thread-local symbol. */
3101 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3103 return SPARC_SYMBOL_REF_TLS_P (*x);
3106 /* Return 1 if X contains a thread-local symbol. */
3109 sparc_tls_referenced_p (rtx x)
3111 if (!TARGET_HAVE_TLS)
3114 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3117 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3118 this (thread-local) address. */
3121 legitimize_tls_address (rtx addr)
3123 rtx temp1, temp2, temp3, ret, o0, got, insn;
3125 gcc_assert (can_create_pseudo_p ());
3127 if (GET_CODE (addr) == SYMBOL_REF)
3128 switch (SYMBOL_REF_TLS_MODEL (addr))
3130 case TLS_MODEL_GLOBAL_DYNAMIC:
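      /* An illustrative sketch of the 32-bit sequence built here (the
	 64-bit case uses the corresponding *_add64/_call64 patterns):

		sethi	%tgd_hi22(sym), %tmp1
		add	%tmp1, %tgd_lo10(sym), %tmp2
		add	%l7, %tmp2, %o0, %tgd_add(sym)
		call	__tls_get_addr, %tgd_call(sym)
		 nop

	 with the result then copied out of %o0.  */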
3132 temp1 = gen_reg_rtx (SImode);
3133 temp2 = gen_reg_rtx (SImode);
3134 ret = gen_reg_rtx (Pmode);
3135 o0 = gen_rtx_REG (Pmode, 8);
3136 got = sparc_tls_got ();
3137 emit_insn (gen_tgd_hi22 (temp1, addr));
3138 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3141 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3142 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3147 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3148 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3151 CALL_INSN_FUNCTION_USAGE (insn)
3152 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3153 CALL_INSN_FUNCTION_USAGE (insn));
3154 insn = get_insns ();
3156 emit_libcall_block (insn, ret, o0, addr);
3159 case TLS_MODEL_LOCAL_DYNAMIC:
3161 temp1 = gen_reg_rtx (SImode);
3162 temp2 = gen_reg_rtx (SImode);
3163 temp3 = gen_reg_rtx (Pmode);
3164 ret = gen_reg_rtx (Pmode);
3165 o0 = gen_rtx_REG (Pmode, 8);
3166 got = sparc_tls_got ();
3167 emit_insn (gen_tldm_hi22 (temp1));
3168 emit_insn (gen_tldm_lo10 (temp2, temp1));
3171 emit_insn (gen_tldm_add32 (o0, got, temp2));
3172 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3177 emit_insn (gen_tldm_add64 (o0, got, temp2));
3178 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3181 CALL_INSN_FUNCTION_USAGE (insn)
3182 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3183 CALL_INSN_FUNCTION_USAGE (insn));
3184 insn = get_insns ();
3186 emit_libcall_block (insn, temp3, o0,
3187 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3188 UNSPEC_TLSLD_BASE));
3189 temp1 = gen_reg_rtx (SImode);
3190 temp2 = gen_reg_rtx (SImode);
3191 emit_insn (gen_tldo_hix22 (temp1, addr));
3192 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3194 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3196 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3199 case TLS_MODEL_INITIAL_EXEC:
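      /* Roughly, the 32-bit initial-exec sequence is

		sethi	%tie_hi22(sym), %tmp1
		add	%tmp1, %tie_lo10(sym), %tmp2
		ld	[%l7 + %tmp2], %tmp3, %tie_ld(sym)

	 followed by an add of the thread pointer %g7 (ldx and the 64-bit
	 relocations under TARGET_ARCH64); a sketch only.  */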
3200 temp1 = gen_reg_rtx (SImode);
3201 temp2 = gen_reg_rtx (SImode);
3202 temp3 = gen_reg_rtx (Pmode);
3203 got = sparc_tls_got ();
3204 emit_insn (gen_tie_hi22 (temp1, addr));
3205 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3207 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3209 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3212 ret = gen_reg_rtx (Pmode);
3214 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3217 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3221 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3224 case TLS_MODEL_LOCAL_EXEC:
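      /* Roughly, the local-exec sequence is

		sethi	%tle_hix22(sym), %tmp1
		xor	%tmp1, %tle_lox10(sym), %tmp2

	 followed by an add of the thread pointer %g7; a sketch only.  */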
3225 temp1 = gen_reg_rtx (Pmode);
3226 temp2 = gen_reg_rtx (Pmode);
3229 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3230 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3234 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3235 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3237 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3245 gcc_unreachable (); /* for now ... */
3251 /* Legitimize PIC addresses. If the address is already position-independent,
3252 we return ORIG. Newly generated position-independent addresses go into a
3253 reg.  This is REG if nonzero, otherwise we allocate register(s) as necessary.  */
3257 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3260 if (GET_CODE (orig) == SYMBOL_REF
3261 /* See the comment in sparc_expand_move. */
3262 || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
3264 rtx pic_ref, address;
3269 gcc_assert (! reload_in_progress && ! reload_completed);
3270 reg = gen_reg_rtx (Pmode);
3275 /* If not during reload, allocate another temp reg here for loading
3276 in the address, so that these instructions can be optimized properly.  */
3278 rtx temp_reg = ((reload_in_progress || reload_completed)
3279 ? reg : gen_reg_rtx (Pmode));
3281 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3282 won't get confused into thinking that these two instructions
3283 are loading in the true address of the symbol. If in the
3284 future a PIC rtx exists, that should be used instead. */
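	  /* Informally, for -fPIC this arranges for something like

		sethi	%hi(sym), %tmp
		or	%tmp, %lo(sym), %tmp
		ld	[%l7 + %tmp], %reg	! ldx under TARGET_ARCH64

	     i.e. a full GOT offset built in a scratch register and the GOT
	     entry then loaded through %l7 (a rough sketch; the exact
	     relocations depend on the code model).  */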
3287 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3288 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3292 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3293 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3300 pic_ref = gen_const_mem (Pmode,
3301 gen_rtx_PLUS (Pmode,
3302 pic_offset_table_rtx, address));
3303 current_function_uses_pic_offset_table = 1;
3304 insn = emit_move_insn (reg, pic_ref);
3305 /* Put a REG_EQUAL note on this insn, so that it can be optimized by loop.  */
3307 set_unique_reg_note (insn, REG_EQUAL, orig);
3310 else if (GET_CODE (orig) == CONST)
3314 if (GET_CODE (XEXP (orig, 0)) == PLUS
3315 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3320 gcc_assert (! reload_in_progress && ! reload_completed);
3321 reg = gen_reg_rtx (Pmode);
3324 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3325 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3326 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3327 base == reg ? 0 : reg);
3329 if (GET_CODE (offset) == CONST_INT)
3331 if (SMALL_INT (offset))
3332 return plus_constant (base, INTVAL (offset));
3333 else if (! reload_in_progress && ! reload_completed)
3334 offset = force_reg (Pmode, offset);
3336 /* If we reach here, then something is seriously wrong. */
3339 return gen_rtx_PLUS (Pmode, base, offset);
3341 else if (GET_CODE (orig) == LABEL_REF)
3342 /* ??? Why do we do this? */
3343 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3344 the register is live instead, in case it is eliminated. */
3345 current_function_uses_pic_offset_table = 1;
3350 /* Try machine-dependent ways of modifying an illegitimate address X
3351 to be legitimate. If we find one, return the new, valid address.
3353 OLDX is the address as it was before break_out_memory_refs was called.
3354 In some cases it is useful to look at this to decide what needs to be done.
3356 MODE is the mode of the operand pointed to by X. */
3359 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
3363 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3364 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3365 force_operand (XEXP (x, 0), NULL_RTX));
3366 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3367 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3368 force_operand (XEXP (x, 1), NULL_RTX));
3369 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3370 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3372 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3373 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3374 force_operand (XEXP (x, 1), NULL_RTX));
3376 if (x != orig_x && legitimate_address_p (mode, x, FALSE))
3379 if (SPARC_SYMBOL_REF_TLS_P (x))
3380 x = legitimize_tls_address (x);
3382 x = legitimize_pic_address (x, mode, 0);
3383 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3384 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3385 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3386 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3387 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3388 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3389 else if (GET_CODE (x) == SYMBOL_REF
3390 || GET_CODE (x) == CONST
3391 || GET_CODE (x) == LABEL_REF)
3392 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3396 /* Emit the special PIC helper function. */
3399 emit_pic_helper (void)
3401 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3404 switch_to_section (text_section);
3406 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3408 ASM_OUTPUT_ALIGN (asm_out_file, align);
3409 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3410 if (flag_delayed_branch)
3411 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3412 pic_name, pic_name);
3414 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3415 pic_name, pic_name);
3417 pic_helper_emitted_p = true;
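/* For the record, the helper emitted above is the target of a call placed
   in each PIC prologue by load_pic_register below: the prologue materializes
   the pc-relative offset of _GLOBAL_OFFSET_TABLE_ in %l7 and calls the
   helper, whose "add %o7, %l7, %l7" then turns that offset into the actual
   GOT address (a descriptive sketch of the scheme, not the literal
   assembly).  */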
3420 /* Emit code to load the PIC register. */
3423 load_pic_register (bool delay_pic_helper)
3425 int orig_flag_pic = flag_pic;
3427 if (TARGET_VXWORKS_RTP)
3429 emit_insn (gen_vxworks_load_got ());
3430 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3434 /* If we haven't initialized the special PIC symbols, do so now. */
3435 if (!pic_helper_symbol_name[0])
3437 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3438 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3439 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3442 /* If we haven't emitted the special PIC helper function, do so now unless
3443 we are requested to delay it. */
3444 if (!delay_pic_helper && !pic_helper_emitted_p)