/* Output routines for GCC for Renesas / SuperH SH.
   Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
   2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
   Contributed by Steve Chamberlain (sac@cygnus.com).
   Improved by Jim Wilson (wilson@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-config.h"
#include "hard-reg-set.h"
#include "insn-attr.h"
#include "integrate.h"
#include "target-def.h"
#include "langhooks.h"
#include "basic-block.h"
#include "cfglayout.h"
#include "sched-int.h"
#include "tree-gimple.h"
#include "alloc-pool.h"
#include "tm-constrs.h"
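
/* The insn code of the indirect_jump_scratch pattern, kept in an
   ordinary variable so that code which does not pull in insn-codes.h
   can still compare insn codes against it.  */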
int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;
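
/* Subword index of the most / least significant 32-bit word in a
   64-bit value, as determined by the target endianness.  */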
#define MSW (TARGET_LITTLE_ENDIAN ? 1 : 0)
#define LSW (TARGET_LITTLE_ENDIAN ? 0 : 1)

/* These are some macros to abstract register modes.  */
#define CONST_OK_FOR_ADD(size) \
  (TARGET_SHMEDIA ? CONST_OK_FOR_I10 (size) : CONST_OK_FOR_I08 (size))
#define GEN_MOV (*(TARGET_SHMEDIA64 ? gen_movdi : gen_movsi))
#define GEN_ADD3 (*(TARGET_SHMEDIA64 ? gen_adddi3 : gen_addsi3))
#define GEN_SUB3 (*(TARGET_SHMEDIA64 ? gen_subdi3 : gen_subsi3))
/* Used to simplify the logic below.  Find the attributes wherever
   they may be.  */
#define SH_ATTRIBUTES(decl) \
  (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
		  : DECL_ATTRIBUTES (decl) \
		  ? (DECL_ATTRIBUTES (decl)) \
		  : TYPE_ATTRIBUTES (TREE_TYPE (decl))
/* Set to 1 by expand_prologue() when the function is an interrupt handler.  */
int current_function_interrupt;
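
/* Function attributes (e.g. from interrupt-handler pragmas) that were seen
   before the declaration they apply to; sh_insert_attributes attaches them
   to the next function.  The tail pointer lets new entries be appended in
   the order they were seen.  */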
tree sh_deferred_function_attributes;
tree *sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
/* Global variables for machine-dependent things.  */

/* Which cpu are we scheduling for.  */
enum processor_type sh_cpu;

/* Definitions used in ready queue reordering for first scheduling pass.  */

/* Reg weights arrays for modes SFmode and SImode, indexed by insn LUID.  */
static short *regmode_weight[2];

/* Total SFmode and SImode weights of scheduled insns.  */
static int curr_regmode_pressure[2];

/* Number of r0 life regions.  */
static int r0_life_regions;

/* If true, skip cycles for Q -> R movement.  */
static int skip_cycles = 0;

/* Cached value of can_issue_more.  This is cached in sh_variable_issue hook
   and returned from sh_reorder2.  */
static short cached_can_issue_more;
/* Saved operands from the last compare to use when we generate an scc
   or bcc insn.  */

rtx sh_compare_op0;
rtx sh_compare_op1;

/* Provides the class number of the smallest class containing
   reg number.  */

enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  R0_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  FP0_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  NO_REGS, GENERAL_REGS, PR_REGS, T_REGS,
  MAC_REGS, MAC_REGS, FPUL_REGS, FPSCR_REGS,
  GENERAL_REGS, GENERAL_REGS,
};
char sh_register_names[FIRST_PSEUDO_REGISTER] \
  [MAX_REGISTER_NAME_LENGTH + 1] = SH_REGISTER_NAMES_INITIALIZER;

char sh_additional_register_names[ADDREGNAMES_SIZE] \
  [MAX_ADDITIONAL_REGISTER_NAME_LENGTH + 1]
  = SH_ADDITIONAL_REGISTER_NAMES_INITIALIZER;
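
/* Selects between the two assembler mnemonic dialects, e.g. "bt/s"
   versus "bt.s"; see the ASSEMBLER_DIALECT tests below.  */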
int assembler_dialect;

static bool shmedia_space_reserved_for_target_registers;
static bool sh_handle_option (size_t, const char *, int);
static void split_branches (rtx);
static int branch_dest (rtx);
static void force_into (rtx, rtx);
static void print_slot (rtx);
static rtx add_constant (rtx, enum machine_mode, rtx);
static void dump_table (rtx, rtx);
static int hi_const (rtx);
static int broken_move (rtx);
static int mova_p (rtx);
static rtx find_barrier (int, rtx, rtx);
static int noncall_uses_reg (rtx, rtx, rtx *);
static rtx gen_block_redirect (rtx, int, int);
static void sh_reorg (void);
static void output_stack_adjust (int, rtx, int, HARD_REG_SET *);
static rtx frame_insn (rtx);
static rtx push (int);
static void pop (int);
static void push_regs (HARD_REG_SET *, int);
static int calc_live_regs (HARD_REG_SET *);
static HOST_WIDE_INT rounded_frame_size (int);
static rtx mark_constant_pool_use (rtx);
const struct attribute_spec sh_attribute_table[];
static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_resbank_handler_attribute (tree *, tree,
						 tree, int, bool *);
static tree sh2a_handle_function_vector_handler_attribute (tree *, tree,
							   tree, int, bool *);
static tree sh_handle_sp_switch_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_trap_exit_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_renesas_attribute (tree *, tree, tree, int, bool *);
static void sh_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void sh_insert_attributes (tree, tree *);
static const char *sh_check_pch_target_flags (int);
static int sh_adjust_cost (rtx, rtx, rtx, int);
static int sh_issue_rate (void);
static int sh_dfa_new_cycle (FILE *, int, rtx, int, int, int *sort_p);
static short find_set_regmode_weight (rtx, enum machine_mode);
static short find_insn_regmode_weight (rtx, enum machine_mode);
static void find_regmode_weight (basic_block, enum machine_mode);
static int find_r0_life_regions (basic_block);
static void sh_md_init_global (FILE *, int, int);
static void sh_md_finish_global (FILE *, int);
static int rank_for_reorder (const void *, const void *);
static void swap_reorder (rtx *, int);
static void ready_reorder (rtx *, int);
static short high_pressure (enum machine_mode);
static int sh_reorder (FILE *, int, rtx *, int *, int);
static int sh_reorder2 (FILE *, int, rtx *, int *, int);
static void sh_md_init (FILE *, int, int);
static int sh_variable_issue (FILE *, int, rtx, int);

static bool sh_function_ok_for_sibcall (tree, tree);

static bool sh_cannot_modify_jumps_p (void);
static int sh_target_reg_class (void);
static bool sh_optimize_target_register_callee_saved (bool);
static bool sh_ms_bitfield_layout_p (const_tree);

static void sh_init_builtins (void);
static void sh_media_init_builtins (void);
static rtx sh_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
static void sh_file_start (void);
static int flow_dependent_p (rtx, rtx);
static void flow_dependent_p_1 (rtx, const_rtx, void *);
static int shiftcosts (rtx);
static int andcosts (rtx);
static int addsubcosts (rtx);
static int multcosts (rtx);
static bool unspec_caller_rtx_p (rtx);
static bool sh_cannot_copy_insn_p (rtx);
static bool sh_rtx_costs (rtx, int, int, int *);
static int sh_address_cost (rtx);
static int sh_pr_n_sets (void);
static rtx sh_allocate_initial_value (rtx);
static int shmedia_target_regs_stack_space (HARD_REG_SET *);
static int shmedia_reserve_space_for_target_registers_p (int, HARD_REG_SET *);
static int shmedia_target_regs_stack_adjust (HARD_REG_SET *);
static int scavenge_reg (HARD_REG_SET *s);
struct save_schedule_s;
static struct save_entry_s *sh5_schedule_saves (HARD_REG_SET *,
						struct save_schedule_s *, int);

static rtx sh_struct_value_rtx (tree, int);
static bool sh_return_in_memory (const_tree, const_tree);
static rtx sh_builtin_saveregs (void);
static void sh_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
static bool sh_strict_argument_naming (CUMULATIVE_ARGS *);
static bool sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *);
static tree sh_build_builtin_va_list (void);
static void sh_va_start (tree, rtx);
static tree sh_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool sh_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				  const_tree, bool);
static bool sh_callee_copies (CUMULATIVE_ARGS *, enum machine_mode,
			      const_tree, bool);
static int sh_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				 tree, bool);
static bool sh_scalar_mode_supported_p (enum machine_mode);
static int sh_dwarf_calling_convention (const_tree);
static void sh_encode_section_info (tree, rtx, int);
static int sh2a_function_vector_p (tree);

/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sh_attribute_table

/* The next two are used for debug info when compiling with -gdwarf.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.ualong\t"

/* These are NULLed out on non-SH5 in OVERRIDE_OPTIONS.  */
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaquad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sh_output_function_epilogue

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sh_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START sh_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sh_handle_option

#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES sh_insert_attributes

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sh_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sh_issue_rate

/* The next 5 hooks have been implemented for reenabling sched1.  With the
   help of these macros we are limiting the movement of insns in sched1 to
   reduce the register pressure.  The overall idea is to keep count of SImode
   and SFmode regs required by already scheduled insns.  When these counts
   cross some threshold values, give priority to insns that free registers.
   The insn that frees registers is most likely to be the insn with lowest
   LUID (original insn order); but such an insn might be there in the stalled
   queue (Q) instead of the ready queue (R).  To solve this, we skip cycles
   up to a maximum of 8 cycles so that such insns may move from Q -> R.

   The hooks are described below:

   TARGET_SCHED_INIT_GLOBAL: Added a new target hook in the generic
   scheduler; it is called inside the sched_init function just after
   find_insn_reg_weights function call.  It is used to calculate the SImode
   and SFmode weights of insns of basic blocks; much like what
   find_insn_reg_weights does.
   TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.

   TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
   indicated by TARGET_SCHED_REORDER2; doing this may move insns from
   Q -> R.

   TARGET_SCHED_REORDER: If the register pressure for SImode or SFmode is
   high, reorder the ready queue so that the insn with lowest LUID will be
   issued next.

   TARGET_SCHED_REORDER2: If the register pressure is high, indicate to
   TARGET_SCHED_DFA_NEW_CYCLE to skip cycles.

   TARGET_SCHED_VARIABLE_ISSUE: Cache the value of can_issue_more so that it
   can be returned from TARGET_SCHED_REORDER2.

   TARGET_SCHED_INIT: Reset the register pressure counting variables.  */

#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE sh_dfa_new_cycle

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL sh_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL sh_md_finish_global

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE sh_variable_issue

#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER sh_reorder

#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 sh_reorder2

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sh_md_init

#undef TARGET_CANNOT_MODIFY_JUMPS_P
#define TARGET_CANNOT_MODIFY_JUMPS_P sh_cannot_modify_jumps_p
#undef TARGET_BRANCH_TARGET_REGISTER_CLASS
#define TARGET_BRANCH_TARGET_REGISTER_CLASS sh_target_reg_class
#undef TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED
#define TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED \
  sh_optimize_target_register_callee_saved

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P sh_ms_bitfield_layout_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sh_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sh_expand_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sh_function_ok_for_sibcall

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P sh_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sh_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST sh_address_cost
#undef TARGET_ALLOCATE_INITIAL_VALUE
#define TARGET_ALLOCATE_INITIAL_VALUE sh_allocate_initial_value

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG sh_reorg

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS sh_promote_prototypes
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN sh_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sh_return_in_memory

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sh_builtin_saveregs
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS sh_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sh_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES sh_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sh_arg_partial_bytes

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sh_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sh_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P sh_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sh_vector_mode_supported_p

#undef TARGET_CHECK_PCH_TARGET_FLAGS
#define TARGET_CHECK_PCH_TARGET_FLAGS sh_check_pch_target_flags

#undef TARGET_DWARF_CALLING_CONVENTION
#define TARGET_DWARF_CALLING_CONVENTION sh_dwarf_calling_convention

/* Return regmode weight for insn.  */
#define INSN_REGMODE_WEIGHT(INSN, MODE)  regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)]

/* Return current register pressure for regmode.  */
#define CURR_REGMODE_PRESSURE(MODE)  curr_regmode_pressure[((MODE) == SImode) ? 0 : 1]

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO sh_encode_section_info

#ifdef SYMBIAN
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO sh_symbian_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING sh_symbian_strip_name_encoding
#undef TARGET_CXX_IMPORT_EXPORT_CLASS
#define TARGET_CXX_IMPORT_EXPORT_CLASS symbian_import_export_class
#endif /* SYMBIAN */

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sh_secondary_reload

/* Machine-specific symbol_ref flags.  */
#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)

struct gcc_target targetm = TARGET_INITIALIZER;

/* Implement TARGET_HANDLE_OPTION.  */

sh_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED,
		  int value ATTRIBUTE_UNUSED)

      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH1;

      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2;

      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A;

      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_NOFPU;

      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE;

    case OPT_m2a_single_only:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE_ONLY;

      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2E;

      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3;

      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3E;

      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4;

    case OPT_m4_100_nofpu:
    case OPT_m4_200_nofpu:
    case OPT_m4_300_nofpu:

      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_NOFPU;

    case OPT_m4_100_single:
    case OPT_m4_200_single:
    case OPT_m4_300_single:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE;

    case OPT_m4_single_only:
    case OPT_m4_100_single_only:
    case OPT_m4_200_single_only:
    case OPT_m4_300_single_only:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE_ONLY;

      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A;

      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_NOFPU;

      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE;

    case OPT_m4a_single_only:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE_ONLY;

      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA;

    case OPT_m5_32media_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA_NOFPU;

      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA;

    case OPT_m5_64media_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA_NOFPU;

      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT;

    case OPT_m5_compact_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT_NOFPU;

/* Print the operand address in x to the stream.  */

print_operand_address (FILE *stream, rtx x)

  switch (GET_CODE (x))

      fprintf (stream, "@%s", reg_names[true_regnum (x)]);

	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	switch (GET_CODE (index))

	    fprintf (stream, "@(%d,%s)", (int) INTVAL (index),
		     reg_names[true_regnum (base)]);

	      int base_num = true_regnum (base);
	      int index_num = true_regnum (index);

	      fprintf (stream, "@(r0,%s)",
		       reg_names[MAX (base_num, index_num)]);

      fprintf (stream, "@-%s", reg_names[true_regnum (XEXP (x, 0))]);

      fprintf (stream, "@%s+", reg_names[true_regnum (XEXP (x, 0))]);

      x = mark_constant_pool_use (x);
      output_addr_const (stream, x);

/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   '.'  print a .s if insn needs delay slot
   ','  print LOCAL_LABEL_PREFIX
   '@'  print trap, rte or rts depending upon pragma interruptness
   '#'  output a nop if there is nothing to put in the delay slot
   '''  print likelihood suffix (/u for unlikely).
   '>'  print branch target if -fverbose-asm
   'O'  print a constant without the #
   'R'  print the LSW of a dp value - changes if in little endian
   'S'  print the MSW of a dp value - changes if in little endian
   'T'  print the next word of a dp value - same as 'R' in big endian mode.
   'M'  SHMEDIA: print an `x' if `m' will print `base,index'.
	otherwise: print .b / .w / .l / .s / .d suffix if operand is a MEM.
   'N'  print 'r63' if the operand is (const_int 0).
   'd'  print a V2SF reg as dN instead of fpN.
   'm'  print a pair `base,offset' or `base,index', for LD and ST.
   'U'  Likewise for {LD,ST}{HI,LO}.
   'V'  print the position of a single bit set.
   'W'  print the position of a single bit cleared.
   't'  print a memory address which is a register.
   'u'  prints the lowest 16 bits of CONST_INT, as an unsigned value.
   'o'  output an operator.  */

print_operand (FILE *stream, rtx x, int code)

      enum machine_mode mode;

	  && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
	  && get_attr_length (XVECEXP (final_sequence, 0, 1)))
	fprintf (stream, ASSEMBLER_DIALECT ? "/s" : ".s");

      fprintf (stream, "%s", LOCAL_LABEL_PREFIX);

      trapa_attr = lookup_attribute ("trap_exit",
				     DECL_ATTRIBUTES (current_function_decl));

	fprintf (stream, "trapa #%ld",
		 (long) TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (trapa_attr))));
      else if (sh_cfun_interrupt_handler_p ())

	  if (sh_cfun_resbank_handler_p ())
	    fprintf (stream, "resbank\n");
	  fprintf (stream, "rte");

	fprintf (stream, "rts");

      /* Output a nop if there's nothing in the delay slot.  */
      if (dbr_sequence_length () == 0)
	fprintf (stream, "\n\tnop");

	rtx note = find_reg_note (current_output_insn, REG_BR_PROB, 0);

	if (note && INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
	  fputs ("/u", stream);

      if (flag_verbose_asm && JUMP_LABEL (current_output_insn))

	  fputs ("\t! target: ", stream);
	  output_addr_const (stream, JUMP_LABEL (current_output_insn));

      x = mark_constant_pool_use (x);
      output_addr_const (stream, x);

    /* N.B.: %R / %S / %T adjust memory addresses by four.
       For SHMEDIA, that means they can be used to access the first and
       second 32 bit part of a 64 bit (or larger) value that
       might be held in floating point registers or memory.
       While they can be used to access 64 bit parts of a larger value
       held in general purpose registers, that won't work with memory -
       neither for fp registers, since the frxx names are used.  */
      if (REG_P (x) || GET_CODE (x) == SUBREG)

	  regno = true_regnum (x);
	  regno += FP_REGISTER_P (regno) ? 1 : LSW;
	  fputs (reg_names[regno], (stream));

	  x = adjust_address (x, SImode, 4 * LSW);
	  print_operand_address (stream, XEXP (x, 0));

	  if (mode == VOIDmode)

	  if (GET_MODE_SIZE (mode) >= 8)
	    sub = simplify_subreg (SImode, x, mode, 4 * LSW);

	    print_operand (stream, sub, 0);

	    output_operand_lossage ("invalid operand to %%R");

      if (REG_P (x) || GET_CODE (x) == SUBREG)

	  regno = true_regnum (x);
	  regno += FP_REGISTER_P (regno) ? 0 : MSW;
	  fputs (reg_names[regno], (stream));

	  x = adjust_address (x, SImode, 4 * MSW);
	  print_operand_address (stream, XEXP (x, 0));

	  if (mode == VOIDmode)

	  if (GET_MODE_SIZE (mode) >= 8)
	    sub = simplify_subreg (SImode, x, mode, 4 * MSW);

	    print_operand (stream, sub, 0);

	    output_operand_lossage ("invalid operand to %%S");

      /* Next word of a double.  */
      switch (GET_CODE (x))

	  fputs (reg_names[REGNO (x) + 1], (stream));

	  if (GET_CODE (XEXP (x, 0)) != PRE_DEC
	      && GET_CODE (XEXP (x, 0)) != POST_INC)
	    x = adjust_address (x, SImode, 4);
	  print_operand_address (stream, XEXP (x, 0));

      gcc_assert (GET_CODE (x) == MEM);

      switch (GET_CODE (x))

	  print_operand (stream, x, 0);

      switch (GET_CODE (x))

	case PLUS:  fputs ("add", stream); break;
	case MINUS: fputs ("sub", stream); break;
	case MULT:  fputs ("mul", stream); break;
	case DIV:   fputs ("div", stream); break;
	case EQ:    fputs ("eq",  stream); break;
	case NE:    fputs ("ne",  stream); break;
	case GT:  case LT:  fputs ("gt", stream); break;
	case GE:  case LE:  fputs ("ge", stream); break;
	case GTU: case LTU: fputs ("gtu", stream); break;
	case GEU: case LEU: fputs ("geu", stream); break;

      if (GET_CODE (x) == MEM
	  && GET_CODE (XEXP (x, 0)) == PLUS
	  && (GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
	      || GET_CODE (XEXP (XEXP (x, 0), 1)) == SUBREG))

      if (GET_CODE (x) == MEM)

	  switch (GET_MODE (x))

	    case QImode: fputs (".b", stream); break;
	    case HImode: fputs (".w", stream); break;
	    case SImode: fputs (".l", stream); break;
	    case SFmode: fputs (".s", stream); break;
	    case DFmode: fputs (".d", stream); break;
	    default: gcc_unreachable ();

      gcc_assert (GET_CODE (x) == MEM);

      switch (GET_CODE (x))

	  print_operand (stream, x, 0);
	  fputs (", 0", stream);

	  print_operand (stream, XEXP (x, 0), 0);
	  fputs (", ", stream);
	  print_operand (stream, XEXP (x, 1), 0);

	int num = exact_log2 (INTVAL (x));
	gcc_assert (num >= 0);
	fprintf (stream, "#%d", num);

	int num = exact_log2 (~INTVAL (x));
	gcc_assert (num >= 0);
	fprintf (stream, "#%d", num);

      gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == V2SFmode);

      fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);

      if (x == CONST0_RTX (GET_MODE (x)))

	  fprintf ((stream), "r63");

      if (GET_CODE (x) == CONST_INT)

	  fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1));

      switch (GET_CODE (x))

	    rtx inner = XEXP (x, 0);

	    enum machine_mode inner_mode;

	    /* We might see SUBREGs with vector mode registers inside.  */
	    if (GET_CODE (inner) == SUBREG
		&& (GET_MODE_SIZE (GET_MODE (inner))
		    == GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
		&& subreg_lowpart_p (inner))
	      inner = SUBREG_REG (inner);
	    if (GET_CODE (inner) == CONST_INT)

		x = GEN_INT (trunc_int_for_mode (INTVAL (inner), GET_MODE (x)));
	    inner_mode = GET_MODE (inner);
	    if (GET_CODE (inner) == SUBREG
		&& (GET_MODE_SIZE (GET_MODE (inner))
		    < GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
		&& GET_CODE (SUBREG_REG (inner)) == REG)

		offset = subreg_regno_offset (REGNO (SUBREG_REG (inner)),
					      GET_MODE (SUBREG_REG (inner)),

		inner = SUBREG_REG (inner);

	    if (GET_CODE (inner) != REG || GET_MODE_SIZE (inner_mode) > 8)

	    /* Floating point register pairs are always big endian;
	       general purpose registers are 64 bit wide.  */
	    regno = REGNO (inner);
	    regno = (HARD_REGNO_NREGS (regno, inner_mode)
		     - HARD_REGNO_NREGS (regno, mode))

	  /* FIXME: We need this on SHmedia32 because reload generates
	     some sign-extended HI or QI loads into DImode registers
	     but, because Pmode is SImode, the address ends up with a
	     subreg:SI of the DImode register.  Maybe reload should be
	     fixed so as to apply alter_subreg to such loads?  */

	  gcc_assert (trapping_target_operand (x, VOIDmode));
	  x = XEXP (XEXP (x, 2), 0);
	  goto default_output;

	  gcc_assert (SUBREG_BYTE (x) == 0
		      && GET_CODE (SUBREG_REG (x)) == REG);

	  if (FP_REGISTER_P (regno)
	      && mode == V16SFmode)
	    fprintf ((stream), "mtrx%s", reg_names[regno] + 2);
	  else if (FP_REGISTER_P (REGNO (x))
		   && mode == V4SFmode)
	    fprintf ((stream), "fv%s", reg_names[regno] + 2);
	  else if (GET_CODE (x) == REG
		   && mode == V2SFmode)
	    fprintf ((stream), "fp%s", reg_names[regno] + 2);
	  else if (FP_REGISTER_P (REGNO (x))
		   && GET_MODE_SIZE (mode) > 4)
	    fprintf ((stream), "d%s", reg_names[regno] + 1);

	    fputs (reg_names[regno], (stream));

	  output_address (XEXP (x, 0));

	      && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
		  || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
	      && (GET_MODE (XEXP (x, 0)) == DImode
		  || GET_MODE (XEXP (x, 0)) == SImode)
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == TRUNCATE
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode)

	      rtx val = XEXP (XEXP (XEXP (x, 0), 0), 0);

	      bool nested_expr = false;

	      fputc ('(', stream);
	      if (GET_CODE (val) == ASHIFTRT)

		  fputc ('(', stream);
		  val2 = XEXP (val, 0);

	      if (GET_CODE (val2) == CONST
		  || GET_RTX_CLASS (GET_CODE (val2)) != RTX_OBJ)

		  fputc ('(', stream);

	      output_addr_const (stream, val2);

		fputc (')', stream);
	      if (GET_CODE (val) == ASHIFTRT)

		  fputs (" >> ", stream);
		  output_addr_const (stream, XEXP (val, 1));
		  fputc (')', stream);

	      fputs (" & 65535)", stream);

	      fputc ('#', stream);
	      output_addr_const (stream, x);

/* Encode symbol attributes of a SYMBOL_REF into its
   SYMBOL_REF_FLAGS.  */

sh_encode_section_info (tree decl, rtx rtl, int first)

  default_encode_section_info (decl, rtl, first);

  if (TREE_CODE (decl) == FUNCTION_DECL
      && sh2a_function_vector_p (decl) && TARGET_SH2A)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FUNCVEC_FUNCTION;

/* Like force_operand, but guarantees that VALUE ends up in TARGET.  */

force_into (rtx value, rtx target)

  value = force_operand (value, target);
  if (! rtx_equal_p (value, target))
    emit_insn (gen_move_insn (target, value));

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

expand_block_move (rtx *operands)

  int align = INTVAL (operands[3]);
  int constp = (GET_CODE (operands[2]) == CONST_INT);
  int bytes = (constp ? INTVAL (operands[2]) : 0);

  /* If we could use mov.l to move words and dest is word-aligned, we
     can use movua.l for loads and still generate a relatively short
     and efficient sequence.  */
  if (TARGET_SH4A_ARCH && align < 4
      && MEM_ALIGN (operands[0]) >= 32
      && can_move_by_pieces (bytes, 32))

      rtx dest = copy_rtx (operands[0]);
      rtx src = copy_rtx (operands[1]);
      /* We could use different pseudos for each copied word, but
	 since movua can only load into r0, it's kind of
	 pointless.  */
      rtx temp = gen_reg_rtx (SImode);
      rtx src_addr = copy_addr_to_reg (XEXP (src, 0));

      while (copied + 4 <= bytes)

	  rtx to = adjust_address (dest, SImode, copied);
	  rtx from = adjust_automodify_address (src, BLKmode,

	  set_mem_size (from, GEN_INT (4));
	  emit_insn (gen_movua (temp, from));
	  emit_move_insn (src_addr, plus_constant (src_addr, 4));
	  emit_move_insn (to, temp);

	move_by_pieces (adjust_address (dest, BLKmode, copied),
			adjust_automodify_address (src, BLKmode,
			bytes - copied, align, 0);

  /* If it isn't a constant number of bytes, or if it doesn't have 4 byte
     alignment, or if it isn't a multiple of 4 bytes, then fail.  */
  if (align < 4 || (bytes % 4 != 0))

  if (TARGET_HARD_SH4)

  else if (bytes == 12)

      rtx func_addr_rtx = gen_reg_rtx (Pmode);
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);

      function_symbol (func_addr_rtx, "__movmemSI12_i4", SFUNC_STATIC);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);
      emit_insn (gen_block_move_real_i4 (func_addr_rtx));

  else if (! TARGET_SMALLCODE)

      const char *entry_name;
      rtx func_addr_rtx = gen_reg_rtx (Pmode);

      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);
      rtx r6 = gen_rtx_REG (SImode, 6);

      entry_name = (bytes & 4 ? "__movmem_i4_odd" : "__movmem_i4_even");
      function_symbol (func_addr_rtx, entry_name, SFUNC_STATIC);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);

      dwords = bytes >> 3;
      emit_insn (gen_move_insn (r6, GEN_INT (dwords - 1)));
      emit_insn (gen_block_lump_real_i4 (func_addr_rtx));

      rtx func_addr_rtx = gen_reg_rtx (Pmode);
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);

      sprintf (entry, "__movmemSI%d", bytes);
      function_symbol (func_addr_rtx, entry, SFUNC_STATIC);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);
      emit_insn (gen_block_move_real (func_addr_rtx));

  /* This is the same number of bytes as a memcpy call, but to a different
     less common function name, so this will occasionally use more space.  */
  if (! TARGET_SMALLCODE)

      rtx func_addr_rtx = gen_reg_rtx (Pmode);
      int final_switch, while_loop;
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);
      rtx r6 = gen_rtx_REG (SImode, 6);

      function_symbol (func_addr_rtx, "__movmem", SFUNC_STATIC);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);

      /* r6 controls the size of the move.  16 is decremented from it
	 for each 64 bytes moved.  Then the negative bit left over is used
	 as an index into a list of move instructions.  e.g., a 72 byte move
	 would be set up with size(r6) = 14, for one iteration through the
	 big while loop, and a switch of -2 for the last part.  */

      final_switch = 16 - ((bytes / 4) % 16);
      while_loop = ((bytes / 4) / 16 - 1) * 16;
      emit_insn (gen_move_insn (r6, GEN_INT (while_loop + final_switch)));
      emit_insn (gen_block_lump_real (func_addr_rtx));

/* Prepare operands for a move define_expand; specifically, one of the
   operands must be in a register.  */

prepare_move_operands (rtx operands[], enum machine_mode mode)

  if ((mode == SImode || mode == DImode)
      && ! ((mode == Pmode || mode == ptr_mode)
	    && tls_symbolic_operand (operands[1], Pmode) != 0))

      if (SYMBOLIC_CONST_P (operands[1]))

	  if (GET_CODE (operands[0]) == MEM)
	    operands[1] = force_reg (Pmode, operands[1]);
	  else if (TARGET_SHMEDIA
		   && GET_CODE (operands[1]) == LABEL_REF
		   && target_reg_operand (operands[0], mode))

	      temp = (!can_create_pseudo_p ()
		      ? operands[0]
		      : gen_reg_rtx (Pmode));
	      operands[1] = legitimize_pic_address (operands[1], mode, temp);

      else if (GET_CODE (operands[1]) == CONST
	       && GET_CODE (XEXP (operands[1], 0)) == PLUS
	       && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))

	  temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
	  temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
					 mode, temp);
	  operands[1] = expand_binop (mode, add_optab, temp,
				      XEXP (XEXP (operands[1], 0), 1),
				      (!can_create_pseudo_p ()
				       : gen_reg_rtx (Pmode)),
				      0, OPTAB_LIB_WIDEN);

  if (! reload_in_progress && ! reload_completed)

      /* Copy the source to a register if both operands aren't registers.  */
      if (! register_operand (operands[0], mode)
	  && ! sh_register_operand (operands[1], mode))
	operands[1] = copy_to_mode_reg (mode, operands[1]);

      if (GET_CODE (operands[0]) == MEM && ! memory_operand (operands[0], mode))

	  /* This is like change_address_1 (operands[0], mode, 0, 1) ,
	     except that we can't use that function because it is static.  */
	  rtx new = change_address (operands[0], mode, 0);
	  MEM_COPY_ATTRIBUTES (new, operands[0]);
	  operands[0] = new;

      /* This case can happen while generating code to move the result
	 of a library call to the target.  Reject `st r0,@(rX,rY)' because
	 reload will fail to find a spill register for rX, since r0 is already
	 being used for the source.  */

	       && refers_to_regno_p (R0_REG, R0_REG + 1, operands[1], (rtx *)0)
	       && GET_CODE (operands[0]) == MEM
	       && GET_CODE (XEXP (operands[0], 0)) == PLUS
	       && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == REG)
	operands[1] = copy_to_mode_reg (mode, operands[1]);

  if (mode == Pmode || mode == ptr_mode)

      enum tls_model tls_kind;

      if (GET_CODE (op1) == CONST
	  && GET_CODE (XEXP (op1, 0)) == PLUS
	  && tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode))

	  opc = XEXP (XEXP (op1, 0), 1);
	  op1 = XEXP (XEXP (op1, 0), 0);

      if ((tls_kind = tls_symbolic_operand (op1, Pmode)))

	  rtx tga_op1, tga_ret, tmp, tmp2;

	    case TLS_MODEL_GLOBAL_DYNAMIC:
	      tga_ret = gen_rtx_REG (Pmode, R0_REG);
	      emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));

	    case TLS_MODEL_LOCAL_DYNAMIC:
	      tga_ret = gen_rtx_REG (Pmode, R0_REG);
	      emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));

	      tmp = gen_reg_rtx (Pmode);
	      emit_move_insn (tmp, tga_ret);

	      if (register_operand (op0, Pmode))

		  tmp2 = gen_reg_rtx (Pmode);

	      emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp));

	    case TLS_MODEL_INITIAL_EXEC:

	      /* Don't schedule insns for getting GOT address when
		 the first scheduling is enabled, to avoid spill
		 failures.  */
	      if (flag_schedule_insns)
		emit_insn (gen_blockage ());
	      emit_insn (gen_GOTaddr2picreg ());
	      emit_use (gen_rtx_REG (SImode, PIC_REG));
	      if (flag_schedule_insns)
		emit_insn (gen_blockage ());

	      tga_op1 = !can_create_pseudo_p () ? op0 : gen_reg_rtx (Pmode);
	      tmp = gen_sym2GOTTPOFF (op1);
	      emit_insn (gen_tls_initial_exec (tga_op1, tmp));

	    case TLS_MODEL_LOCAL_EXEC:
	      tmp2 = gen_reg_rtx (Pmode);
	      emit_insn (gen_load_gbr (tmp2));
	      tmp = gen_reg_rtx (Pmode);
	      emit_insn (gen_symTPOFF2reg (tmp, op1));

	      if (register_operand (op0, Pmode))

		  op1 = gen_reg_rtx (Pmode);

	      emit_insn (gen_addsi3 (op1, tmp, tmp2));

	  emit_insn (gen_addsi3 (op1, op1, force_reg (SImode, opc)));

prepare_cbranch_operands (rtx *operands, enum machine_mode mode,
			  enum rtx_code comparison)

  rtx scratch = NULL_RTX;

  if (comparison == CODE_FOR_nothing)
    comparison = GET_CODE (operands[0]);
  else
    scratch = operands[4];
  if (GET_CODE (operands[1]) == CONST_INT
      && GET_CODE (operands[2]) != CONST_INT)

      rtx tmp = operands[1];

      operands[1] = operands[2];
      operands[2] = tmp;
      comparison = swap_condition (comparison);
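
  /* If operands[2] is a constant that no comparison insn accepts directly,
     rewrite the test into an equivalent one with an adjusted constant,
     e.g. (x > -1) becomes (x >= 0) and (x >= 1) becomes (x > 0).  */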
  if (GET_CODE (operands[2]) == CONST_INT)

      HOST_WIDE_INT val = INTVAL (operands[2]);
      if ((val == -1 || val == -0x81)
	  && (comparison == GT || comparison == LE))

	  comparison = (comparison == GT) ? GE : LT;
	  operands[2] = gen_int_mode (val + 1, mode);

      else if ((val == 1 || val == 0x80)
	       && (comparison == GE || comparison == LT))

	  comparison = (comparison == GE) ? GT : LE;
	  operands[2] = gen_int_mode (val - 1, mode);

      else if (val == 1 && (comparison == GEU || comparison == LTU))

	  comparison = (comparison == GEU) ? NE : EQ;
	  operands[2] = CONST0_RTX (mode);

      else if (val == 0x80 && (comparison == GEU || comparison == LTU))

	  comparison = (comparison == GEU) ? GTU : LEU;
	  operands[2] = gen_int_mode (val - 1, mode);

      else if (val == 0 && (comparison == GTU || comparison == LEU))
	comparison = (comparison == GTU) ? NE : EQ;
      else if (mode == SImode
	       && ((val == 0x7fffffff
		    && (comparison == GTU || comparison == LEU))
		   || ((unsigned HOST_WIDE_INT) val
		       == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
		       && (comparison == GEU || comparison == LTU))))

	  comparison = (comparison == GTU || comparison == GEU) ? LT : GE;
	  operands[2] = CONST0_RTX (mode);

  if (can_create_pseudo_p ())
    operands[1] = force_reg (mode, op1);
  /* When we are handling DImode comparisons, we want to keep constants so
     that we can optimize the component comparisons; however, memory loads
     are better issued as a whole so that they can be scheduled well.
     SImode equality comparisons allow I08 constants, but only when they
     compare r0.  Hence, if operands[1] has to be loaded from somewhere else
     into a register, that register might as well be r0, and we allow the
     constant.  If it is already in a register, this is likely to be
     allocated to a different hard register, thus we load the constant into
     a register unless it is zero.  */
  if (!REG_P (operands[2])
      && (GET_CODE (operands[2]) != CONST_INT
	  || (mode == SImode && operands[2] != CONST0_RTX (SImode)
	      && ((comparison != EQ && comparison != NE)
		  || (REG_P (op1) && REGNO (op1) != R0_REG)
		  || !satisfies_constraint_I08 (operands[2])))))

      if (scratch && GET_MODE (scratch) == mode)

	  emit_move_insn (scratch, operands[2]);
	  operands[2] = scratch;

      else if (can_create_pseudo_p ())
	operands[2] = force_reg (mode, operands[2]);

expand_cbranchsi4 (rtx *operands, enum rtx_code comparison, int probability)

  rtx (*branch_expander) (rtx) = gen_branch_true;
  rtx jump;

  comparison = prepare_cbranch_operands (operands, SImode, comparison);

    case NE: case LT: case LE: case LTU: case LEU:
      comparison = reverse_condition (comparison);
      branch_expander = gen_branch_false;

  emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, T_REG),
			  gen_rtx_fmt_ee (comparison, SImode,
					  operands[1], operands[2])));
  jump = emit_jump_insn (branch_expander (operands[3]));
  if (probability >= 0)
    REG_NOTES (jump)
      = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (probability),
			   REG_NOTES (jump));

/* ??? How should we distribute probabilities when more than one branch
   is generated?  So far we have only some ad-hoc observations:
   - If the operands are random, they are likely to differ in both parts.
   - If comparing items in a hash chain, the operands are random or equal;
     operation should be EQ or NE.
   - If items are searched in an ordered tree from the root, we can expect
     the highpart to be unequal about half of the time; operation should be
     an inequality comparison, operands non-constant, and overall probability
     about 50%.  Likewise for quicksort.
   - Range checks will often be made against constants.  Even if we assume for
     simplicity an even distribution of the non-constant operand over a
     sub-range here, the same probability could be generated with differently
     wide sub-ranges - as long as the ratio of the part of the subrange that
     is before the threshold to the part that comes after the threshold stays
     the same.  Thus, we can't really tell anything here;
     assuming random distribution is at least simple.  */

expand_cbranchdi4 (rtx *operands, enum rtx_code comparison)

  enum rtx_code msw_taken, msw_skip, lsw_taken;
  rtx skip_label = NULL_RTX;
  rtx op1h, op1l, op2h, op2l;
  int num_branches;
  int prob, rev_prob;
  int msw_taken_prob = -1, msw_skip_prob = -1, lsw_taken_prob = -1;
  rtx scratch = operands[4];

  comparison = prepare_cbranch_operands (operands, DImode, comparison);
  op1h = gen_highpart_mode (SImode, DImode, operands[1]);
  op2h = gen_highpart_mode (SImode, DImode, operands[2]);
  op1l = gen_lowpart (SImode, operands[1]);
  op2l = gen_lowpart (SImode, operands[2]);
  msw_taken = msw_skip = lsw_taken = CODE_FOR_nothing;
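
  /* A DImode comparison is split into up to three SImode branches:
     msw_taken branches to the target when the most significant words
     alone decide it, msw_skip branches past the low-part test when they
     decide it the other way, and lsw_taken tests the least significant
     words.  */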
  prob = split_branch_probability;
  rev_prob = REG_BR_PROB_BASE - prob;

      /* ??? Should we use the cmpeqdi_t pattern for equality comparisons?
	 That costs 1 cycle more when the first branch can be predicted taken,
	 but saves us mispredicts because only one branch needs prediction.
	 It also enables generating the cmpeqdi_t-1 pattern.  */
      if (TARGET_CMPEQDI_T)

	  emit_insn (gen_cmpeqdi_t (operands[1], operands[2]));
	  emit_jump_insn (gen_branch_true (operands[3]));
      /* If we had more precision, we'd use rev_prob - (rev_prob >> 32).  */
      msw_skip_prob = rev_prob;
      if (REG_BR_PROB_BASE <= 65535)
	lsw_taken_prob = prob ? REG_BR_PROB_BASE : 0;

	  gcc_assert (HOST_BITS_PER_WIDEST_INT >= 64);

		 - ((HOST_WIDEST_INT) REG_BR_PROB_BASE * rev_prob
		    / ((HOST_WIDEST_INT) prob << 32)))

      if (TARGET_CMPEQDI_T)

	  emit_insn (gen_cmpeqdi_t (operands[1], operands[2]));
	  emit_jump_insn (gen_branch_false (operands[3]));

      msw_taken_prob = prob;

      msw_taken = comparison;
      if (GET_CODE (op2l) == CONST_INT && INTVAL (op2l) == -1)

      if (comparison != GTU || op2h != CONST0_RTX (SImode))
	msw_skip = swap_condition (msw_taken);

      if (op2l == CONST0_RTX (SImode))
	msw_taken = comparison;

	  msw_taken = comparison == GE ? GT : GTU;
	  msw_skip = swap_condition (msw_taken);

      msw_taken = comparison;
      if (op2l == CONST0_RTX (SImode))

      msw_skip = swap_condition (msw_taken);

      if (GET_CODE (op2l) == CONST_INT && INTVAL (op2l) == -1)
	msw_taken = comparison;

      if (comparison == LE)

      else if (op2h != CONST0_RTX (SImode))

	msw_skip = swap_condition (msw_taken);

    default: return false;
  num_branches = ((msw_taken != CODE_FOR_nothing)
		  + (msw_skip != CODE_FOR_nothing)
		  + (lsw_taken != CODE_FOR_nothing));
  if (comparison != EQ && comparison != NE && num_branches > 1)

      if (!CONSTANT_P (operands[2])
	  && prob >= (int) (REG_BR_PROB_BASE * 3 / 8U)
	  && prob <= (int) (REG_BR_PROB_BASE * 5 / 8U))

	  msw_taken_prob = prob / 2U;

	    = REG_BR_PROB_BASE * rev_prob / (REG_BR_PROB_BASE + rev_prob);
	  lsw_taken_prob = prob;

	  msw_taken_prob = prob;
	  msw_skip_prob = REG_BR_PROB_BASE;
	  /* ??? If we have a constant op2h, should we use that when
	     calculating lsw_taken_prob?  */
	  lsw_taken_prob = prob;

  operands[4] = NULL_RTX;
  if (reload_completed
      && ! arith_reg_or_0_operand (op2h, SImode) && true_regnum (op1h)
      && (msw_taken != CODE_FOR_nothing || msw_skip != CODE_FOR_nothing))

      emit_move_insn (scratch, operands[2]);
      operands[2] = scratch;

  if (msw_taken != CODE_FOR_nothing)
    expand_cbranchsi4 (operands, msw_taken, msw_taken_prob);
  if (msw_skip != CODE_FOR_nothing)

      rtx taken_label = operands[3];

      /* Operands were possibly modified, but msw_skip doesn't expect this.
	 Always use the original ones.  */
      if (msw_taken != CODE_FOR_nothing)

      operands[3] = skip_label = gen_label_rtx ();
      expand_cbranchsi4 (operands, msw_skip, msw_skip_prob);
      operands[3] = taken_label;

  if (lsw_taken != CODE_FOR_nothing)

      if (reload_completed
	  && ! arith_reg_or_0_operand (op2l, SImode) && true_regnum (op1l))
	operands[4] = scratch;
      expand_cbranchsi4 (operands, lsw_taken, lsw_taken_prob);

  if (msw_skip != CODE_FOR_nothing)
    emit_label (skip_label);

/* Prepare the operands for an scc instruction; make sure that the
   compare has been done.  */

prepare_scc_operands (enum rtx_code code)

  rtx t_reg = gen_rtx_REG (SImode, T_REG);
  enum rtx_code oldcode = code;
  enum machine_mode mode;

  /* First need a compare insn.  */

      /* It isn't possible to handle this case.  */

  if (code != oldcode)

      rtx tmp = sh_compare_op0;
      sh_compare_op0 = sh_compare_op1;
      sh_compare_op1 = tmp;

  mode = GET_MODE (sh_compare_op0);
  if (mode == VOIDmode)
    mode = GET_MODE (sh_compare_op1);

  sh_compare_op0 = force_reg (mode, sh_compare_op0);
  if ((code != EQ && code != NE
       && (sh_compare_op1 != const0_rtx
	   || code == GTU || code == GEU || code == LTU || code == LEU))
      || (mode == DImode && sh_compare_op1 != const0_rtx)
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
    sh_compare_op1 = force_reg (mode, sh_compare_op1);

  if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
    (mode == SFmode ? emit_sf_insn : emit_df_insn)
     (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
	gen_rtx_SET (VOIDmode, t_reg,
		     gen_rtx_fmt_ee (code, SImode,
				     sh_compare_op0, sh_compare_op1)),
	gen_rtx_USE (VOIDmode, get_fpscr_rtx ()))));

    emit_insn (gen_rtx_SET (VOIDmode, t_reg,
			    gen_rtx_fmt_ee (code, SImode,
					    sh_compare_op0, sh_compare_op1)));

/* Called from the md file, set up the operands of a compare instruction.  */

from_compare (rtx *operands, int code)

  enum machine_mode mode = GET_MODE (sh_compare_op0);

  if (mode == VOIDmode)
    mode = GET_MODE (sh_compare_op1);

      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))

      /* Force args into regs, since we can't use constants here.  */
      sh_compare_op0 = force_reg (mode, sh_compare_op0);
      if (sh_compare_op1 != const0_rtx
	  || code == GTU || code == GEU
	  || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
	sh_compare_op1 = force_reg (mode, sh_compare_op1);

  if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT && code == GE)

      from_compare (operands, GT);
      insn = gen_ieee_ccmpeqsf_t (sh_compare_op0, sh_compare_op1);

    insn = gen_rtx_SET (VOIDmode,
			gen_rtx_REG (SImode, T_REG),
			gen_rtx_fmt_ee (code, SImode,
					sh_compare_op0, sh_compare_op1));
  if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)

      insn = gen_rtx_PARALLEL (VOIDmode,
			       gen_rtx_USE (VOIDmode, get_fpscr_rtx ())));
      (mode == SFmode ? emit_sf_insn : emit_df_insn) (insn);

/* Functions to output assembly code.  */

/* Return a sequence of instructions to perform DI or DF move.

   Since the SH cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
		   enum machine_mode mode)

  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == MEM
      && GET_CODE (XEXP (dst, 0)) == PRE_DEC)
    return "mov.l %T1,%0\n\tmov.l %1,%0";

  if (register_operand (dst, mode)
      && register_operand (src, mode))

      if (REGNO (src) == MACH_REG)
	return "sts mach,%S0\n\tsts macl,%R0";

      /* When mov.d r1,r2 do r2->r3 then r1->r2;
	 when mov.d r1,r0 do r1->r0 then r2->r1.  */

      if (REGNO (src) + 1 == REGNO (dst))
	return "mov %T1,%T0\n\tmov %1,%0";
      else
	return "mov %1,%0\n\tmov %T1,%T0";
  else if (GET_CODE (src) == CONST_INT)

      if (INTVAL (src) < 0)
	output_asm_insn ("mov #-1,%S0", operands);
      else
	output_asm_insn ("mov #0,%S0", operands);

      return "mov %1,%R0";

  else if (GET_CODE (src) == MEM)

      int dreg = REGNO (dst);
      rtx inside = XEXP (src, 0);

      switch (GET_CODE (inside))

	  ptrreg = REGNO (inside);

	  ptrreg = subreg_regno (inside);

	  ptrreg = REGNO (XEXP (inside, 0));
	  /* ??? A r0+REG address shouldn't be possible here, because it isn't
	     an offsettable address.  Unfortunately, offsettable addresses use
	     QImode to check the offset, and a QImode offsettable address
	     requires r0 for the other operand, which is not currently
	     supported, so we can't use the 'o' constraint.
	     Thus we must check for and handle r0+REG addresses here.
	     We punt for now, since this is likely very rare.  */
	  gcc_assert (GET_CODE (XEXP (inside, 1)) != REG);

	  return "mov.l %1,%0\n\tmov.l %1+4,%T0";

	  return "mov.l %1,%0\n\tmov.l %1,%T0";

      /* Work out the safe way to copy.  Copy into the second half first.  */

	return "mov.l %T1,%T0\n\tmov.l %1,%0";

  return "mov.l %1,%0\n\tmov.l %T1,%T0";

/* Print an instruction which would have gone into a delay slot after
   another instruction, but couldn't because the other instruction expanded
   into a sequence where putting the slot insn at the end wouldn't work.  */

print_slot (rtx insn)

  final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file, optimize, 1, NULL);

  INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;

output_far_jump (rtx insn, rtx op)

  struct { rtx lab, reg, op; } this;
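  /* The three fields are passed to output_asm_insn as an operand array
     starting at &this.lab, so in the templates below %O0 is the
     constant-pool label, %1 the scratch register and %O2 the branch
     target.  */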
  rtx braf_base_lab = NULL_RTX;

  int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));

  this.lab = gen_label_rtx ();

      && offset - get_attr_length (insn) <= 32766)

      jump = "mov.w %O0,%1; braf %1";

      jump = "mov.l %O0,%1; braf %1";

	jump = "mov.l r0,@-r15; mova %O0,r0; mov.l @r0,%1; add r0,%1; mov.l @r15+,r0; jmp @%1";

	jump = "mov.l %O0,%1; jmp @%1";

  /* If we have a scratch register available, use it.  */
  if (GET_CODE ((prev = prev_nonnote_insn (insn))) == INSN
      && INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)

      this.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
      if (REGNO (this.reg) == R0_REG && flag_pic && ! TARGET_SH2)
	jump = "mov.l r1,@-r15; mova %O0,r0; mov.l @r0,r1; add r1,r0; mov.l @r15+,r1; jmp @%1";
      output_asm_insn (jump, &this.lab);
      if (dbr_sequence_length ())
	print_slot (final_sequence);
      else
	output_asm_insn ("nop", 0);

      /* Output the delay slot insn first if any.  */
      if (dbr_sequence_length ())
	print_slot (final_sequence);

      this.reg = gen_rtx_REG (SImode, 13);
      /* We must keep the stack aligned to 8-byte boundaries on SH5.
	 Fortunately, MACL is fixed and call-clobbered, and we never
	 need its value across jumps, so save r13 in it instead of in
	 the stack.  */

	output_asm_insn ("lds r13, macl", 0);

	output_asm_insn ("mov.l r13,@-r15", 0);
      output_asm_insn (jump, &this.lab);

	output_asm_insn ("sts macl, r13", 0);

	output_asm_insn ("mov.l @r15+,r13", 0);

  if (far && flag_pic && TARGET_SH2)

      braf_base_lab = gen_label_rtx ();
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (braf_base_lab));

    output_asm_insn (".align 2", 0);
  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this.lab));

  if (far && flag_pic)

      this.lab = braf_base_lab;
      output_asm_insn (".long %O2-%O0", &this.lab);

    output_asm_insn (far ? ".long %O2" : ".word %O2-%O0", &this.lab);

/* Local label counter, used for constants in the pool and inside
   pattern branches.  */

static int lf = 100;
2007 /* Output code for ordinary branches. */
2010 output_branch (int logic, rtx insn, rtx *operands)
2012 switch (get_attr_length (insn))
2015 /* This can happen if filling the delay slot has caused a forward
2016 branch to exceed its range (we could reverse it, but only
2017 when we know we won't overextend other branches; this should
2018 best be handled by relaxation).
2019 It can also happen when other condbranches hoist delay slot insn
2020 from their destination, thus leading to code size increase.
2021 But the branch will still be in the range -4092..+4098 bytes. */
2026 /* The call to print_slot will clobber the operands. */
2027 rtx op0 = operands[0];
2029 /* If the instruction in the delay slot is annulled (true), then
2030 there is no delay slot where we can put it now. The only safe
2031 place for it is after the label. final will do that by default. */
2034 && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
2035 && get_attr_length (XVECEXP (final_sequence, 0, 1)))
2037 asm_fprintf (asm_out_file, "\tb%s%ss\t%LLF%d\n", logic ? "f" : "t",
2038 ASSEMBLER_DIALECT ? "/" : ".", label);
2039 print_slot (final_sequence);
2042 asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label);
2044 output_asm_insn ("bra\t%l0", &op0);
2045 fprintf (asm_out_file, "\tnop\n");
2046 (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
2050 /* When relaxing, handle this like a short branch. The linker
2051 will fix it up if it still doesn't fit after relaxation. */
2053 return logic ? "bt%.\t%l0" : "bf%.\t%l0";
2055 /* These are for SH2e, in which we have to account for the
2056 extra nop because of the hardware bug in annulled branches. */
2062 gcc_assert (!final_sequence
2063 || !(INSN_ANNULLED_BRANCH_P
2064 (XVECEXP (final_sequence, 0, 0))));
2065 asm_fprintf (asm_out_file, "b%s%ss\t%LLF%d\n", logic ? "f" : "t",
2067 ASSEMBLER_DIALECT ? "/" : ".", label);
2068 fprintf (asm_out_file, "\tnop\n");
2069 output_asm_insn ("bra\t%l0", operands);
2070 fprintf (asm_out_file, "\tnop\n");
2071 (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
2075 /* When relaxing, fall through. */
2080 sprintf (buffer, "b%s%ss\t%%l0", logic ? "t" : "f",
2082 ASSEMBLER_DIALECT ? "/" : ".");
2083 output_asm_insn (buffer, &operands[0]);
2088 /* There should be no longer branches now - that would
2089 indicate that something has destroyed the branches set
2090 up in machine_dependent_reorg. */
2095 /* Output a code sequence for INSN using TEMPLATE with OPERANDS; but before,
2096 fill in operands[9] as a label to the successor insn.
2097 We try to use jump threading where possible.
2098 If CODE matches the comparison in the IF_THEN_ELSE of a following jump,
2099 we assume the jump is taken. I.e. EQ means follow jmp and bf, NE means
2100 follow jmp and bt, if the address is in range. */
2102 output_branchy_insn (enum rtx_code code, const char *template,
2103 rtx insn, rtx *operands)
2105 rtx next_insn = NEXT_INSN (insn);
2107 if (next_insn && GET_CODE (next_insn) == JUMP_INSN && condjump_p (next_insn))
2109 rtx src = SET_SRC (PATTERN (next_insn));
2110 if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code)
2112 /* Following branch not taken */
2113 operands[9] = gen_label_rtx ();
2114 emit_label_after (operands[9], next_insn);
2115 INSN_ADDRESSES_NEW (operands[9],
2116 INSN_ADDRESSES (INSN_UID (next_insn))
2117 + get_attr_length (next_insn));
2122 int offset = (branch_dest (next_insn)
2123 - INSN_ADDRESSES (INSN_UID (next_insn)) + 4);
2124 if (offset >= -252 && offset <= 258)
2126 if (GET_CODE (src) == IF_THEN_ELSE)
2128 src = XEXP (src, 1);
2134 operands[9] = gen_label_rtx ();
2135 emit_label_after (operands[9], insn);
2136 INSN_ADDRESSES_NEW (operands[9],
2137 INSN_ADDRESSES (INSN_UID (insn))
2138 + get_attr_length (insn));
2143 output_ieee_ccmpeq (rtx insn, rtx *operands)
2145 return output_branchy_insn (NE, "bt\t%l9\n\tfcmp/eq\t%1,%0",
2149 /* Output the start of the assembler file. */
2152 sh_file_start (void)
2154 default_file_start ();
2157 /* Declare the .directive section before it is used. */
2158 fputs ("\t.section .directive, \"SM\", @progbits, 1\n", asm_out_file);
2159 fputs ("\t.asciz \"#<SYMEDIT>#\\n\"\n", asm_out_file);
2163 /* We need to show the text section with the proper
2164 attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
2165 emits it without attributes in TEXT_SECTION_ASM_OP, else GAS
2166 will complain. We can teach GAS specifically about the
2167 default attributes for our choice of text section, but
2168 then we would have to change GAS again if/when we change
2169 the text section name. */
2170 fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP);
2172 /* Switch to the data section so that the coffsem symbol
2173 isn't in the text section. */
2174 switch_to_section (data_section);
2176 if (TARGET_LITTLE_ENDIAN)
2177 fputs ("\t.little\n", asm_out_file);
2181 if (TARGET_SHCOMPACT)
2182 fputs ("\t.mode\tSHcompact\n", asm_out_file);
2183 else if (TARGET_SHMEDIA)
2184 fprintf (asm_out_file, "\t.mode\tSHmedia\n\t.abi\t%i\n",
2185 TARGET_SHMEDIA64 ? 64 : 32);
2189 /* Check if PAT includes UNSPEC_CALLER unspec pattern. */
2192 unspec_caller_rtx_p (rtx pat)
2194 switch (GET_CODE (pat))
2197 return unspec_caller_rtx_p (XEXP (pat, 0));
2200 if (unspec_caller_rtx_p (XEXP (pat, 0)))
2202 return unspec_caller_rtx_p (XEXP (pat, 1));
2204 if (XINT (pat, 1) == UNSPEC_CALLER)
2213 /* Indicate that INSN cannot be duplicated. This is true for an insn
2214 that generates a unique label. */
2217 sh_cannot_copy_insn_p (rtx insn)
2221 if (!reload_completed || !flag_pic)
2224 if (GET_CODE (insn) != INSN)
2226 if (asm_noperands (insn) >= 0)
2229 pat = PATTERN (insn);
2230 if (GET_CODE (pat) != SET)
2232 pat = SET_SRC (pat);
2234 if (unspec_caller_rtx_p (pat))
2240 /* Actual number of instructions used to make a shift by N. */
2241 static const char ashiftrt_insns[] =
2242 { 0,1,2,3,4,5,8,8,8,8,8,8,8,8,8,8,2,3,4,5,8,8,8,8,8,8,8,8,8,8,8,2};
2244 /* Left shift and logical right shift take the same number of instructions. */
2245 static const char shift_insns[] =
2246 { 0,1,1,2,2,3,3,4,1,2,2,3,3,4,3,3,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
2248 /* Individual shift amounts needed to get the above length sequences.
2249 One bit right shifts clobber the T bit, so when possible, put one bit
2250 shifts in the middle of the sequence, so the ends are eligible for
2251 branch delay slots. */
2252 static const short shift_amounts[32][5] = {
2253 {0}, {1}, {2}, {2, 1},
2254 {2, 2}, {2, 1, 2}, {2, 2, 2}, {2, 2, 1, 2},
2255 {8}, {8, 1}, {8, 2}, {8, 1, 2},
2256 {8, 2, 2}, {8, 2, 1, 2}, {8, -2, 8}, {8, -1, 8},
2257 {16}, {16, 1}, {16, 2}, {16, 1, 2},
2258 {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
2259 {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
2260 {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
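/* For example, a constant left shift by 6 takes shift_insns[6] == 3
   instructions, using shift_amounts[6] == {2, 2, 2}, i.e. three shll2
   insns; a shift by 30 uses {16, -2, 16}, where the intermediate
   right shift loses nothing because only the two topmost bits of the
   result survive anyway.  */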
2262 /* Likewise, but for shift amounts < 16, up to three highmost bits
2263 might be clobbered. This is typically used when combined with some
2264 kind of sign or zero extension. */
2266 static const char ext_shift_insns[] =
2267 { 0,1,1,2,2,3,2,2,1,2,2,3,3,3,2,2,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
2269 static const short ext_shift_amounts[32][4] = {
2270 {0}, {1}, {2}, {2, 1},
2271 {2, 2}, {2, 1, 2}, {8, -2}, {8, -1},
2272 {8}, {8, 1}, {8, 2}, {8, 1, 2},
2273 {8, 2, 2}, {16, -2, -1}, {16, -2}, {16, -1},
2274 {16}, {16, 1}, {16, 2}, {16, 1, 2},
2275 {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
2276 {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
2277 {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
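/* E.g. ext_shift_amounts[6] == {8, -2} does a left shift by 6 in two
   insns (shll8, then shlr2) instead of the three that shift_amounts[6]
   needs, at the price of clobbering the highmost bits of the result -
   which is fine when the operand is known to come from an 8- or
   16-bit extension.  */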
2279 /* Assuming we have a value that has been sign-extended by at least one bit,
2280 can we use the ext_shift_amounts with the last shift turned to an arithmetic shift
2281 to shift it by N without data loss, and quicker than by other means? */
2282 #define EXT_SHIFT_SIGNED(n) (((n) | 8) == 15)
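/* This holds exactly for N == 7 and N == 15, the two entries whose
   ext_shift_amounts sequence ({8, -1} resp. {16, -1}) ends in a
   one-bit right shift that can safely be made arithmetic.  */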
2284 /* This is used in length attributes in sh.md to help compute the length
2285 of arbitrary constant shift instructions. */
2288 shift_insns_rtx (rtx insn)
2290 rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2291 int shift_count = INTVAL (XEXP (set_src, 1));
2292 enum rtx_code shift_code = GET_CODE (set_src);
2297 return ashiftrt_insns[shift_count];
2300 return shift_insns[shift_count];
2306 /* Return the cost of a shift. */
2316 if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
2318 if (GET_MODE (x) == DImode
2319 && GET_CODE (XEXP (x, 1)) == CONST_INT
2320 && INTVAL (XEXP (x, 1)) == 1)
2323 /* Everything else is invalid, because there is no pattern for it. */
2326 /* If shifting by a non-constant, then this will be expensive. */
2327 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2328 return SH_DYNAMIC_SHIFT_COST;
2330 value = INTVAL (XEXP (x, 1));
2332 /* Otherwise, return the true cost in instructions. */
2333 if (GET_CODE (x) == ASHIFTRT)
2335 int cost = ashiftrt_insns[value];
2336 /* If SH3, then we put the constant in a reg and use shad. */
2337 if (cost > 1 + SH_DYNAMIC_SHIFT_COST)
2338 cost = 1 + SH_DYNAMIC_SHIFT_COST;
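      /* E.g. an arithmetic right shift by 6 would take 8 single- and
         double-bit shifts from the table above, so with a dynamic shift
         available it is costed as 1 + SH_DYNAMIC_SHIFT_COST instead,
         i.e. a mov #-6 plus a shad.  */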
2342 return shift_insns[value];
2345 /* Return the cost of an AND operation. */
2352 /* ANDing with a register is a single-cycle AND instruction. */
2353 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2356 i = INTVAL (XEXP (x, 1));
2360 if (satisfies_constraint_I10 (XEXP (x, 1))
2361 || satisfies_constraint_J16 (XEXP (x, 1)))
2364 return 1 + rtx_cost (XEXP (x, 1), AND);
2367 /* These constants are single cycle extu.[bw] instructions. */
2368 if (i == 0xff || i == 0xffff)
2370 /* Constants that can be used in an and immediate instruction in a single
2371 cycle, but this requires r0, so make it a little more expensive. */
2372 if (CONST_OK_FOR_K08 (i))
2374 /* Constants that can be loaded with a mov immediate and an and.
2375 This case is probably unnecessary. */
2376 if (CONST_OK_FOR_I08 (i))
2378 /* Any other constant requires a 2 cycle pc-relative load plus an and.
2379 This case is probably unnecessary. */
2383 /* Return the cost of an addition or a subtraction. */
2388 /* Adding a register is a single cycle insn. */
2389 if (GET_CODE (XEXP (x, 1)) == REG
2390 || GET_CODE (XEXP (x, 1)) == SUBREG)
2393 /* Likewise for small constants. */
2394 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2395 && CONST_OK_FOR_ADD (INTVAL (XEXP (x, 1))))
2399 switch (GET_CODE (XEXP (x, 1)))
2404 return TARGET_SHMEDIA64 ? 5 : 3;
2407 if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1))))
2409 else if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1)) >> 16))
2411 else if (CONST_OK_FOR_I16 ((INTVAL (XEXP (x, 1)) >> 16) >> 16))
2419 /* Any other constant requires a 2 cycle pc-relative load plus an addition. */
2424 /* Return the cost of a multiply. */
2426 multcosts (rtx x ATTRIBUTE_UNUSED)
2428 if (sh_multcost >= 0)
2431 /* ??? We have a mul insn, but it has a latency of three, and doesn't
2432 accept constants. Ideally, we would use a cost of one or two and
2433 add the cost of the operand, but disregard the latter when inside loops
2434 and loop invariant code motion is still to follow.
2435 Using a multiply first and splitting it later if it's a loss
2436 doesn't work because of different sign / zero extension semantics
2437 of multiplies vs. shifts. */
2438 return TARGET_SMALLCODE ? 2 : 3;
2442 /* We have a mul insn, so we can never take more than the mul and the
2443 read of the mac reg, but count more because of the latency and extra reg usage. */
2445 if (TARGET_SMALLCODE)
2450 /* If we're aiming at small code, then just count the number of
2451 insns in a multiply call sequence. */
2452 if (TARGET_SMALLCODE)
2455 /* Otherwise count all the insns in the routine we'd be calling too. */
2459 /* Compute a (partial) cost for rtx X. Return true if the complete
2460 cost has been computed, and false if subexpressions should be
2461 scanned. In either case, *TOTAL contains the cost result. */
2464 sh_rtx_costs (rtx x, int code, int outer_code, int *total)
2471 if (INTVAL (x) == 0)
2473 else if (outer_code == AND && and_operand ((x), DImode))
2475 else if ((outer_code == IOR || outer_code == XOR
2476 || outer_code == PLUS)
2477 && CONST_OK_FOR_I10 (INTVAL (x)))
2479 else if (CONST_OK_FOR_I16 (INTVAL (x)))
2480 *total = COSTS_N_INSNS (outer_code != SET);
2481 else if (CONST_OK_FOR_I16 (INTVAL (x) >> 16))
2482 *total = COSTS_N_INSNS ((outer_code != SET) + 1);
2483 else if (CONST_OK_FOR_I16 ((INTVAL (x) >> 16) >> 16))
2484 *total = COSTS_N_INSNS ((outer_code != SET) + 2);
2486 *total = COSTS_N_INSNS ((outer_code != SET) + 3);
2489 if (CONST_OK_FOR_I08 (INTVAL (x)))
2491 else if ((outer_code == AND || outer_code == IOR || outer_code == XOR)
2492 && CONST_OK_FOR_K08 (INTVAL (x)))
2494 /* prepare_cmp_insn will force costly constants into registers before
2495 the cbranch[sd]i4 patterns can see them, so preserve potentially
2496 interesting ones not covered by I08 above. */
2497 else if (outer_code == COMPARE
2498 && ((unsigned HOST_WIDE_INT) INTVAL (x)
2499 == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
2500 || INTVAL (x) == 0x7fffffff
2501 || INTVAL (x) == 0x80 || INTVAL (x) == -0x81))
2510 if (TARGET_SHMEDIA64)
2511 *total = COSTS_N_INSNS (4);
2512 else if (TARGET_SHMEDIA32)
2513 *total = COSTS_N_INSNS (2);
2520 *total = COSTS_N_INSNS (4);
2521 /* prepare_cmp_insn will force costly constants into registers before
2522 the cbranchdi4 pattern can see them, so preserve potentially
2523 interesting ones. */
2524 else if (outer_code == COMPARE && GET_MODE (x) == DImode)
2530 if (x == CONST0_RTX (GET_MODE (x)))
2532 else if (sh_1el_vec (x, VOIDmode))
2533 *total = outer_code != SET;
2534 else if (sh_rep_vec (x, VOIDmode))
2535 *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
2536 + (outer_code != SET));
2537 else *total = COSTS_N_INSNS (3) + (outer_code != SET);
2542 *total = COSTS_N_INSNS (addsubcosts (x));
2546 *total = COSTS_N_INSNS (andcosts (x));
2550 *total = COSTS_N_INSNS (multcosts (x));
2556 *total = COSTS_N_INSNS (shiftcosts (x));
2563 *total = COSTS_N_INSNS (20);
2567 if (sh_1el_vec (x, VOIDmode))
2568 *total = outer_code != SET;
2569 else if (sh_rep_vec (x, VOIDmode))
2570 *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
2571 + (outer_code != SET));
2572 else *total = COSTS_N_INSNS (3) + (outer_code != SET);
2585 /* Compute the cost of an address. For the SH, all valid addresses are
2586 the same cost. Use a slightly higher cost for reg + reg addressing,
2587 since it increases pressure on r0. */
2590 sh_address_cost (rtx X)
2592 return (GET_CODE (X) == PLUS
2593 && ! CONSTANT_P (XEXP (X, 1))
2594 && ! TARGET_SHMEDIA ? 1 : 0);
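/* E.g. the indexed @(r0,rn) mode gets cost 1 here while @rn and
   @(disp,rn) stay at 0, so the optimizers prefer a non-r0 form when
   one is available.  */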
2597 /* Code to expand a shift. */
2600 gen_ashift (int type, int n, rtx reg)
2602 /* Negative values here come from the shift_amounts array. */
2615 emit_insn (gen_ashrsi3_k (reg, reg, GEN_INT (n)));
2619 emit_insn (gen_lshrsi3_m (reg, reg, GEN_INT (n)));
2621 emit_insn (gen_lshrsi3_k (reg, reg, GEN_INT (n)));
2624 emit_insn (gen_ashlsi3_std (reg, reg, GEN_INT (n)));
2629 /* Same for HImode */
2632 gen_ashift_hi (int type, int n, rtx reg)
2634 /* Negative values here come from the shift_amounts array. */
2648 /* We don't have HImode right shift operations because using the
2649 ordinary 32 bit shift instructions for that doesn't generate proper
2650 zero/sign extension.
2651 gen_ashift_hi is only called in contexts where we know that the
2652 sign extension works out correctly. */
2655 if (GET_CODE (reg) == SUBREG)
2657 offset = SUBREG_BYTE (reg);
2658 reg = SUBREG_REG (reg);
2660 gen_ashift (type, n, gen_rtx_SUBREG (SImode, reg, offset));
2664 emit_insn (gen_ashlhi3_k (reg, reg, GEN_INT (n)));
2669 /* Output RTL to split a constant shift into its component SH constant
2670 shift instructions. */
2673 gen_shifty_op (int code, rtx *operands)
2675 int value = INTVAL (operands[2]);
2678 /* Truncate the shift count in case it is out of bounds. */
2679 value = value & 0x1f;
2683 if (code == LSHIFTRT)
2685 emit_insn (gen_rotlsi3_1 (operands[0], operands[0]));
2686 emit_insn (gen_movt (operands[0]));
2689 else if (code == ASHIFT)
2691 /* There is a two instruction sequence for 31 bit left shifts,
2692 but it requires r0. */
2693 if (GET_CODE (operands[0]) == REG && REGNO (operands[0]) == 0)
2695 emit_insn (gen_andsi3 (operands[0], operands[0], const1_rtx));
2696 emit_insn (gen_rotlsi3_31 (operands[0], operands[0]));
2701 else if (value == 0)
2703 /* This can happen even when optimizing, if there were subregs before
2704 reload. Don't output a nop here, as this is never optimized away;
2705 use a no-op move instead. */
2706 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[0]));
2710 max = shift_insns[value];
2711 for (i = 0; i < max; i++)
2712 gen_ashift (code, shift_amounts[value][i], operands[0]);
2715 /* Same as above, but optimized for values where the topmost bits don't matter. */
2719 gen_shifty_hi_op (int code, rtx *operands)
2721 int value = INTVAL (operands[2]);
2723 void (*gen_fun) (int, int, rtx);
2725 /* This operation is used by and_shl for SImode values with a few
2726 high bits known to be cleared. */
2730 emit_insn (gen_nop ());
2734 gen_fun = GET_MODE (operands[0]) == HImode ? gen_ashift_hi : gen_ashift;
2737 max = ext_shift_insns[value];
2738 for (i = 0; i < max; i++)
2739 gen_fun (code, ext_shift_amounts[value][i], operands[0]);
2742 /* When shifting right, emit the shifts in reverse order, so that
2743 solitary negative values come first. */
2744 for (i = ext_shift_insns[value] - 1; i >= 0; i--)
2745 gen_fun (code, ext_shift_amounts[value][i], operands[0]);
2748 /* Output RTL for an arithmetic right shift. */
2750 /* ??? Rewrite to use super-optimizer sequences. */
2753 expand_ashiftrt (rtx *operands)
2761 if (GET_CODE (operands[2]) != CONST_INT)
2763 rtx count = copy_to_mode_reg (SImode, operands[2]);
2764 emit_insn (gen_negsi2 (count, count));
2765 emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
2768 else if (ashiftrt_insns[INTVAL (operands[2]) & 31]
2769 > 1 + SH_DYNAMIC_SHIFT_COST)
2772 = force_reg (SImode, GEN_INT (- (INTVAL (operands[2]) & 31)));
2773 emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
2777 if (GET_CODE (operands[2]) != CONST_INT)
2780 value = INTVAL (operands[2]) & 31;
2784 /* If we are called from abs expansion, arrange things so that we
2785 can use a single MT instruction that doesn't clobber the source,
2786 if LICM can hoist out the load of the constant zero. */
2787 if (currently_expanding_to_rtl)
2789 emit_insn (gen_cmpgtsi_t (force_reg (SImode, CONST0_RTX (SImode)),
2791 emit_insn (gen_mov_neg_si_t (operands[0]));
2794 emit_insn (gen_ashrsi2_31 (operands[0], operands[1]));
2797 else if (value >= 16 && value <= 19)
2799 wrk = gen_reg_rtx (SImode);
2800 emit_insn (gen_ashrsi2_16 (wrk, operands[1]));
2803 gen_ashift (ASHIFTRT, 1, wrk);
2804 emit_move_insn (operands[0], wrk);
2807 /* Expand a short sequence inline, longer call a magic routine. */
2808 else if (value <= 5)
2810 wrk = gen_reg_rtx (SImode);
2811 emit_move_insn (wrk, operands[1]);
2813 gen_ashift (ASHIFTRT, 1, wrk);
2814 emit_move_insn (operands[0], wrk);
2818 wrk = gen_reg_rtx (Pmode);
2820 /* Load the value into an arg reg and call a helper. */
2821 emit_move_insn (gen_rtx_REG (SImode, 4), operands[1]);
2822 sprintf (func, "__ashiftrt_r4_%d", value);
2823 function_symbol (wrk, func, SFUNC_STATIC);
2824 emit_insn (gen_ashrsi3_n (GEN_INT (value), wrk));
2825 emit_move_insn (operands[0], gen_rtx_REG (SImode, 4));
2830 sh_dynamicalize_shift_p (rtx count)
2832 return shift_insns[INTVAL (count)] > 1 + SH_DYNAMIC_SHIFT_COST;
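/* E.g. if the dynamic shift cost is 1, a constant shift needing three
   or more insns (such as a shift by 6) is better done by loading the
   count into a register and using a dynamic shift (shad / shld).  */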
2835 /* Try to find a good way to implement the combiner pattern
2836 [(set (match_operand:SI 0 "register_operand" "r")
2837 (and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
2838 (match_operand:SI 2 "const_int_operand" "n"))
2839 (match_operand:SI 3 "const_int_operand" "n"))) .
2840 LEFT_RTX is operand 2 in the above pattern, and MASK_RTX is operand 3.
2841 return 0 for simple right / left or left/right shift combination.
2842 return 1 for a combination of shifts with zero_extend.
2843 return 2 for a combination of shifts with an AND that needs r0.
2844 return 3 for a combination of shifts with an AND that needs an extra
2845 scratch register, when the three highmost bits of the AND mask are clear.
2846 return 4 for a combination of shifts with an AND that needs an extra
2847 scratch register, when any of the three highmost bits of the AND mask
2849 If ATTRP is set, store an initial right shift width in ATTRP[0],
2850 and the instruction length in ATTRP[1]. These values are not valid when returning 0.
2852 When ATTRP is set and returning 1, ATTRP[2] gets set to the index into
2853 shift_amounts for the last shift value that is to be used before the
2856 shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
2858 unsigned HOST_WIDE_INT mask, lsb, mask2, lsb2;
2859 int left = INTVAL (left_rtx), right;
2861 int cost, best_cost = 10000;
2862 int best_right = 0, best_len = 0;
2866 if (left < 0 || left > 31)
2868 if (GET_CODE (mask_rtx) == CONST_INT)
2869 mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
2871 mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
2872 /* Can this be expressed as a right shift / left shift pair? */
2873 lsb = ((mask ^ (mask - 1)) >> 1) + 1;
2874 right = exact_log2 (lsb);
2875 mask2 = ~(mask + lsb - 1);
2876 lsb2 = ((mask2 ^ (mask2 - 1)) >> 1) + 1;
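  /* E.g. for mask == 0x0ff0 this gives lsb == 0x10 and right == 4;
     mask2 == ~0xfff and lsb2 == 0x1000, so the mask2 == ~(lsb2 - 1)
     tests below detect that the ones in the mask are contiguous.  */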
2877 /* mask has no zeroes but trailing zeroes <==> ! mask2 */
2879 best_cost = shift_insns[right] + shift_insns[right + left];
2880 /* mask has no trailing zeroes <==> ! right */
2881 else if (! right && mask2 == ~(lsb2 - 1))
2883 int late_right = exact_log2 (lsb2);
2884 best_cost = shift_insns[left + late_right] + shift_insns[late_right];
2886 /* Try to use zero extend. */
2887 if (mask2 == ~(lsb2 - 1))
2891 for (width = 8; width <= 16; width += 8)
2893 /* Can we zero-extend right away? */
2894 if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
2897 = 1 + ext_shift_insns[right] + ext_shift_insns[left + right];
2898 if (cost < best_cost)
2909 /* ??? Could try to put zero extend into initial right shift,
2910 or even shift a bit left before the right shift. */
2911 /* Determine value of first part of left shift, to get to the
2912 zero extend cut-off point. */
2913 first = width - exact_log2 (lsb2) + right;
2914 if (first >= 0 && right + left - first >= 0)
2916 cost = ext_shift_insns[right] + ext_shift_insns[first] + 1
2917 + ext_shift_insns[right + left - first];
2918 if (cost < best_cost)
2930 /* Try to use r0 AND pattern */
2931 for (i = 0; i <= 2; i++)
2935 if (! CONST_OK_FOR_K08 (mask >> i))
2937 cost = (i != 0) + 2 + ext_shift_insns[left + i];
2938 if (cost < best_cost)
2943 best_len = cost - 1;
2946 /* Try to use a scratch register to hold the AND operand. */
2947 can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
2948 for (i = 0; i <= 2; i++)
2952 cost = (i != 0) + (CONST_OK_FOR_I08 (mask >> i) ? 2 : 3)
2953 + (can_ext ? ext_shift_insns : shift_insns)[left + i];
2954 if (cost < best_cost)
2959 best_len = cost - 1 - ! CONST_OK_FOR_I08 (mask >> i);
2965 attrp[0] = best_right;
2966 attrp[1] = best_len;
2971 /* This is used in length attributes of the unnamed instructions
2972 corresponding to shl_and_kind return values of 1 and 2. */
2974 shl_and_length (rtx insn)
2976 rtx set_src, left_rtx, mask_rtx;
2979 set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2980 left_rtx = XEXP (XEXP (set_src, 0), 1);
2981 mask_rtx = XEXP (set_src, 1);
2982 shl_and_kind (left_rtx, mask_rtx, attributes);
2983 return attributes[1];
2986 /* This is used in length attribute of the and_shl_scratch instruction. */
2989 shl_and_scr_length (rtx insn)
2991 rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2992 int len = shift_insns[INTVAL (XEXP (set_src, 1))];
2993 rtx op = XEXP (set_src, 0);
2994 len += shift_insns[INTVAL (XEXP (op, 1))] + 1;
2995 op = XEXP (XEXP (op, 0), 0);
2996 return len + shift_insns[INTVAL (XEXP (op, 1))];
2999 /* Generate rtl for instructions for which shl_and_kind advised a particular
3000 method of generating them, i.e. returned zero. */
3003 gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
3006 unsigned HOST_WIDE_INT mask;
3007 int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
3008 int right, total_shift;
3009 void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;
3011 right = attributes[0];
3012 total_shift = INTVAL (left_rtx) + right;
3013 mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> total_shift;
3020 int first = attributes[2];
3025 emit_insn ((mask << right) <= 0xff
3026 ? gen_zero_extendqisi2 (dest,
3027 gen_lowpart (QImode, source))
3028 : gen_zero_extendhisi2 (dest,
3029 gen_lowpart (HImode, source)));
3033 emit_insn (gen_movsi (dest, source));
3037 operands[2] = GEN_INT (right);
3038 gen_shifty_hi_op (LSHIFTRT, operands);
3042 operands[2] = GEN_INT (first);
3043 gen_shifty_hi_op (ASHIFT, operands);
3044 total_shift -= first;
3048 emit_insn (mask <= 0xff
3049 ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
3050 : gen_zero_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3051 if (total_shift > 0)
3053 operands[2] = GEN_INT (total_shift);
3054 gen_shifty_hi_op (ASHIFT, operands);
3059 shift_gen_fun = gen_shifty_op;
3061 /* If the topmost bit that matters is set, set the topmost bits
3062 that don't matter. This way, we might be able to get a shorter signed constant. */
3064 if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
3065 mask |= (HOST_WIDE_INT) ~0 << (31 - total_shift);
3067 /* Don't expand fine-grained when combining, because that will
3068 make the pattern fail. */
3069 if (currently_expanding_to_rtl
3070 || reload_in_progress || reload_completed)
3074 /* Cases 3 and 4 should be handled by this split
3075 only while combining */
3076 gcc_assert (kind <= 2);
3079 emit_insn (gen_lshrsi3 (dest, source, GEN_INT (right)));
3082 emit_insn (gen_andsi3 (dest, source, GEN_INT (mask)));
3087 operands[2] = GEN_INT (total_shift);
3088 shift_gen_fun (ASHIFT, operands);
3095 if (kind != 4 && total_shift < 16)
3097 neg = -ext_shift_amounts[total_shift][1];
3099 neg -= ext_shift_amounts[total_shift][2];
3103 emit_insn (gen_and_shl_scratch (dest, source,
3106 GEN_INT (total_shift + neg),
3108 emit_insn (gen_movsi (dest, dest));
3115 /* Try to find a good way to implement the combiner pattern
3116 [(set (match_operand:SI 0 "register_operand" "=r")
3117 (sign_extract:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
3118 (match_operand:SI 2 "const_int_operand" "n")
3119 (match_operand:SI 3 "const_int_operand" "n")
3121 (clobber (reg:SI T_REG))]
3122 LEFT_RTX is operand 2 in the above pattern, and SIZE_RTX is operand 3.
3123 return 0 for simple left / right shift combination.
3124 return 1 for left shift / 8 bit sign extend / left shift.
3125 return 2 for left shift / 16 bit sign extend / left shift.
3126 return 3 for left shift / 8 bit sign extend / shift / sign extend.
3127 return 4 for left shift / 16 bit sign extend / shift / sign extend.
3128 return 5 for left shift / 16 bit sign extend / right shift
3129 return 6 for < 8 bit sign extend / left shift.
3130 return 7 for < 8 bit sign extend / left shift / single right shift.
3131 If COSTP is nonzero, assign the calculated cost to *COSTP. */
3134 shl_sext_kind (rtx left_rtx, rtx size_rtx, int *costp)
3136 int left, size, insize, ext;
3137 int cost = 0, best_cost;
3140 left = INTVAL (left_rtx);
3141 size = INTVAL (size_rtx);
3142 insize = size - left;
3143 gcc_assert (insize > 0);
3144 /* Default to left / right shift. */
3146 best_cost = shift_insns[32 - insize] + ashiftrt_insns[32 - size];
3149 /* 16 bit shift / sign extend / 16 bit shift */
3150 cost = shift_insns[16 - insize] + 1 + ashiftrt_insns[16 - size];
3151 /* If ashiftrt_insns[16 - size] is 8, this choice will be overridden
3152 below, by alternative 3 or something even better. */
3153 if (cost < best_cost)
3159 /* Try a plain sign extend between two shifts. */
3160 for (ext = 16; ext >= insize; ext -= 8)
3164 cost = ext_shift_insns[ext - insize] + 1 + shift_insns[size - ext];
3165 if (cost < best_cost)
3167 kind = ext / (unsigned) 8;
3171 /* Check if we can do a sloppy shift with a final signed shift
3172 restoring the sign. */
3173 if (EXT_SHIFT_SIGNED (size - ext))
3174 cost = ext_shift_insns[ext - insize] + ext_shift_insns[size - ext] + 1;
3175 /* If not, maybe it's still cheaper to do the second shift sloppy,
3176 and do a final sign extend? */
3177 else if (size <= 16)
3178 cost = ext_shift_insns[ext - insize] + 1
3179 + ext_shift_insns[size > ext ? size - ext : ext - size] + 1;
3182 if (cost < best_cost)
3184 kind = ext / (unsigned) 8 + 2;
3188 /* Check if we can sign extend in r0 */
3191 cost = 3 + shift_insns[left];
3192 if (cost < best_cost)
3197 /* Try the same with a final signed shift. */
3200 cost = 3 + ext_shift_insns[left + 1] + 1;
3201 if (cost < best_cost)
3210 /* Try to use a dynamic shift. */
3211 cost = shift_insns[32 - insize] + 1 + SH_DYNAMIC_SHIFT_COST;
3212 if (cost < best_cost)
3223 /* Function to be used in the length attribute of the instructions
3224 implementing this pattern. */
3227 shl_sext_length (rtx insn)
3229 rtx set_src, left_rtx, size_rtx;
3232 set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
3233 left_rtx = XEXP (XEXP (set_src, 0), 1);
3234 size_rtx = XEXP (set_src, 1);
3235 shl_sext_kind (left_rtx, size_rtx, &cost);
3239 /* Generate rtl for this pattern. */
3242 gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
3245 int left, size, insize, cost;
3248 kind = shl_sext_kind (left_rtx, size_rtx, &cost);
3249 left = INTVAL (left_rtx);
3250 size = INTVAL (size_rtx);
3251 insize = size - left;
3259 int ext = kind & 1 ? 8 : 16;
3260 int shift2 = size - ext;
3262 /* Don't expand fine-grained when combining, because that will
3263 make the pattern fail. */
3264 if (! currently_expanding_to_rtl
3265 && ! reload_in_progress && ! reload_completed)
3267 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3268 emit_insn (gen_movsi (dest, source));
3272 emit_insn (gen_movsi (dest, source));
3276 operands[2] = GEN_INT (ext - insize);
3277 gen_shifty_hi_op (ASHIFT, operands);
3280 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
3281 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3286 operands[2] = GEN_INT (shift2);
3287 gen_shifty_op (ASHIFT, operands);
3294 if (EXT_SHIFT_SIGNED (shift2))
3296 operands[2] = GEN_INT (shift2 + 1);
3297 gen_shifty_op (ASHIFT, operands);
3298 operands[2] = const1_rtx;
3299 gen_shifty_op (ASHIFTRT, operands);
3302 operands[2] = GEN_INT (shift2);
3303 gen_shifty_hi_op (ASHIFT, operands);
3307 operands[2] = GEN_INT (-shift2);
3308 gen_shifty_hi_op (LSHIFTRT, operands);
3310 emit_insn (size <= 8
3311 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
3312 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3319 if (! currently_expanding_to_rtl
3320 && ! reload_in_progress && ! reload_completed)
3321 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3325 operands[2] = GEN_INT (16 - insize);
3326 gen_shifty_hi_op (ASHIFT, operands);
3327 emit_insn (gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3329 /* Don't use gen_ashrsi3 because it generates new pseudos. */
3331 gen_ashift (ASHIFTRT, 1, dest);
3336 /* Don't expand fine-grained when combining, because that will
3337 make the pattern fail. */
3338 if (! currently_expanding_to_rtl
3339 && ! reload_in_progress && ! reload_completed)
3341 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3342 emit_insn (gen_movsi (dest, source));
3345 emit_insn (gen_andsi3 (dest, source, GEN_INT ((1 << insize) - 1)));
3346 emit_insn (gen_xorsi3 (dest, dest, GEN_INT (1 << (insize - 1))));
3347 emit_insn (gen_addsi3 (dest, dest, GEN_INT (-1 << (insize - 1))));
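      /* The and / xor / add sequence above is the usual trick for sign
         extending an insize-bit field: e.g. for insize == 5, the value
         0x1f becomes (0x1f ^ 0x10) - 0x10 == -1.  */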
3349 operands[2] = kind == 7 ? GEN_INT (left + 1) : left_rtx;
3350 gen_shifty_op (ASHIFT, operands);
3352 emit_insn (gen_ashrsi3_k (dest, dest, const1_rtx));
3360 /* Prefix a symbol_ref name with "datalabel". */
3363 gen_datalabel_ref (rtx sym)
3367 if (GET_CODE (sym) == LABEL_REF)
3368 return gen_rtx_CONST (GET_MODE (sym),
3369 gen_rtx_UNSPEC (GET_MODE (sym),
3373 gcc_assert (GET_CODE (sym) == SYMBOL_REF);
3375 str = XSTR (sym, 0);
3376 /* Share all SYMBOL_REF strings with the same value - that is important for cse. */
3378 str = IDENTIFIER_POINTER (get_identifier (str));
3379 XSTR (sym, 0) = str;
3385 static alloc_pool label_ref_list_pool;
3387 typedef struct label_ref_list_d
3390 struct label_ref_list_d *next;
3391 } *label_ref_list_t;
3393 /* The SH cannot load a large constant into a register, constants have to
3394 come from a pc relative load. The reference of a pc relative load
3395 instruction must be less than 1k in front of the instruction. This
3396 means that we often have to dump a constant inside a function, and
3397 generate code to branch around it.
3399 It is important to minimize this, since the branches will slow things
3400 down and make things bigger.
3402 Worst case code looks like:

   mov.l L1,rn
   bra   L2
   nop
   align
   L1:   .long value
   L2:
   ..

   mov.l L3,rn
   bra   L4
   nop
   align
   L3:   .long value
   L4:
   ..
3420 We fix this by performing a scan before scheduling, which notices which
3421 instructions need to have their operands fetched from the constant table
3422 and builds the table.

   The algorithm is:
3426 scan, find an instruction which needs a pcrel move. Look forward, find the
3427 last barrier which is within MAX_COUNT bytes of the requirement.
3428 If there isn't one, make one. Process all the instructions between
3429 the find and the barrier.
3431 In the above example, we can tell that L3 is within 1k of L1, so
3432 the first move can be shrunk from the 3 insn+constant sequence into
3433 just 1 insn, and the constant moved to L3 to make:

   mov.l        L1,rn
   ..
   mov.l        L3,rn
   bra          L4
   nop
   align
   L3:.long value
   L4:.long value
3444 Then the second move becomes the target for the shortening process. */
3448 rtx value; /* Value in table. */
3449 rtx label; /* Label of value. */
3450 label_ref_list_t wend; /* End of window. */
3451 enum machine_mode mode; /* Mode of value. */
3453 /* True if this constant is accessed as part of a post-increment
3454 sequence. Note that HImode constants are never accessed in this way. */
3455 bool part_of_sequence_p;
3458 /* The maximum number of constants that can fit into one pool, since
3459 constants in the range 0..510 are at least 2 bytes long, and in the
3460 range from there to 1018 at least 4 bytes. */
3462 #define MAX_POOL_SIZE 372
3463 static pool_node pool_vector[MAX_POOL_SIZE];
3464 static int pool_size;
3465 static rtx pool_window_label;
3466 static int pool_window_last;
3468 static int max_labelno_before_reorg;
3470 /* ??? If we need a constant in HImode which is the truncated value of a
3471 constant we need in SImode, we could combine the two entries thus saving
3472 two bytes. Is this common enough to be worth the effort of implementing it? */
3475 /* ??? This stuff should be done at the same time that we shorten branches.
3476 As it is now, we must assume that all branches are the maximum size, and
3477 this causes us to almost always output constant pools sooner than necessary. */
3480 /* Add a constant to the pool and return its label. */
3483 add_constant (rtx x, enum machine_mode mode, rtx last_value)
3487 label_ref_list_t ref, newref;
3489 /* First see if we've already got it. */
3490 for (i = 0; i < pool_size; i++)
3492 if (x->code == pool_vector[i].value->code
3493 && mode == pool_vector[i].mode)
3495 if (x->code == CODE_LABEL)
3497 if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
3500 if (rtx_equal_p (x, pool_vector[i].value))
3505 || ! rtx_equal_p (last_value, pool_vector[i-1].value))
3507 new = gen_label_rtx ();
3508 LABEL_REFS (new) = pool_vector[i].label;
3509 pool_vector[i].label = lab = new;
3511 if (lab && pool_window_label)
3513 newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
3514 newref->label = pool_window_label;
3515 ref = pool_vector[pool_window_last].wend;
3517 pool_vector[pool_window_last].wend = newref;
3520 pool_window_label = new;
3521 pool_window_last = i;
3527 /* Need a new one. */
3528 pool_vector[pool_size].value = x;
3529 if (last_value && rtx_equal_p (last_value, pool_vector[pool_size - 1].value))
3532 pool_vector[pool_size - 1].part_of_sequence_p = true;
3535 lab = gen_label_rtx ();
3536 pool_vector[pool_size].mode = mode;
3537 pool_vector[pool_size].label = lab;
3538 pool_vector[pool_size].wend = NULL;
3539 pool_vector[pool_size].part_of_sequence_p = (lab == 0);
3540 if (lab && pool_window_label)
3542 newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
3543 newref->label = pool_window_label;
3544 ref = pool_vector[pool_window_last].wend;
3546 pool_vector[pool_window_last].wend = newref;
3549 pool_window_label = lab;
3550 pool_window_last = pool_size;
3555 /* Output the literal table. START, if nonzero, is the first instruction
3556 this table is needed for, and also indicates that there is at least one
3557 casesi_worker_2 instruction; we have to emit the operand3 labels from
3558 these insns at a 4-byte aligned position. BARRIER is the barrier
3559 after which we are to place the table. */
3562 dump_table (rtx start, rtx barrier)
3568 label_ref_list_t ref;
3571 /* Do two passes, first time dump out the HI sized constants. */
3573 for (i = 0; i < pool_size; i++)
3575 pool_node *p = &pool_vector[i];
3577 if (p->mode == HImode)
3581 scan = emit_insn_after (gen_align_2 (), scan);
3584 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3585 scan = emit_label_after (lab, scan);
3586 scan = emit_insn_after (gen_consttable_2 (p->value, const0_rtx),
3588 for (ref = p->wend; ref; ref = ref->next)
3591 scan = emit_insn_after (gen_consttable_window_end (lab), scan);
3594 else if (p->mode == DFmode)
3602 scan = emit_insn_after (gen_align_4 (), scan);
3604 for (; start != barrier; start = NEXT_INSN (start))
3605 if (GET_CODE (start) == INSN
3606 && recog_memoized (start) == CODE_FOR_casesi_worker_2)
3608 rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
3609 rtx lab = XEXP (XVECEXP (src, 0, 3), 0);
3611 scan = emit_label_after (lab, scan);
3614 if (TARGET_FMOVD && TARGET_ALIGN_DOUBLE && have_df)
3616 rtx align_insn = NULL_RTX;
3618 scan = emit_label_after (gen_label_rtx (), scan);
3619 scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
3622 for (i = 0; i < pool_size; i++)
3624 pool_node *p = &pool_vector[i];
3632 if (align_insn && !p->part_of_sequence_p)
3634 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3635 emit_label_before (lab, align_insn);
3636 emit_insn_before (gen_consttable_4 (p->value, const0_rtx),
3638 for (ref = p->wend; ref; ref = ref->next)
3641 emit_insn_before (gen_consttable_window_end (lab),
3644 delete_insn (align_insn);
3645 align_insn = NULL_RTX;
3650 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3651 scan = emit_label_after (lab, scan);
3652 scan = emit_insn_after (gen_consttable_4 (p->value,
3654 need_align = ! need_align;
3660 scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
3665 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3666 scan = emit_label_after (lab, scan);
3667 scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
3674 if (p->mode != HImode)
3676 for (ref = p->wend; ref; ref = ref->next)
3679 scan = emit_insn_after (gen_consttable_window_end (lab),
3688 for (i = 0; i < pool_size; i++)
3690 pool_node *p = &pool_vector[i];
3701 scan = emit_label_after (gen_label_rtx (), scan);
3702 scan = emit_insn_after (gen_align_4 (), scan);
3704 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3705 scan = emit_label_after (lab, scan);
3706 scan = emit_insn_after (gen_consttable_4 (p->value, const0_rtx),
3714 scan = emit_label_after (gen_label_rtx (), scan);
3715 scan = emit_insn_after (gen_align_4 (), scan);
3717 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3718 scan = emit_label_after (lab, scan);
3719 scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
3726 if (p->mode != HImode)
3728 for (ref = p->wend; ref; ref = ref->next)
3731 scan = emit_insn_after (gen_consttable_window_end (lab), scan);
3736 scan = emit_insn_after (gen_consttable_end (), scan);
3737 scan = emit_barrier_after (scan);
3739 pool_window_label = NULL_RTX;
3740 pool_window_last = 0;
3743 /* Return nonzero if constant would be an ok source for a
3744 mov.w instead of a mov.l. */
3749 return (GET_CODE (src) == CONST_INT
3750 && INTVAL (src) >= -32768
3751 && INTVAL (src) <= 32767);
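/* E.g. 32767 (0x7fff) can come from a mov.w, while 32768 would read
   back as -32768 after the sign extension and therefore needs a
   mov.l.  */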
3754 #define MOVA_LABELREF(mova) XVECEXP (SET_SRC (PATTERN (mova)), 0, 0)
3756 /* Nonzero if the insn is a move instruction which needs to be fixed. */
3758 /* ??? For DImode/DFmode moves, we don't need to fix it if each half of the
3759 CONST_DOUBLE input value is CONST_OK_FOR_I08. For a SFmode move, we don't
3760 need to fix it if the input value is CONST_OK_FOR_I08. */
3763 broken_move (rtx insn)
3765 if (GET_CODE (insn) == INSN)
3767 rtx pat = PATTERN (insn);
3768 if (GET_CODE (pat) == PARALLEL)
3769 pat = XVECEXP (pat, 0, 0);
3770 if (GET_CODE (pat) == SET
3771 /* We can load any 8-bit value if we don't care what the high
3772 order bits end up as. */
3773 && GET_MODE (SET_DEST (pat)) != QImode
3774 && (CONSTANT_P (SET_SRC (pat))
3775 /* Match mova_const. */
3776 || (GET_CODE (SET_SRC (pat)) == UNSPEC
3777 && XINT (SET_SRC (pat), 1) == UNSPEC_MOVA
3778 && GET_CODE (XVECEXP (SET_SRC (pat), 0, 0)) == CONST))
3780 && GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
3781 && (fp_zero_operand (SET_SRC (pat))
3782 || fp_one_operand (SET_SRC (pat)))
3783 /* ??? If this is a -m4 or -m4-single compilation, in general
3784 we don't know the current setting of fpscr, so disable fldi.
3785 There is an exception if this was a register-register move
3786 before reload - and hence it was ascertained that we have
3787 single precision setting - and in a post-reload optimization
3788 we changed this to do a constant load. In that case
3789 we don't have an r0 clobber, hence we must use fldi. */
3790 && (! TARGET_SH4 || TARGET_FMOVD
3791 || (GET_CODE (XEXP (XVECEXP (PATTERN (insn), 0, 2), 0))
3793 && GET_CODE (SET_DEST (pat)) == REG
3794 && FP_REGISTER_P (REGNO (SET_DEST (pat))))
3796 && GET_MODE (SET_DEST (pat)) == SImode
3797 && (satisfies_constraint_I20 (SET_SRC (pat))
3798 || satisfies_constraint_I28 (SET_SRC (pat))))
3799 && ! satisfies_constraint_I08 (SET_SRC (pat)))
3809 return (GET_CODE (insn) == INSN
3810 && GET_CODE (PATTERN (insn)) == SET
3811 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
3812 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_MOVA
3813 /* Don't match mova_const. */
3814 && GET_CODE (MOVA_LABELREF (insn)) == LABEL_REF);
3817 /* Fix up a mova from a switch that went out of range. */
3819 fixup_mova (rtx mova)
3821 PUT_MODE (XEXP (MOVA_LABELREF (mova), 0), QImode);
3824 SET_SRC (PATTERN (mova)) = MOVA_LABELREF (mova);
3825 INSN_CODE (mova) = -1;
3830 rtx lab = gen_label_rtx ();
3831 rtx wpat, wpat0, wpat1, wsrc, diff;
3835 worker = NEXT_INSN (worker);
3837 && GET_CODE (worker) != CODE_LABEL
3838 && GET_CODE (worker) != JUMP_INSN);
3839 } while (GET_CODE (worker) == NOTE
3840 || recog_memoized (worker) != CODE_FOR_casesi_worker_1);
3841 wpat = PATTERN (worker);
3842 wpat0 = XVECEXP (wpat, 0, 0);
3843 wpat1 = XVECEXP (wpat, 0, 1);
3844 wsrc = SET_SRC (wpat0);
3845 PATTERN (worker) = (gen_casesi_worker_2
3846 (SET_DEST (wpat0), XVECEXP (wsrc, 0, 1),
3847 XEXP (XVECEXP (wsrc, 0, 2), 0), lab,
3849 INSN_CODE (worker) = -1;
3850 diff = gen_rtx_MINUS (Pmode, XVECEXP (SET_SRC (PATTERN (mova)), 0, 0),
3851 gen_rtx_LABEL_REF (Pmode, lab));
3852 diff = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, diff), UNSPEC_PIC);
3853 SET_SRC (PATTERN (mova)) = gen_rtx_CONST (Pmode, diff);
3854 INSN_CODE (mova) = -1;
3858 /* NEW_MOVA is a mova we've just encountered while scanning forward. Update
3859 *num_mova, and check if the new mova is not nested within the first one.
3860 Return 0 if *first_mova was replaced, 1 if new_mova was replaced,
3861 2 if new_mova has been assigned to *first_mova, -1 otherwise. */
3863 untangle_mova (int *num_mova, rtx *first_mova, rtx new_mova)
3865 int n_addr = 0; /* Initialization to shut up spurious warning. */
3866 int f_target, n_target = 0; /* Likewise. */
3870 n_addr = INSN_ADDRESSES (INSN_UID (new_mova));
3871 n_target = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (new_mova), 0)));
3872 if (n_addr > n_target || n_addr + 1022 < n_target)
3874 /* Change the mova into a load.
3875 broken_move will then return true for it. */
3876 fixup_mova (new_mova);
3882 *first_mova = new_mova;
3887 = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (*first_mova), 0))))
3892 if (f_target - INSN_ADDRESSES (INSN_UID (*first_mova))
3893 > n_target - n_addr)
3895 fixup_mova (*first_mova);
3900 fixup_mova (new_mova);
3905 /* Find the last barrier from insn FROM which is close enough to hold the
3906 constant pool. If we can't find one, then create one near the end of
3910 find_barrier (int num_mova, rtx mova, rtx from)
3919 int leading_mova = num_mova;
3920 rtx barrier_before_mova = 0, found_barrier = 0, good_barrier = 0;
3925 /* For HImode: range is 510, add 4 because pc counts from address of
3926 second instruction after this one, subtract 2 for the jump instruction
3927 that we may need to emit before the table, subtract 2 for the instruction
3928 that fills the jump delay slot (in very rare cases, reorg will take an
3929 instruction from after the constant pool or will leave the delay slot
3930 empty). This gives 510.
3931 For SImode: range is 1020, add 4 because pc counts from address of
3932 second instruction after this one, subtract 2 in case pc is 2 byte
3933 aligned, subtract 2 for the jump instruction that we may need to emit
3934 before the table, subtract 2 for the instruction that fills the jump
3935 delay slot. This gives 1018. */
3937 /* The branch will always be shortened now that the reference address for
3938 forward branches is the successor address, thus we need no longer make
3939 adjustments to the [sh]i_limit for -O0. */
3944 while (from && count_si < si_limit && count_hi < hi_limit)
3946 int inc = get_attr_length (from);
3949 /* If this is a label that existed at the time of the compute_alignments
3950 call, determine the alignment. N.B. When find_barrier recurses for
3951 an out-of-reach mova, we might see labels at the start of previously
3952 inserted constant tables. */
3953 if (GET_CODE (from) == CODE_LABEL
3954 && CODE_LABEL_NUMBER (from) <= max_labelno_before_reorg)
3957 new_align = 1 << label_to_alignment (from);
3958 else if (GET_CODE (prev_nonnote_insn (from)) == BARRIER)
3959 new_align = 1 << barrier_align (from);
3964 /* In case we are scanning a constant table because of recursion, check
3965 for explicit alignments. If the table is long, we might be forced
3966 to emit the new table in front of it; the length of the alignment
3967 might be the last straw. */
3968 else if (GET_CODE (from) == INSN
3969 && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
3970 && XINT (PATTERN (from), 1) == UNSPECV_ALIGN)
3971 new_align = INTVAL (XVECEXP (PATTERN (from), 0, 0));
3972 /* When we find the end of a constant table, paste the new constant
3973 at the end. That is better than putting it in front because
3974 this way, we don't need extra alignment for adding a 4-byte-aligned
3975 mov(a) label to a 2/4 or 8/4 byte aligned table. */
3976 else if (GET_CODE (from) == INSN
3977 && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
3978 && XINT (PATTERN (from), 1) == UNSPECV_CONST_END)
3981 if (GET_CODE (from) == BARRIER)
3985 found_barrier = from;
3987 /* If we are at the end of the function, or in front of an alignment
3988 instruction, we need not insert an extra alignment. We prefer
3989 this kind of barrier. */
3990 if (barrier_align (from) > 2)
3991 good_barrier = from;
3993 /* If we are at the end of a hot/cold block, dump the constants here. */
3995 next = NEXT_INSN (from);
3998 && NOTE_KIND (next) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
4002 if (broken_move (from))
4005 enum machine_mode mode;
4007 pat = PATTERN (from);
4008 if (GET_CODE (pat) == PARALLEL)
4009 pat = XVECEXP (pat, 0, 0);
4010 src = SET_SRC (pat);
4011 dst = SET_DEST (pat);
4012 mode = GET_MODE (dst);
4014 /* We must explicitly check the mode, because sometimes the
4015 front end will generate code to load unsigned constants into
4016 HImode targets without properly sign extending them. */
4018 || (mode == SImode && hi_const (src) && REGNO (dst) != FPUL_REG))
4021 /* We put the short constants before the long constants, so
4022 we must count the length of short constants in the range
4023 for the long constants. */
4024 /* ??? This isn't optimal, but is easy to do. */
4029 /* We dump DF/DI constants before SF/SI ones, because
4030 the limit is the same, but the alignment requirements
4031 are higher. We may waste up to 4 additional bytes
4032 for alignment, and the DF/DI constant may have
4033 another SF/SI constant placed before it. */
4034 if (TARGET_SHCOMPACT
4036 && (mode == DFmode || mode == DImode))
4041 while (si_align > 2 && found_si + si_align - 2 > count_si)
4043 if (found_si > count_si)
4044 count_si = found_si;
4045 found_si += GET_MODE_SIZE (mode);
4047 si_limit -= GET_MODE_SIZE (mode);
4053 switch (untangle_mova (&num_mova, &mova, from))
4055 case 0: return find_barrier (0, 0, mova);
4060 = good_barrier ? good_barrier : found_barrier;
4064 if (found_si > count_si)
4065 count_si = found_si;
4067 else if (GET_CODE (from) == JUMP_INSN
4068 && (GET_CODE (PATTERN (from)) == ADDR_VEC
4069 || GET_CODE (PATTERN (from)) == ADDR_DIFF_VEC))
4071 if ((num_mova > 1 && GET_MODE (prev_nonnote_insn (from)) == VOIDmode)
4073 && (prev_nonnote_insn (from)
4074 == XEXP (MOVA_LABELREF (mova), 0))))
4076 if (barrier_align (next_real_insn (from)) == align_jumps_log)
4078 /* We have just passed the barrier in front of the
4079 ADDR_DIFF_VEC, which is stored in found_barrier. Since
4080 the ADDR_DIFF_VEC is accessed as data, just like our pool
4081 constants, this is a good opportunity to accommodate what
4082 we have gathered so far.
4083 If we waited any longer, we could end up at a barrier in
4084 front of code, which gives worse cache usage for separated
4085 instruction / data caches. */
4086 good_barrier = found_barrier;
4091 rtx body = PATTERN (from);
4092 inc = XVECLEN (body, GET_CODE (body) == ADDR_DIFF_VEC) * GET_MODE_SIZE (GET_MODE (body));
4095 /* For the SH1, we generate alignments even after jumps-around-jumps. */
4096 else if (GET_CODE (from) == JUMP_INSN
4098 && ! TARGET_SMALLCODE)
4104 if (new_align > si_align)
4106 si_limit -= (count_si - 1) & (new_align - si_align);
4107 si_align = new_align;
4109 count_si = (count_si + new_align - 1) & -new_align;
4114 if (new_align > hi_align)
4116 hi_limit -= (count_hi - 1) & (new_align - hi_align);
4117 hi_align = new_align;
4119 count_hi = (count_hi + new_align - 1) & -new_align;
4121 from = NEXT_INSN (from);
4128 /* Try as we might, the leading mova is out of range. Change
4129 it into a load (which will become a pcload) and retry. */
4131 return find_barrier (0, 0, mova);
4135 /* Insert the constant pool table before the mova instruction,
4136 to prevent the mova label reference from going out of range. */
4138 good_barrier = found_barrier = barrier_before_mova;
4144 if (good_barrier && next_real_insn (found_barrier))
4145 found_barrier = good_barrier;
4149 /* We didn't find a barrier in time to dump our stuff,
4150 so we'll make one. */
4151 rtx label = gen_label_rtx ();
4153 /* If we exceeded the range, then we must back up over the last
4154 instruction we looked at. Otherwise, we just need to undo the
4155 NEXT_INSN at the end of the loop. */
4156 if (PREV_INSN (from) != orig
4157 && (count_hi > hi_limit || count_si > si_limit))
4158 from = PREV_INSN (PREV_INSN (from));
4160 from = PREV_INSN (from);
4162 /* Walk back to be just before any jump or label.
4163 Putting it before a label reduces the number of times the branch
4164 around the constant pool table will be hit. Putting it before
4165 a jump makes it more likely that the bra delay slot will be filled. */
4167 while (GET_CODE (from) == JUMP_INSN || GET_CODE (from) == NOTE
4168 || GET_CODE (from) == CODE_LABEL)
4169 from = PREV_INSN (from);
4171 from = emit_jump_insn_after (gen_jump (label), from);
4172 JUMP_LABEL (from) = label;
4173 LABEL_NUSES (label) = 1;
4174 found_barrier = emit_barrier_after (from);
4175 emit_label_after (label, found_barrier);
4178 return found_barrier;
4181 /* If the instruction INSN is implemented by a special function, and we can
4182 positively find the register that is used to call the sfunc, and this
4183 register is not used anywhere else in this instruction - except as the
4184 destination of a set, return this register; else, return 0. */
4186 sfunc_uses_reg (rtx insn)
4189 rtx pattern, part, reg_part, reg;
4191 if (GET_CODE (insn) != INSN)
4193 pattern = PATTERN (insn);
4194 if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
4197 for (reg_part = 0, i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
4199 part = XVECEXP (pattern, 0, i);
4200 if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == SImode)
4205 reg = XEXP (reg_part, 0);
4206 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
4208 part = XVECEXP (pattern, 0, i);
4209 if (part == reg_part || GET_CODE (part) == CLOBBER)
4211 if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
4212 && GET_CODE (SET_DEST (part)) == REG)
4213 ? SET_SRC (part) : part)))
4219 /* See if the only way in which INSN uses REG is by calling it, or by
4220 setting it while calling it. Set *SET to a SET rtx if the register is set by INSN. */
4224 noncall_uses_reg (rtx reg, rtx insn, rtx *set)
4230 reg2 = sfunc_uses_reg (insn);
4231 if (reg2 && REGNO (reg2) == REGNO (reg))
4233 pattern = single_set (insn);
4235 && GET_CODE (SET_DEST (pattern)) == REG
4236 && REGNO (reg) == REGNO (SET_DEST (pattern)))
4240 if (GET_CODE (insn) != CALL_INSN)
4242 /* We don't use rtx_equal_p because we don't care if the mode is different. */
4244 pattern = single_set (insn);
4246 && GET_CODE (SET_DEST (pattern)) == REG
4247 && REGNO (reg) == REGNO (SET_DEST (pattern)))
4253 par = PATTERN (insn);
4254 if (GET_CODE (par) == PARALLEL)
4255 for (i = XVECLEN (par, 0) - 1; i >= 0; i--)
4257 part = XVECEXP (par, 0, i);
4258 if (GET_CODE (part) != SET && reg_mentioned_p (reg, part))
4261 return reg_mentioned_p (reg, SET_SRC (pattern));
4267 pattern = PATTERN (insn);
4269 if (GET_CODE (pattern) == PARALLEL)
4273 for (i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
4274 if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
4276 pattern = XVECEXP (pattern, 0, 0);
4279 if (GET_CODE (pattern) == SET)
4281 if (reg_mentioned_p (reg, SET_DEST (pattern)))
4283 /* We don't use rtx_equal_p, because we don't care if the
4284 mode is different. */
4285 if (GET_CODE (SET_DEST (pattern)) != REG
4286 || REGNO (reg) != REGNO (SET_DEST (pattern)))
4292 pattern = SET_SRC (pattern);
4295 if (GET_CODE (pattern) != CALL
4296 || GET_CODE (XEXP (pattern, 0)) != MEM
4297 || ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
4303 /* Given a X, a pattern of an insn or a part of it, return a mask of used
4304 general registers. Bits 0..15 mean that the respective registers
4305 are used as inputs in the instruction. Bits 16..31 mean that the
4306 registers 0..15, respectively, are used as outputs, or are clobbered.
4307 IS_DEST should be set to 16 if X is the destination of a SET, else to 0. */
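/* For example, an insn that reads r1 and r2 and sets r0 yields
   (1 << 1) | (1 << 2) | (1 << (0 + 16)).  */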
4309 regs_used (rtx x, int is_dest)
4317 code = GET_CODE (x);
4322 return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
4323 << (REGNO (x) + is_dest));
4327 rtx y = SUBREG_REG (x);
4329 if (GET_CODE (y) != REG)
4332 return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
4334 subreg_regno_offset (REGNO (y),
4337 GET_MODE (x)) + is_dest));
4341 return regs_used (SET_SRC (x), 0) | regs_used (SET_DEST (x), 16);
4343 /* If there was a return value, it must have been indicated with USE. */
4358 fmt = GET_RTX_FORMAT (code);
4360 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4365 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4366 used |= regs_used (XVECEXP (x, i, j), is_dest);
4368 else if (fmt[i] == 'e')
4369 used |= regs_used (XEXP (x, i), is_dest);
4374 /* Create an instruction that prevents redirection of a conditional branch
4375 to the destination of the JUMP with address ADDR.
4376 If the branch needs to be implemented as an indirect jump, try to find
4377 a scratch register for it.
4378 If NEED_BLOCK is 0, don't do anything unless we need a scratch register.
4379 If any preceding insn that doesn't fit into a delay slot is good enough,
4380 pass 1. Pass 2 if a definite blocking insn is needed.
4381 -1 is used internally to avoid deep recursion.
4382 If a blocking instruction is made or recognized, return it. */
4385 gen_block_redirect (rtx jump, int addr, int need_block)
4388 rtx prev = prev_nonnote_insn (jump);
4391 /* First, check if we already have an instruction that satisfies our need. */
4392 if (prev && GET_CODE (prev) == INSN && ! INSN_DELETED_P (prev))
4394 if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
4396 if (GET_CODE (PATTERN (prev)) == USE
4397 || GET_CODE (PATTERN (prev)) == CLOBBER
4398 || get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
4400 else if ((need_block &= ~1) < 0)
4402 else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
4405 if (GET_CODE (PATTERN (jump)) == RETURN)
4409 /* Reorg even does nasty things with return insns that cause branches
4410 to go out of range - see find_end_label and callers. */
4411 return emit_insn_before (gen_block_branch_redirect (const0_rtx), jump);
4413 /* We can't use JUMP_LABEL here because it might be undefined
4414 when not optimizing. */
4415 dest = XEXP (SET_SRC (PATTERN (jump)), 0);
4416 /* If the branch is out of range, try to find a scratch register for it. */
4418 && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
4422 /* Don't look for the stack pointer as a scratch register,
4423 it would cause trouble if an interrupt occurred. */
4424 unsigned try = 0x7fff, used;
4425 int jump_left = flag_expensive_optimizations + 1;
4427 /* It is likely that the most recent eligible instruction is wanted for
4428 the delay slot. Therefore, find out which registers it uses, and
4429 try to avoid using them. */
4431 for (scan = jump; (scan = PREV_INSN (scan)); )
4435 if (INSN_DELETED_P (scan))
4437 code = GET_CODE (scan);
4438 if (code == CODE_LABEL || code == JUMP_INSN)
4441 && GET_CODE (PATTERN (scan)) != USE
4442 && GET_CODE (PATTERN (scan)) != CLOBBER
4443 && get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES)
4445 try &= ~regs_used (PATTERN (scan), 0);
4449 for (used = dead = 0, scan = JUMP_LABEL (jump);
4450 (scan = NEXT_INSN (scan)); )
4454 if (INSN_DELETED_P (scan))
4456 code = GET_CODE (scan);
4459 used |= regs_used (PATTERN (scan), 0);
4460 if (code == CALL_INSN)
4461 used |= regs_used (CALL_INSN_FUNCTION_USAGE (scan), 0);
4462 dead |= (used >> 16) & ~used;
4468 if (code == JUMP_INSN)
4470 if (jump_left-- && simplejump_p (scan))
4471 scan = JUMP_LABEL (scan);
4477 /* Mask out the stack pointer again, in case it was
4478 the only 'free' register we have found. */
4481 /* If the immediate destination is still in range, check for possible
4482 threading with a jump beyond the delay slot insn.
4483 Don't check if we are called recursively; the jump has been or will be
checked in a different invocation in that case.  */
4486 else if (optimize && need_block >= 0)
4488 rtx next = next_active_insn (next_active_insn (dest));
4489 if (next && GET_CODE (next) == JUMP_INSN
4490 && GET_CODE (PATTERN (next)) == SET
4491 && recog_memoized (next) == CODE_FOR_jump_compact)
4493 dest = JUMP_LABEL (next);
4495 && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
4497 gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), -1);
4503 rtx reg = gen_rtx_REG (SImode, exact_log2 (dead & -dead));
4505 /* It would be nice if we could convert the jump into an indirect
jump / far branch right now, and thus expose all constituent
4507 instructions to further optimization. However, reorg uses
4508 simplejump_p to determine if there is an unconditional jump where
4509 it should try to schedule instructions from the target of the
4510 branch; simplejump_p fails for indirect jumps even if they have
4512 rtx insn = emit_insn_before (gen_indirect_jump_scratch
4513 (reg, GEN_INT (INSN_UID (JUMP_LABEL (jump))))
4515 /* ??? We would like this to have the scope of the jump, but that
4516 scope will change when a delay slot insn of an inner scope is added.
4517 Hence, after delay slot scheduling, we'll have to expect
4518 NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and
4521 INSN_LOCATOR (insn) = INSN_LOCATOR (jump);
4522 INSN_CODE (insn) = CODE_FOR_indirect_jump_scratch;
4525 else if (need_block)
4526 /* We can't use JUMP_LABEL here because it might be undefined
4527 when not optimizing. */
4528 return emit_insn_before (gen_block_branch_redirect
4529 (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))))
4534 #define CONDJUMP_MIN -252
4535 #define CONDJUMP_MAX 262
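/* CONDJUMP_MIN and CONDJUMP_MAX bound the byte distance, relative to the
   address of a conditional branch, within which it can reach its target;
   split_branches below compares label distances against these bounds.  */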
4538 /* A label (to be placed) in front of the jump
4539 that jumps to our ultimate destination. */
4541 /* Where we are going to insert it if we cannot move the jump any farther,
4542 or the jump itself if we have picked up an existing jump. */
4544 /* The ultimate destination. */
4546 struct far_branch *prev;
4547 /* If the branch has already been created, its address;
4548 else the address of its first prospective user. */
4552 static void gen_far_branch (struct far_branch *);
4553 enum mdep_reorg_phase_e mdep_reorg_phase;
4555 gen_far_branch (struct far_branch *bp)
4557 rtx insn = bp->insert_place;
4559 rtx label = gen_label_rtx ();
4562 emit_label_after (label, insn);
4565 jump = emit_jump_insn_after (gen_jump (bp->far_label), insn);
4566 LABEL_NUSES (bp->far_label)++;
4569 jump = emit_jump_insn_after (gen_return (), insn);
4570 /* Emit a barrier so that reorg knows that any following instructions
4571 are not reachable via a fall-through path.
4572 But don't do this when not optimizing, since we wouldn't suppress the
4573 alignment for the barrier then, and could end up with out-of-range
4574 pc-relative loads. */
4576 emit_barrier_after (jump);
4577 emit_label_after (bp->near_label, insn);
4578 JUMP_LABEL (jump) = bp->far_label;
4579 ok = invert_jump (insn, label, 1);
4582 /* If we are branching around a jump (rather than a return), prevent
4583 reorg from using an insn from the jump target as the delay slot insn -
when reorg did this, it pessimized code (we'd rather hide the delay slot)
4585 and it could cause branches to go out of range. */
4588 (gen_stuff_delay_slot
4589 (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))),
4590 GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
4592 /* Prevent reorg from undoing our splits. */
4593 gen_block_redirect (jump, bp->address += 2, 2);
4596 /* Fix up ADDR_DIFF_VECs. */
4598 fixup_addr_diff_vecs (rtx first)
4602 for (insn = first; insn; insn = NEXT_INSN (insn))
4604 rtx vec_lab, pat, prev, prevpat, x, braf_label;
4606 if (GET_CODE (insn) != JUMP_INSN
4607 || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
4609 pat = PATTERN (insn);
4610 vec_lab = XEXP (XEXP (pat, 0), 0);
4612 /* Search the matching casesi_jump_2. */
4613 for (prev = vec_lab; ; prev = PREV_INSN (prev))
4615 if (GET_CODE (prev) != JUMP_INSN)
4617 prevpat = PATTERN (prev);
4618 if (GET_CODE (prevpat) != PARALLEL || XVECLEN (prevpat, 0) != 2)
4620 x = XVECEXP (prevpat, 0, 1);
4621 if (GET_CODE (x) != USE)
4624 if (GET_CODE (x) == LABEL_REF && XEXP (x, 0) == vec_lab)
4627 /* FIXME: This is a bug in the optimizer, but it seems harmless
to just avoid panicking.  */
4632 /* Emit the reference label of the braf where it belongs, right after
4633 the casesi_jump_2 (i.e. braf). */
4634 braf_label = XEXP (XEXP (SET_SRC (XVECEXP (prevpat, 0, 0)), 1), 0);
4635 emit_label_after (braf_label, prev);
/* Fix up the ADDR_DIFF_VEC to be relative
4638 to the reference address of the braf. */
4639 XEXP (XEXP (pat, 0), 0) = braf_label;
4643 /* BARRIER_OR_LABEL is either a BARRIER or a CODE_LABEL immediately following
4644 a barrier. Return the base 2 logarithm of the desired alignment. */
4646 barrier_align (rtx barrier_or_label)
4648 rtx next = next_real_insn (barrier_or_label), pat, prev;
4649 int slot, credit, jump_to_next = 0;
4654 pat = PATTERN (next);
4656 if (GET_CODE (pat) == ADDR_DIFF_VEC)
4659 if (GET_CODE (pat) == UNSPEC_VOLATILE && XINT (pat, 1) == UNSPECV_ALIGN)
4660 /* This is a barrier in front of a constant table. */
4663 prev = prev_real_insn (barrier_or_label);
4664 if (GET_CODE (PATTERN (prev)) == ADDR_DIFF_VEC)
4666 pat = PATTERN (prev);
4667 /* If this is a very small table, we want to keep the alignment after
4668 the table to the minimum for proper code alignment. */
4669 return ((TARGET_SMALLCODE
4670 || ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
4671 <= (unsigned) 1 << (CACHE_LOG - 2)))
4672 ? 1 << TARGET_SHMEDIA : align_jumps_log);
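/* Like the rest of this function, the value returned above is a base 2
   logarithm: 1 << TARGET_SHMEDIA yields 2 (4-byte alignment) when
   TARGET_SHMEDIA is 1, and 1 (2-byte alignment) otherwise.  */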
4675 if (TARGET_SMALLCODE)
4678 if (! TARGET_SH2 || ! optimize)
4679 return align_jumps_log;
4681 /* When fixing up pcloads, a constant table might be inserted just before
4682 the basic block that ends with the barrier. Thus, we can't trust the
4683 instruction lengths before that. */
4684 if (mdep_reorg_phase > SH_FIXUP_PCLOAD)
4686 /* Check if there is an immediately preceding branch to the insn beyond
the barrier.  We must weigh the cost of discarding useful information
from the current cache line when executing this branch and there is
an alignment, against that of fetching unneeded insns in front of the
4690 branch target when there is no alignment. */
4692 /* There are two delay_slot cases to consider. One is the simple case
4693 where the preceding branch is to the insn beyond the barrier (simple
4694 delay slot filling), and the other is where the preceding branch has
4695 a delay slot that is a duplicate of the insn after the barrier
4696 (fill_eager_delay_slots) and the branch is to the insn after the insn
4697 after the barrier. */
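/* The loop below walks back from the barrier with CREDIT, a byte budget
   derived from the cache line size (see the initializer); each insn that
   could fill a delay slot spends its length.  If, within that budget, a
   preceding branch targeting the insn past the barrier is found, omitting
   the alignment is deemed profitable.  */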
4699 /* PREV is presumed to be the JUMP_INSN for the barrier under
4700 investigation. Skip to the insn before it. */
4701 prev = prev_real_insn (prev);
4703 for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
4704 credit >= 0 && prev && GET_CODE (prev) == INSN;
4705 prev = prev_real_insn (prev))
4708 if (GET_CODE (PATTERN (prev)) == USE
4709 || GET_CODE (PATTERN (prev)) == CLOBBER)
4711 if (GET_CODE (PATTERN (prev)) == SEQUENCE)
4713 prev = XVECEXP (PATTERN (prev), 0, 1);
4714 if (INSN_UID (prev) == INSN_UID (next))
4716 /* Delay slot was filled with insn at jump target. */
4723 get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
4725 credit -= get_attr_length (prev);
4728 && GET_CODE (prev) == JUMP_INSN
4729 && JUMP_LABEL (prev))
4733 || next_real_insn (JUMP_LABEL (prev)) == next
4734 /* If relax_delay_slots() decides NEXT was redundant
4735 with some previous instruction, it will have
4736 redirected PREV's jump to the following insn. */
4737 || JUMP_LABEL (prev) == next_nonnote_insn (next)
4738 /* There is no upper bound on redundant instructions
4739 that might have been skipped, but we must not put an
4740 alignment where none had been before. */
4741 || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
4743 && (INSN_CODE (x) == CODE_FOR_block_branch_redirect
4744 || INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
4745 || INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
4747 rtx pat = PATTERN (prev);
4748 if (GET_CODE (pat) == PARALLEL)
4749 pat = XVECEXP (pat, 0, 0);
4750 if (credit - slot >= (GET_CODE (SET_SRC (pat)) == PC ? 2 : 0))
4756 return align_jumps_log;
4759 /* If we are inside a phony loop, almost any kind of label can turn up as the
4760 first one in the loop. Aligning a braf label causes incorrect switch
4761 destination addresses; we can detect braf labels because they are
4762 followed by a BARRIER.
4763 Applying loop alignment to small constant or switch tables is a waste
4764 of space, so we suppress this too. */
4766 sh_loop_align (rtx label)
4771 next = next_nonnote_insn (next);
4772 while (next && GET_CODE (next) == CODE_LABEL);
4776 || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC
4777 || recog_memoized (next) == CODE_FOR_consttable_2)
4780 return align_loops_log;
4783 /* Do a final pass over the function, just before delayed branch
4789 rtx first, insn, mova = NULL_RTX;
4791 rtx r0_rtx = gen_rtx_REG (Pmode, 0);
4792 rtx r0_inc_rtx = gen_rtx_POST_INC (Pmode, r0_rtx);
4794 first = get_insns ();
4795 max_labelno_before_reorg = max_label_num ();
4797 /* We must split call insns before introducing `mova's. If we're
4798 optimizing, they'll have already been split. Otherwise, make
4799 sure we don't split them too late. */
4801 split_all_insns_noflow ();
4806 /* If relaxing, generate pseudo-ops to associate function calls with
the symbols they call.  It does no harm not to generate these
pseudo-ops.  However, when we can generate them, it enables the
4809 linker to potentially relax the jsr to a bsr, and eliminate the
4810 register load and, possibly, the constant pool entry. */
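/* For instance (illustrative only; exact mnemonics and pool labels are
   assembler-specific), a call sequence like
	mov.l	.Lk,r1	! load the callee's address from the pool
	jsr	@r1
   can, thanks to the .uses annotation, be relaxed by the linker into
	bsr	callee
   eliminating the register load and possibly the pool entry.  */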
4812 mdep_reorg_phase = SH_INSERT_USES_LABELS;
4815 /* Remove all REG_LABEL_OPERAND notes. We want to use them for our
4816 own purposes. This works because none of the remaining passes
4817 need to look at them.
4819 ??? But it may break in the future. We should use a machine
4820 dependent REG_NOTE, or some other approach entirely. */
4821 for (insn = first; insn; insn = NEXT_INSN (insn))
4827 while ((note = find_reg_note (insn, REG_LABEL_OPERAND,
4829 remove_note (insn, note);
4833 for (insn = first; insn; insn = NEXT_INSN (insn))
4835 rtx pattern, reg, link, set, scan, dies, label;
4836 int rescan = 0, foundinsn = 0;
4838 if (GET_CODE (insn) == CALL_INSN)
4840 pattern = PATTERN (insn);
4842 if (GET_CODE (pattern) == PARALLEL)
4843 pattern = XVECEXP (pattern, 0, 0);
4844 if (GET_CODE (pattern) == SET)
4845 pattern = SET_SRC (pattern);
4847 if (GET_CODE (pattern) != CALL
4848 || GET_CODE (XEXP (pattern, 0)) != MEM)
4851 reg = XEXP (XEXP (pattern, 0), 0);
4855 reg = sfunc_uses_reg (insn);
4860 if (GET_CODE (reg) != REG)
4863 /* Try scanning backward to find where the register is set. */
4865 for (scan = PREV_INSN (insn);
4866 scan && GET_CODE (scan) != CODE_LABEL;
4867 scan = PREV_INSN (scan))
4869 if (! INSN_P (scan))
4872 if (! reg_mentioned_p (reg, scan))
4875 if (noncall_uses_reg (reg, scan, &set))
4888 /* The register is set at LINK. */
4890 /* We can only optimize the function call if the register is
4891 being set to a symbol. In theory, we could sometimes
4892 optimize calls to a constant location, but the assembler
4893 and linker do not support that at present. */
4894 if (GET_CODE (SET_SRC (set)) != SYMBOL_REF
4895 && GET_CODE (SET_SRC (set)) != LABEL_REF)
4898 /* Scan forward from LINK to the place where REG dies, and
4899 make sure that the only insns which use REG are
4900 themselves function calls. */
4902 /* ??? This doesn't work for call targets that were allocated
4903 by reload, since there may not be a REG_DEAD note for the
4907 for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
4911 /* Don't try to trace forward past a CODE_LABEL if we haven't
4912 seen INSN yet. Ordinarily, we will only find the setting insn
4913 if it is in the same basic block. However,
4914 cross-jumping can insert code labels in between the load and
4915 the call, and can result in situations where a single call
4916 insn may have two targets depending on where we came from. */
4918 if (GET_CODE (scan) == CODE_LABEL && ! foundinsn)
4921 if (! INSN_P (scan))
4924 /* Don't try to trace forward past a JUMP. To optimize
4925 safely, we would have to check that all the
4926 instructions at the jump destination did not use REG. */
4928 if (GET_CODE (scan) == JUMP_INSN)
4931 if (! reg_mentioned_p (reg, scan))
4934 if (noncall_uses_reg (reg, scan, &scanset))
4941 && (GET_CODE (scan) == CALL_INSN || sfunc_uses_reg (scan)))
4943 /* There is a function call to this register other
4944 than the one we are checking. If we optimize
4945 this call, we need to rescan again below. */
4949 /* ??? We shouldn't have to worry about SCANSET here.
4950 We should just be able to check for a REG_DEAD note
4951 on a function call. However, the REG_DEAD notes are
4952 apparently not dependable around libcalls; c-torture
4953 execute/920501-2 is a test case. If SCANSET is set,
4954 then this insn sets the register, so it must have
4955 died earlier. Unfortunately, this will only handle
4956 the cases in which the register is, in fact, set in a
4959 /* ??? We shouldn't have to use FOUNDINSN here.
4960 This dates back to when we used LOG_LINKS to find
4961 the most recent insn which sets the register. */
4965 || find_reg_note (scan, REG_DEAD, reg)))
4974 /* Either there was a branch, or some insn used REG
4975 other than as a function call address. */
4979 /* Create a code label, and put it in a REG_LABEL_OPERAND note
4980 on the insn which sets the register, and on each call insn
4981 which uses the register. In final_prescan_insn we look for
4982 the REG_LABEL_OPERAND notes, and output the appropriate label
4985 label = gen_label_rtx ();
4986 add_reg_note (link, REG_LABEL_OPERAND, label);
4987 add_reg_note (insn, REG_LABEL_OPERAND, label);
4995 scan = NEXT_INSN (scan);
4997 && ((GET_CODE (scan) == CALL_INSN
4998 && reg_mentioned_p (reg, scan))
4999 || ((reg2 = sfunc_uses_reg (scan))
5000 && REGNO (reg2) == REGNO (reg))))
5001 add_reg_note (scan, REG_LABEL_OPERAND, label);
5003 while (scan != dies);
5009 fixup_addr_diff_vecs (first);
5013 mdep_reorg_phase = SH_SHORTEN_BRANCHES0;
5014 shorten_branches (first);
5017 /* Scan the function looking for move instructions which have to be
5018 changed to pc-relative loads and insert the literal tables. */
5019 label_ref_list_pool = create_alloc_pool ("label references list",
5020 sizeof (struct label_ref_list_d),
5022 mdep_reorg_phase = SH_FIXUP_PCLOAD;
5023 for (insn = first, num_mova = 0; insn; insn = NEXT_INSN (insn))
5027 /* ??? basic block reordering can move a switch table dispatch
5028 below the switch table. Check if that has happened.
5029 We only have the addresses available when optimizing; but then,
5030 this check shouldn't be needed when not optimizing. */
5031 if (!untangle_mova (&num_mova, &mova, insn))
5037 else if (GET_CODE (insn) == JUMP_INSN
5038 && GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
5040 /* ??? loop invariant motion can also move a mova out of a
5041 loop. Since loop does this code motion anyway, maybe we
5042 should wrap UNSPEC_MOVA into a CONST, so that reload can
5045 && GET_MODE (prev_nonnote_insn (insn)) == VOIDmode)
5046 || (prev_nonnote_insn (insn)
5047 == XEXP (MOVA_LABELREF (mova), 0))))
5054 /* Some code might have been inserted between the mova and
5055 its ADDR_DIFF_VEC. Check if the mova is still in range. */
5056 for (scan = mova, total = 0; scan != insn; scan = NEXT_INSN (scan))
5057 total += get_attr_length (scan);
/* The range of the mova is 1020; add 4 because the pc counts from the
   address of the second instruction after this one, and subtract 2 in
   case the pc is 2-byte aligned.  Possible alignment needed for the ADDR_DIFF_VEC
5062 cancels out with alignment effects of the mova itself. */
5065 /* Change the mova into a load, and restart scanning
5066 there. broken_move will then return true for mova. */
5071 if (broken_move (insn)
5072 || (GET_CODE (insn) == INSN
5073 && recog_memoized (insn) == CODE_FOR_casesi_worker_2))
5076 /* Scan ahead looking for a barrier to stick the constant table
5078 rtx barrier = find_barrier (num_mova, mova, insn);
5079 rtx last_float_move = NULL_RTX, last_float = 0, *last_float_addr = NULL;
5080 int need_aligned_label = 0;
5082 if (num_mova && ! mova_p (mova))
5084 /* find_barrier had to change the first mova into a
5085 pcload; thus, we have to start with this new pcload. */
5089 /* Now find all the moves between the points and modify them. */
5090 for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
5092 if (GET_CODE (scan) == CODE_LABEL)
5094 if (GET_CODE (scan) == INSN
5095 && recog_memoized (scan) == CODE_FOR_casesi_worker_2)
5096 need_aligned_label = 1;
5097 if (broken_move (scan))
5099 rtx *patp = &PATTERN (scan), pat = *patp;
5103 enum machine_mode mode;
5105 if (GET_CODE (pat) == PARALLEL)
5106 patp = &XVECEXP (pat, 0, 0), pat = *patp;
5107 src = SET_SRC (pat);
5108 dst = SET_DEST (pat);
5109 mode = GET_MODE (dst);
5111 if (mode == SImode && hi_const (src)
5112 && REGNO (dst) != FPUL_REG)
5117 while (GET_CODE (dst) == SUBREG)
5119 offset += subreg_regno_offset (REGNO (SUBREG_REG (dst)),
5120 GET_MODE (SUBREG_REG (dst)),
5123 dst = SUBREG_REG (dst);
5125 dst = gen_rtx_REG (HImode, REGNO (dst) + offset);
5127 if (GET_CODE (dst) == REG && FP_ANY_REGISTER_P (REGNO (dst)))
5129 /* This must be an insn that clobbers r0. */
5130 rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
5131 XVECLEN (PATTERN (scan), 0)
5133 rtx clobber = *clobberp;
5135 gcc_assert (GET_CODE (clobber) == CLOBBER
5136 && rtx_equal_p (XEXP (clobber, 0), r0_rtx));
5139 && reg_set_between_p (r0_rtx, last_float_move, scan))
5143 && GET_MODE_SIZE (mode) != 4
5144 && GET_MODE_SIZE (GET_MODE (last_float)) == 4)
5146 lab = add_constant (src, mode, last_float);
5148 emit_insn_before (gen_mova (lab), scan);
5151 /* There will be a REG_UNUSED note for r0 on
5152 LAST_FLOAT_MOVE; we have to change it to REG_INC,
otherwise reorg's mark_target_live_regs will not
consider r0 to be used, and we could end up with a delay
slot insn in front of SCAN that clobbers r0.  */
5157 = find_regno_note (last_float_move, REG_UNUSED, 0);
5159 /* If we are not optimizing, then there may not be
5162 PUT_MODE (note, REG_INC);
5164 *last_float_addr = r0_inc_rtx;
5166 last_float_move = scan;
5168 newsrc = gen_const_mem (mode,
5169 (((TARGET_SH4 && ! TARGET_FMOVD)
5170 || REGNO (dst) == FPUL_REG)
5173 last_float_addr = &XEXP (newsrc, 0);
5175 /* Remove the clobber of r0. */
5176 *clobberp = gen_rtx_CLOBBER (GET_MODE (clobber),
5177 gen_rtx_SCRATCH (Pmode));
5179 /* This is a mova needing a label. Create it. */
5180 else if (GET_CODE (src) == UNSPEC
5181 && XINT (src, 1) == UNSPEC_MOVA
5182 && GET_CODE (XVECEXP (src, 0, 0)) == CONST)
5184 lab = add_constant (XVECEXP (src, 0, 0), mode, 0);
5185 newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
5186 newsrc = gen_rtx_UNSPEC (SImode,
5187 gen_rtvec (1, newsrc),
5192 lab = add_constant (src, mode, 0);
5193 newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
5194 newsrc = gen_const_mem (mode, newsrc);
5196 *patp = gen_rtx_SET (VOIDmode, dst, newsrc);
5197 INSN_CODE (scan) = -1;
5200 dump_table (need_aligned_label ? insn : 0, barrier);
5204 free_alloc_pool (label_ref_list_pool);
5205 for (insn = first; insn; insn = NEXT_INSN (insn))
5206 PUT_MODE (insn, VOIDmode);
5208 mdep_reorg_phase = SH_SHORTEN_BRANCHES1;
5209 INSN_ADDRESSES_FREE ();
5210 split_branches (first);
5212 /* The INSN_REFERENCES_ARE_DELAYED in sh.h is problematic because it
5213 also has an effect on the register that holds the address of the sfunc.
5214 Insert an extra dummy insn in front of each sfunc that pretends to
5215 use this register. */
5216 if (flag_delayed_branch)
5218 for (insn = first; insn; insn = NEXT_INSN (insn))
5220 rtx reg = sfunc_uses_reg (insn);
5224 emit_insn_before (gen_use_sfunc_addr (reg), insn);
5228 /* fpscr is not actually a user variable, but we pretend it is for the
5229 sake of the previous optimization passes, since we want it handled like
5230 one. However, we don't have any debugging information for it, so turn
5231 it into a non-user variable now. */
5233 REG_USERVAR_P (get_fpscr_rtx ()) = 0;
5235 mdep_reorg_phase = SH_AFTER_MDEP_REORG;
5239 get_dest_uid (rtx label, int max_uid)
5241 rtx dest = next_real_insn (label);
5244 /* This can happen for an undefined label. */
5246 dest_uid = INSN_UID (dest);
5247 /* If this is a newly created branch redirection blocking instruction,
5248 we cannot index the branch_uid or insn_addresses arrays with its
5249 uid. But then, we won't need to, because the actual destination is
5250 the following branch. */
5251 while (dest_uid >= max_uid)
5253 dest = NEXT_INSN (dest);
5254 dest_uid = INSN_UID (dest);
5256 if (GET_CODE (dest) == JUMP_INSN && GET_CODE (PATTERN (dest)) == RETURN)
5261 /* Split condbranches that are out of range. Also add clobbers for
5262 scratch registers that are needed in far jumps.
5263 We do this before delay slot scheduling, so that it can take our
5264 newly created instructions into account. It also allows us to
5265 find branches with common targets more easily. */
5268 split_branches (rtx first)
5271 struct far_branch **uid_branch, *far_branch_list = 0;
5272 int max_uid = get_max_uid ();
5275 /* Find out which branches are out of range. */
5276 shorten_branches (first);
5278 uid_branch = (struct far_branch **) alloca (max_uid * sizeof *uid_branch);
5279 memset ((char *) uid_branch, 0, max_uid * sizeof *uid_branch);
5281 for (insn = first; insn; insn = NEXT_INSN (insn))
5282 if (! INSN_P (insn))
5284 else if (INSN_DELETED_P (insn))
5286 /* Shorten_branches would split this instruction again,
5287 so transform it into a note. */
5288 SET_INSN_DELETED (insn);
5290 else if (GET_CODE (insn) == JUMP_INSN
5291 /* Don't mess with ADDR_DIFF_VEC */
5292 && (GET_CODE (PATTERN (insn)) == SET
5293 || GET_CODE (PATTERN (insn)) == RETURN))
5295 enum attr_type type = get_attr_type (insn);
5296 if (type == TYPE_CBRANCH)
5300 if (get_attr_length (insn) > 4)
5302 rtx src = SET_SRC (PATTERN (insn));
5303 rtx olabel = XEXP (XEXP (src, 1), 0);
5304 int addr = INSN_ADDRESSES (INSN_UID (insn));
5306 int dest_uid = get_dest_uid (olabel, max_uid);
5307 struct far_branch *bp = uid_branch[dest_uid];
5309 /* redirect_jump needs a valid JUMP_LABEL, and it might delete
5310 the label if the LABEL_NUSES count drops to zero. There is
5311 always a jump_optimize pass that sets these values, but it
5312 proceeds to delete unreferenced code, and then if not
5313 optimizing, to un-delete the deleted instructions, thus
leaving labels with use counts that are too low.  */
5317 JUMP_LABEL (insn) = olabel;
5318 LABEL_NUSES (olabel)++;
5322 bp = (struct far_branch *) alloca (sizeof *bp);
5323 uid_branch[dest_uid] = bp;
5324 bp->prev = far_branch_list;
5325 far_branch_list = bp;
5327 = XEXP (XEXP (SET_SRC (PATTERN (insn)), 1), 0);
5328 LABEL_NUSES (bp->far_label)++;
5332 label = bp->near_label;
5333 if (! label && bp->address - addr >= CONDJUMP_MIN)
5335 rtx block = bp->insert_place;
5337 if (GET_CODE (PATTERN (block)) == RETURN)
5338 block = PREV_INSN (block);
5340 block = gen_block_redirect (block,
5342 label = emit_label_after (gen_label_rtx (),
5344 bp->near_label = label;
5346 else if (label && ! NEXT_INSN (label))
5348 if (addr + 2 - bp->address <= CONDJUMP_MAX)
5349 bp->insert_place = insn;
5351 gen_far_branch (bp);
5355 || (NEXT_INSN (label) && bp->address - addr < CONDJUMP_MIN))
5357 bp->near_label = label = gen_label_rtx ();
5358 bp->insert_place = insn;
5361 ok = redirect_jump (insn, label, 0);
5366 /* get_attr_length (insn) == 2 */
5367 /* Check if we have a pattern where reorg wants to redirect
5368 the branch to a label from an unconditional branch that
5370 /* We can't use JUMP_LABEL here because it might be undefined
5371 when not optimizing. */
5372 /* A syntax error might cause beyond to be NULL_RTX. */
5374 = next_active_insn (XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
5378 && (GET_CODE (beyond) == JUMP_INSN
5379 || ((beyond = next_active_insn (beyond))
5380 && GET_CODE (beyond) == JUMP_INSN))
5381 && GET_CODE (PATTERN (beyond)) == SET
5382 && recog_memoized (beyond) == CODE_FOR_jump_compact
5384 (INSN_UID (XEXP (SET_SRC (PATTERN (beyond)), 0)))
5385 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
5387 gen_block_redirect (beyond,
5388 INSN_ADDRESSES (INSN_UID (beyond)), 1);
5391 next = next_active_insn (insn);
5393 if ((GET_CODE (next) == JUMP_INSN
5394 || ((next = next_active_insn (next))
5395 && GET_CODE (next) == JUMP_INSN))
5396 && GET_CODE (PATTERN (next)) == SET
5397 && recog_memoized (next) == CODE_FOR_jump_compact
5399 (INSN_UID (XEXP (SET_SRC (PATTERN (next)), 0)))
5400 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
5402 gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), 1);
5404 else if (type == TYPE_JUMP || type == TYPE_RETURN)
5406 int addr = INSN_ADDRESSES (INSN_UID (insn));
5409 struct far_branch *bp;
5411 if (type == TYPE_JUMP)
5413 far_label = XEXP (SET_SRC (PATTERN (insn)), 0);
5414 dest_uid = get_dest_uid (far_label, max_uid);
5417 /* Parse errors can lead to labels outside
5419 if (! NEXT_INSN (far_label))
5424 JUMP_LABEL (insn) = far_label;
5425 LABEL_NUSES (far_label)++;
5427 redirect_jump (insn, NULL_RTX, 1);
5431 bp = uid_branch[dest_uid];
5434 bp = (struct far_branch *) alloca (sizeof *bp);
5435 uid_branch[dest_uid] = bp;
5436 bp->prev = far_branch_list;
5437 far_branch_list = bp;
5439 bp->far_label = far_label;
5441 LABEL_NUSES (far_label)++;
5443 else if (bp->near_label && ! NEXT_INSN (bp->near_label))
5444 if (addr - bp->address <= CONDJUMP_MAX)
5445 emit_label_after (bp->near_label, PREV_INSN (insn));
5448 gen_far_branch (bp);
5454 bp->insert_place = insn;
5456 emit_insn_before (gen_block_branch_redirect (const0_rtx), insn);
5458 gen_block_redirect (insn, addr, bp->near_label ? 2 : 0);
5461 /* Generate all pending far branches,
5462 and free our references to the far labels. */
5463 while (far_branch_list)
5465 if (far_branch_list->near_label
5466 && ! NEXT_INSN (far_branch_list->near_label))
5467 gen_far_branch (far_branch_list);
5469 && far_branch_list->far_label
5470 && ! --LABEL_NUSES (far_branch_list->far_label))
5471 delete_insn (far_branch_list->far_label);
5472 far_branch_list = far_branch_list->prev;
5475 /* Instruction length information is no longer valid due to the new
5476 instructions that have been generated. */
5477 init_insn_lengths ();
5480 /* Dump out instruction addresses, which is useful for debugging the
5481 constant pool table stuff.
5483 If relaxing, output the label and pseudo-ops used to link together
calls and the instructions which set the registers.  */
5486 /* ??? The addresses printed by this routine for insns are nonsense for
5487 insns which are inside of a sequence where none of the inner insns have
5488 variable length. This is because the second pass of shorten_branches
5489 does not bother to update them. */
5492 final_prescan_insn (rtx insn, rtx *opvec ATTRIBUTE_UNUSED,
5493 int noperands ATTRIBUTE_UNUSED)
5495 if (TARGET_DUMPISIZE)
5496 fprintf (asm_out_file, "\n! at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));
5502 note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
5507 pattern = PATTERN (insn);
5508 if (GET_CODE (pattern) == PARALLEL)
5509 pattern = XVECEXP (pattern, 0, 0);
5510 switch (GET_CODE (pattern))
5513 if (GET_CODE (SET_SRC (pattern)) != CALL
5514 && get_attr_type (insn) != TYPE_SFUNC)
5516 targetm.asm_out.internal_label
5517 (asm_out_file, "L", CODE_LABEL_NUMBER (XEXP (note, 0)));
5520 /* else FALLTHROUGH */
5522 asm_fprintf (asm_out_file, "\t.uses %LL%d\n",
5523 CODE_LABEL_NUMBER (XEXP (note, 0)));
5533 /* Dump out any constants accumulated in the final pass. These will
5537 output_jump_label_table (void)
5543 fprintf (asm_out_file, "\t.align 2\n");
5544 for (i = 0; i < pool_size; i++)
5546 pool_node *p = &pool_vector[i];
5548 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5549 CODE_LABEL_NUMBER (p->label));
5550 output_asm_insn (".long %O0", &p->value);
5558 /* A full frame looks like:
5562 [ if current_function_anonymous_args
5575 local-0 <- fp points here. */
5577 /* Number of bytes pushed for anonymous args, used to pass information
5578 between expand_prologue and expand_epilogue. */
5580 /* Adjust the stack by SIZE bytes. REG holds the rtl of the register to be
5581 adjusted. If epilogue_p is zero, this is for a prologue; otherwise, it's
5582 for an epilogue and a negative value means that it's for a sibcall
5583 epilogue. If LIVE_REGS_MASK is nonzero, it points to a HARD_REG_SET of
5584 all the registers that are about to be restored, and hence dead. */
5587 output_stack_adjust (int size, rtx reg, int epilogue_p,
5588 HARD_REG_SET *live_regs_mask)
5590 rtx (*emit_fn) (rtx) = epilogue_p ? &emit_insn : &frame_insn;
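/* For a prologue (EPILOGUE_P == 0) we go through frame_insn, which marks
   the adjustment as frame-related for the DWARF CFI machinery; epilogues
   use plain emit_insn.  */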
5593 HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
5595 /* This test is bogus, as output_stack_adjust is used to re-align the
5598 gcc_assert (!(size % align));
5601 if (CONST_OK_FOR_ADD (size))
5602 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size)));
5603 /* Try to do it with two partial adjustments; however, we must make
5604 sure that the stack is properly aligned at all times, in case
5605 an interrupt occurs between the two partial adjustments. */
5606 else if (CONST_OK_FOR_ADD (size / 2 & -align)
5607 && CONST_OK_FOR_ADD (size - (size / 2 & -align)))
5609 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size / 2 & -align)));
5610 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size - (size / 2 & -align))));
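/* Example (assuming the non-SHmedia add-immediate range of -128..127 and
   ALIGN == 8): SIZE == 192 gives 192 / 2 & -8 == 96, so we emit two adds
   of 96; each fits the immediate range, and the intermediate stack
   pointer stays 8-byte aligned.  */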
5616 int temp = epilogue_p ? 7 : (TARGET_SH5 ? 0 : 1);
5619 /* If TEMP is invalid, we could temporarily save a general
5620 register to MACL. However, there is currently no need
5621 to handle this case, so just die when we see it. */
5623 || current_function_interrupt
5624 || ! call_really_used_regs[temp] || fixed_regs[temp])
5626 if (temp < 0 && ! current_function_interrupt
5627 && (TARGET_SHMEDIA || epilogue_p >= 0))
5630 COPY_HARD_REG_SET (temps, call_used_reg_set);
5631 AND_COMPL_HARD_REG_SET (temps, call_fixed_reg_set);
5635 if (crtl->return_rtx)
5637 enum machine_mode mode;
5638 mode = GET_MODE (crtl->return_rtx);
5639 if (BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG)
5640 nreg = HARD_REGNO_NREGS (FIRST_RET_REG, mode);
5642 for (i = 0; i < nreg; i++)
5643 CLEAR_HARD_REG_BIT (temps, FIRST_RET_REG + i);
5644 if (crtl->calls_eh_return)
5646 CLEAR_HARD_REG_BIT (temps, EH_RETURN_STACKADJ_REGNO);
5647 for (i = 0; i <= 3; i++)
5648 CLEAR_HARD_REG_BIT (temps, EH_RETURN_DATA_REGNO (i));
5651 if (TARGET_SHMEDIA && epilogue_p < 0)
5652 for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
5653 CLEAR_HARD_REG_BIT (temps, i);
5654 if (epilogue_p <= 0)
5656 for (i = FIRST_PARM_REG;
5657 i < FIRST_PARM_REG + NPARM_REGS (SImode); i++)
5658 CLEAR_HARD_REG_BIT (temps, i);
5659 if (cfun->static_chain_decl != NULL)
5660 CLEAR_HARD_REG_BIT (temps, STATIC_CHAIN_REGNUM);
5662 temp = scavenge_reg (&temps);
5664 if (temp < 0 && live_regs_mask)
5668 COPY_HARD_REG_SET (temps, *live_regs_mask);
5669 CLEAR_HARD_REG_BIT (temps, REGNO (reg));
5670 temp = scavenge_reg (&temps);
5674 rtx adj_reg, tmp_reg, mem;
5676 /* If we reached here, the most likely case is the (sibcall)
epilogue for non-SHmedia.  Use a special push/pop sequence
for such cases as a last resort.  This looks lengthy, but it
would not be a problem because it seems to be very
5682 gcc_assert (!TARGET_SHMEDIA && epilogue_p);
5685 /* ??? There is still the slight possibility that r4 or
5686 r5 have been reserved as fixed registers or assigned
5687 as global registers, and they change during an
5688 interrupt. There are possible ways to handle this:
5690 - If we are adjusting the frame pointer (r14), we can do
5691 with a single temp register and an ordinary push / pop
5693 - Grab any call-used or call-saved registers (i.e. not
5694 fixed or globals) for the temps we need. We might
5695 also grab r14 if we are adjusting the stack pointer.
5696 If we can't find enough available registers, issue
5697 a diagnostic and die - the user must have reserved
5698 way too many registers.
5699 But since all this is rather unlikely to happen and
5700 would require extra testing, we just die if r4 / r5
5701 are not available. */
5702 gcc_assert (!fixed_regs[4] && !fixed_regs[5]
5703 && !global_regs[4] && !global_regs[5]);
5705 adj_reg = gen_rtx_REG (GET_MODE (reg), 4);
5706 tmp_reg = gen_rtx_REG (GET_MODE (reg), 5);
5707 emit_move_insn (gen_tmp_stack_mem (Pmode, reg), adj_reg);
5708 emit_insn (GEN_MOV (adj_reg, GEN_INT (size)));
5709 emit_insn (GEN_ADD3 (adj_reg, adj_reg, reg));
5710 mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
5711 emit_move_insn (mem, tmp_reg);
5712 emit_move_insn (tmp_reg, gen_tmp_stack_mem (Pmode, reg));
5713 mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
5714 emit_move_insn (mem, tmp_reg);
5715 emit_move_insn (reg, adj_reg);
5716 mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
5717 emit_move_insn (adj_reg, mem);
5718 mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
5719 emit_move_insn (tmp_reg, mem);
5720 /* Tell flow the insns that pop r4/r5 aren't dead. */
5725 const_reg = gen_rtx_REG (GET_MODE (reg), temp);
5727 /* If SIZE is negative, subtract the positive value.
5728 This sometimes allows a constant pool entry to be shared
5729 between prologue and epilogue code. */
5732 emit_insn (GEN_MOV (const_reg, GEN_INT (-size)));
5733 insn = emit_fn (GEN_SUB3 (reg, reg, const_reg));
5737 emit_insn (GEN_MOV (const_reg, GEN_INT (size)));
5738 insn = emit_fn (GEN_ADD3 (reg, reg, const_reg));
5742 = (gen_rtx_EXPR_LIST
5743 (REG_FRAME_RELATED_EXPR,
5744 gen_rtx_SET (VOIDmode, reg,
5745 gen_rtx_PLUS (SImode, reg, GEN_INT (size))),
5755 RTX_FRAME_RELATED_P (x) = 1;
5759 /* Output RTL to push register RN onto the stack. */
5766 x = gen_push_fpul ();
5767 else if (rn == FPSCR_REG)
5768 x = gen_push_fpscr ();
5769 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
5770 && FP_OR_XD_REGISTER_P (rn))
5772 if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
5774 x = gen_push_4 (gen_rtx_REG (DFmode, rn));
5776 else if (TARGET_SH2E && FP_REGISTER_P (rn))
5777 x = gen_push_e (gen_rtx_REG (SFmode, rn));
5779 x = gen_push (gen_rtx_REG (SImode, rn));
5783 = gen_rtx_EXPR_LIST (REG_INC,
5784 gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
5788 /* Output RTL to pop register RN from the stack. */
5795 x = gen_pop_fpul ();
5796 else if (rn == FPSCR_REG)
5797 x = gen_pop_fpscr ();
5798 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
5799 && FP_OR_XD_REGISTER_P (rn))
5801 if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
5803 x = gen_pop_4 (gen_rtx_REG (DFmode, rn));
5805 else if (TARGET_SH2E && FP_REGISTER_P (rn))
5806 x = gen_pop_e (gen_rtx_REG (SFmode, rn));
5808 x = gen_pop (gen_rtx_REG (SImode, rn));
5812 = gen_rtx_EXPR_LIST (REG_INC,
5813 gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
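/* As in push () above, the REG_INC note just added records the stack
   pointer auto-modification performed by the pop patterns, so later
   passes see that r15 changes here.  */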
5816 /* Generate code to push the regs specified in the mask. */
5819 push_regs (HARD_REG_SET *mask, int interrupt_handler)
5821 int i = interrupt_handler ? LAST_BANKED_REG + 1 : 0;
5824 /* Push PR last; this gives better latencies after the prologue, and
5825 candidates for the return delay slot when there are no general
5826 registers pushed. */
5827 for (; i < FIRST_PSEUDO_REGISTER; i++)
5829 /* If this is an interrupt handler, and the SZ bit varies,
5830 and we have to push any floating point register, we need
5831 to switch to the correct precision first. */
5832 if (i == FIRST_FP_REG && interrupt_handler && TARGET_FMOVD
5833 && hard_reg_set_intersect_p (*mask, reg_class_contents[DF_REGS]))
5835 HARD_REG_SET unsaved;
5838 COMPL_HARD_REG_SET (unsaved, *mask);
5839 fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
5843 && (i != FPSCR_REG || ! skip_fpscr)
5844 && TEST_HARD_REG_BIT (*mask, i))
/* If the ISR has the RESBANK attribute assigned, don't push any of
5847 the following registers - R0-R14, MACH, MACL and GBR. */
5848 if (! (sh_cfun_resbank_handler_p ()
5849 && ((i >= FIRST_GENERAL_REG && i < LAST_GENERAL_REG)
5857 /* Push banked registers last to improve delay slot opportunities. */
5858 if (interrupt_handler)
5859 for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
5860 if (TEST_HARD_REG_BIT (*mask, i))
/* Don't push the PR register for an ISR with the RESBANK attribute.  */
5864 if (TEST_HARD_REG_BIT (*mask, PR_REG) && !sh_cfun_resbank_handler_p ())
5868 /* Calculate how much extra space is needed to save all callee-saved
5870 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
5873 shmedia_target_regs_stack_space (HARD_REG_SET *live_regs_mask)
5876 int stack_space = 0;
5877 int interrupt_handler = sh_cfun_interrupt_handler_p ();
5879 for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
5880 if ((! call_really_used_regs[reg] || interrupt_handler)
5881 && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
5882 /* Leave space to save this target register on the stack,
5883 in case target register allocation wants to use it. */
5884 stack_space += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
5888 /* Decide whether we should reserve space for callee-save target registers,
5889 in case target register allocation wants to use them. REGS_SAVED is
5890 the space, in bytes, that is already required for register saves.
5891 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
5894 shmedia_reserve_space_for_target_registers_p (int regs_saved,
5895 HARD_REG_SET *live_regs_mask)
5899 return shmedia_target_regs_stack_space (live_regs_mask) <= regs_saved;
5902 /* Decide how much space to reserve for callee-save target registers
5903 in case target register allocation wants to use them.
5904 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
5907 shmedia_target_regs_stack_adjust (HARD_REG_SET *live_regs_mask)
5909 if (shmedia_space_reserved_for_target_registers)
5910 return shmedia_target_regs_stack_space (live_regs_mask);
5915 /* Work out the registers which need to be saved, both as a mask and a
5916 count of saved words. Return the count.
5918 If doing a pragma interrupt function, then push all regs used by the
5919 function, and if we call another function (we can tell by looking at PR),
5920 make sure that all the regs it clobbers are safe too. */
5923 calc_live_regs (HARD_REG_SET *live_regs_mask)
5928 bool interrupt_or_trapa_handler, trapa_handler, interrupt_handler;
5929 bool nosave_low_regs;
5930 int pr_live, has_call;
5932 attrs = DECL_ATTRIBUTES (current_function_decl);
5933 interrupt_or_trapa_handler = sh_cfun_interrupt_handler_p ();
5934 trapa_handler = lookup_attribute ("trapa_handler", attrs) != NULL_TREE;
5935 interrupt_handler = interrupt_or_trapa_handler && ! trapa_handler;
5936 nosave_low_regs = lookup_attribute ("nosave_low_regs", attrs) != NULL_TREE;
5938 CLEAR_HARD_REG_SET (*live_regs_mask);
5939 if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && interrupt_handler
5940 && df_regs_ever_live_p (FPSCR_REG))
5941 target_flags &= ~MASK_FPU_SINGLE;
/* If switching to double mode saves a lot of register saves, do that.  */
5943 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && TARGET_FPU_SINGLE)
5944 for (count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
5945 if (df_regs_ever_live_p (reg) && df_regs_ever_live_p (reg+1)
5946 && (! call_really_used_regs[reg]
5947 || interrupt_handler)
5950 target_flags &= ~MASK_FPU_SINGLE;
5953 /* PR_MEDIA_REG is a general purpose register, thus global_alloc already
5954 knows how to use it. That means the pseudo originally allocated for
5955 the initial value can become the PR_MEDIA_REG hard register, as seen for
5956 execute/20010122-1.c:test9. */
5958 /* ??? this function is called from initial_elimination_offset, hence we
5959 can't use the result of sh_media_register_for_return here. */
5960 pr_live = sh_pr_n_sets ();
5963 rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
5964 pr_live = (pr_initial
5965 ? (GET_CODE (pr_initial) != REG
5966 || REGNO (pr_initial) != (PR_REG))
5967 : df_regs_ever_live_p (PR_REG));
/* For SHcompact, if not optimizing, we end up with a memory reference
5969 using the return address pointer for __builtin_return_address even
5970 though there is no actual need to put the PR register on the stack. */
5971 pr_live |= df_regs_ever_live_p (RETURN_ADDRESS_POINTER_REGNUM);
5973 /* Force PR to be live if the prologue has to call the SHmedia
5974 argument decoder or register saver. */
5975 if (TARGET_SHCOMPACT
5976 && ((crtl->args.info.call_cookie
5977 & ~ CALL_COOKIE_RET_TRAMP (1))
5978 || crtl->saves_all_registers))
5980 has_call = TARGET_SHMEDIA ? ! leaf_function_p () : pr_live;
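/* On non-SHmedia targets, a live PR is used as the "makes calls"
   indicator: PR holds the return address and is clobbered by calls, so
   PR_LIVE approximates whether this function calls others.  */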
5981 for (count = 0, reg = FIRST_PSEUDO_REGISTER; reg-- != 0; )
5983 if (reg == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG)
5986 ? (/* Need to save all the regs ever live. */
5987 (df_regs_ever_live_p (reg)
5988 || (call_really_used_regs[reg]
5989 && (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG
5990 || reg == PIC_OFFSET_TABLE_REGNUM)
5992 || (TARGET_SHMEDIA && has_call
5993 && REGISTER_NATURAL_MODE (reg) == SImode
5994 && (GENERAL_REGISTER_P (reg) || TARGET_REGISTER_P (reg))))
5995 && reg != STACK_POINTER_REGNUM && reg != ARG_POINTER_REGNUM
5996 && reg != RETURN_ADDRESS_POINTER_REGNUM
5997 && reg != T_REG && reg != GBR_REG
/* Push fpscr only on targets which have an FPU.  */
5999 && (reg != FPSCR_REG || TARGET_FPU_ANY))
6000 : (/* Only push those regs which are used and need to be saved. */
6003 && crtl->args.info.call_cookie
6004 && reg == PIC_OFFSET_TABLE_REGNUM)
6005 || (df_regs_ever_live_p (reg)
6006 && (!call_really_used_regs[reg]
6007 || (trapa_handler && reg == FPSCR_REG && TARGET_FPU_ANY)))
6008 || (crtl->calls_eh_return
6009 && (reg == EH_RETURN_DATA_REGNO (0)
6010 || reg == EH_RETURN_DATA_REGNO (1)
6011 || reg == EH_RETURN_DATA_REGNO (2)
6012 || reg == EH_RETURN_DATA_REGNO (3)))
6013 || ((reg == MACL_REG || reg == MACH_REG)
6014 && df_regs_ever_live_p (reg)
6015 && sh_cfun_attr_renesas_p ())
6018 SET_HARD_REG_BIT (*live_regs_mask, reg);
6019 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
6021 if ((TARGET_SH4 || TARGET_SH2A_DOUBLE || TARGET_SH5) && TARGET_FMOVD
6022 && GET_MODE_CLASS (REGISTER_NATURAL_MODE (reg)) == MODE_FLOAT)
6024 if (FP_REGISTER_P (reg))
6026 if (! TARGET_FPU_SINGLE && ! df_regs_ever_live_p (reg ^ 1))
6028 SET_HARD_REG_BIT (*live_regs_mask, (reg ^ 1));
6029 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg ^ 1));
6032 else if (XD_REGISTER_P (reg))
6034 /* Must switch to double mode to access these registers. */
6035 target_flags &= ~MASK_FPU_SINGLE;
6039 if (nosave_low_regs && reg == R8_REG)
6042 /* If we have a target register optimization pass after prologue / epilogue
6043 threading, we need to assume all target registers will be live even if
6045 if (flag_branch_target_load_optimize2
6046 && TARGET_SAVE_ALL_TARGET_REGS
6047 && shmedia_space_reserved_for_target_registers)
6048 for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
6049 if ((! call_really_used_regs[reg] || interrupt_handler)
6050 && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
6052 SET_HARD_REG_BIT (*live_regs_mask, reg);
6053 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
6055 /* If this is an interrupt handler, we don't have any call-clobbered
6056 registers we can conveniently use for target register save/restore.
6057 Make sure we save at least one general purpose register when we need
6058 to save target registers. */
6059 if (interrupt_handler
6060 && hard_reg_set_intersect_p (*live_regs_mask,
6061 reg_class_contents[TARGET_REGS])
6062 && ! hard_reg_set_intersect_p (*live_regs_mask,
6063 reg_class_contents[GENERAL_REGS]))
6065 SET_HARD_REG_BIT (*live_regs_mask, R0_REG);
6066 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (R0_REG));
6072 /* Code to generate prologue and epilogue sequences */
6074 /* PUSHED is the number of bytes that are being pushed on the
6075 stack for register saves. Return the frame size, padded
6076 appropriately so that the stack stays properly aligned. */
6077 static HOST_WIDE_INT
6078 rounded_frame_size (int pushed)
6080 HOST_WIDE_INT size = get_frame_size ();
6081 HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
6083 return ((size + pushed + align - 1) & -align) - pushed;
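/* Example: with an 8-byte STACK_BOUNDARY, get_frame_size () == 20 and
   PUSHED == 12 gives ((20 + 12 + 7) & -8) - 12 == 32 - 12 == 20, keeping
   the register saves plus the frame an 8-byte multiple overall.  */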
6086 /* Choose a call-clobbered target-branch register that remains
6087 unchanged along the whole function. We set it up as the return
6088 value in the prologue. */
6090 sh_media_register_for_return (void)
6095 if (! current_function_is_leaf)
6097 if (lookup_attribute ("interrupt_handler",
6098 DECL_ATTRIBUTES (current_function_decl)))
6100 if (sh_cfun_interrupt_handler_p ())
6103 tr0_used = flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
6105 for (regno = FIRST_TARGET_REG + tr0_used; regno <= LAST_TARGET_REG; regno++)
6106 if (call_really_used_regs[regno] && ! df_regs_ever_live_p (regno))
6112 /* The maximum registers we need to save are:
6113 - 62 general purpose registers (r15 is stack pointer, r63 is zero)
6114 - 32 floating point registers (for each pair, we save none,
6115 one single precision value, or a double precision value).
6116 - 8 target registers
6117 - add 1 entry for a delimiter. */
6118 #define MAX_SAVED_REGS (62+32+8)
6120 typedef struct save_entry_s
6129 /* There will be a delimiter entry with VOIDmode both at the start and the
6130 end of a filled in schedule. The end delimiter has the offset of the
6131 save with the smallest (i.e. most negative) offset. */
6132 typedef struct save_schedule_s
6134 save_entry entries[MAX_SAVED_REGS + 2];
6135 int temps[MAX_TEMPS+1];
6138 /* Fill in SCHEDULE according to LIVE_REGS_MASK. If RESTORE is nonzero,
6139 use reverse order. Returns the last entry written to (not counting
6140 the delimiter). OFFSET_BASE is a number to be added to all offset
6144 sh5_schedule_saves (HARD_REG_SET *live_regs_mask, save_schedule *schedule,
6148 save_entry *entry = schedule->entries;
6152 if (! current_function_interrupt)
6153 for (i = FIRST_GENERAL_REG; tmpx < MAX_TEMPS && i <= LAST_GENERAL_REG; i++)
6154 if (call_really_used_regs[i] && ! fixed_regs[i] && i != PR_MEDIA_REG
6155 && ! FUNCTION_ARG_REGNO_P (i)
6156 && i != FIRST_RET_REG
6157 && ! (cfun->static_chain_decl != NULL && i == STATIC_CHAIN_REGNUM)
6158 && ! (crtl->calls_eh_return
6159 && (i == EH_RETURN_STACKADJ_REGNO
6160 || ((unsigned) i >= EH_RETURN_DATA_REGNO (0)
6161 && (unsigned) i <= EH_RETURN_DATA_REGNO (3)))))
6162 schedule->temps[tmpx++] = i;
6164 entry->mode = VOIDmode;
6165 entry->offset = offset_base;
6167 /* We loop twice: first, we save 8-byte aligned registers in the
higher addresses, which are known to be aligned.  Then, we
proceed to saving 32-bit registers that don't need 8-byte
alignment.
If this is an interrupt function, all registers that need saving
need to be saved in full.  Moreover, we need to postpone saving
6173 target registers till we have saved some general purpose registers
6174 we can then use as scratch registers. */
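/* Concretely (assuming the usual 8-byte SH5 stack boundary), the
   ALIGN == 1 pass places the 8-byte DImode / DFmode saves, and the
   ALIGN == 0 pass fills in the remaining 4-byte saves below them.  */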
6175 offset = offset_base;
6176 for (align = 1; align >= 0; align--)
6178 for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
6179 if (TEST_HARD_REG_BIT (*live_regs_mask, i))
6181 enum machine_mode mode = REGISTER_NATURAL_MODE (i);
6184 if (current_function_interrupt)
6186 if (TARGET_REGISTER_P (i))
6188 if (GENERAL_REGISTER_P (i))
6191 if (mode == SFmode && (i % 2) == 1
6192 && ! TARGET_FPU_SINGLE && FP_REGISTER_P (i)
6193 && (TEST_HARD_REG_BIT (*live_regs_mask, (i ^ 1))))
6200 /* If we're doing the aligned pass and this is not aligned,
6201 or we're doing the unaligned pass and this is aligned,
6203 if ((GET_MODE_SIZE (mode) % (STACK_BOUNDARY / BITS_PER_UNIT) == 0)
6207 if (current_function_interrupt
6208 && GENERAL_REGISTER_P (i)
6209 && tmpx < MAX_TEMPS)
6210 schedule->temps[tmpx++] = i;
6212 offset -= GET_MODE_SIZE (mode);
6215 entry->offset = offset;
6218 if (align && current_function_interrupt)
6219 for (i = LAST_TARGET_REG; i >= FIRST_TARGET_REG; i--)
6220 if (TEST_HARD_REG_BIT (*live_regs_mask, i))
6222 offset -= GET_MODE_SIZE (DImode);
6224 entry->mode = DImode;
6225 entry->offset = offset;
6230 entry->mode = VOIDmode;
6231 entry->offset = offset;
6232 schedule->temps[tmpx] = -1;
6237 sh_expand_prologue (void)
6239 HARD_REG_SET live_regs_mask;
6242 int save_flags = target_flags;
6245 = lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl));
6247 current_function_interrupt = sh_cfun_interrupt_handler_p ();
6249 /* We have pretend args if we had an object sent partially in registers
6250 and partially on the stack, e.g. a large structure. */
6251 pretend_args = crtl->args.pretend_args_size;
6252 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl)
6253 && (NPARM_REGS(SImode)
6254 > crtl->args.info.arg_count[(int) SH_ARG_INT]))
6256 output_stack_adjust (-pretend_args
6257 - crtl->args.info.stack_regs * 8,
6258 stack_pointer_rtx, 0, NULL);
6260 if (TARGET_SHCOMPACT && flag_pic && crtl->args.info.call_cookie)
6261 /* We're going to use the PIC register to load the address of the
6262 incoming-argument decoder and/or of the return trampoline from
6263 the GOT, so make sure the PIC register is preserved and
6265 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
6267 if (TARGET_SHCOMPACT
6268 && (crtl->args.info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
6272 /* First, make all registers with incoming arguments that will
6273 be pushed onto the stack live, so that register renaming
6274 doesn't overwrite them. */
6275 for (reg = 0; reg < NPARM_REGS (SImode); reg++)
6276 if (CALL_COOKIE_STACKSEQ_GET (crtl->args.info.call_cookie)
6277 >= NPARM_REGS (SImode) - reg)
6278 for (; reg < NPARM_REGS (SImode); reg++)
6279 emit_insn (gen_shcompact_preserve_incoming_args
6280 (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
6281 else if (CALL_COOKIE_INT_REG_GET
6282 (crtl->args.info.call_cookie, reg) == 1)
6283 emit_insn (gen_shcompact_preserve_incoming_args
6284 (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
6286 emit_move_insn (gen_rtx_REG (Pmode, MACL_REG),
6288 emit_move_insn (gen_rtx_REG (SImode, R0_REG),
6289 GEN_INT (crtl->args.info.call_cookie));
6290 emit_move_insn (gen_rtx_REG (SImode, MACH_REG),
6291 gen_rtx_REG (SImode, R0_REG));
6293 else if (TARGET_SHMEDIA)
6295 int tr = sh_media_register_for_return ();
6298 emit_move_insn (gen_rtx_REG (DImode, tr),
6299 gen_rtx_REG (DImode, PR_MEDIA_REG));
6302 /* Emit the code for SETUP_VARARGS. */
6305 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
/* Push arg regs as if they'd been provided by the caller on the stack.  */
6308 for (i = 0; i < NPARM_REGS(SImode); i++)
6310 int rn = NPARM_REGS(SImode) + FIRST_PARM_REG - i - 1;
6313 if (i >= (NPARM_REGS(SImode)
6314 - crtl->args.info.arg_count[(int) SH_ARG_INT]
6322 /* If we're supposed to switch stacks at function entry, do so now. */
6325 /* The argument specifies a variable holding the address of the
6326 stack the interrupt function should switch to/from at entry/exit. */
6328 = ggc_strdup (TREE_STRING_POINTER (TREE_VALUE (sp_switch_attr)));
6329 rtx sp_switch = gen_rtx_SYMBOL_REF (Pmode, s);
6331 emit_insn (gen_sp_switch_1 (sp_switch));
6334 d = calc_live_regs (&live_regs_mask);
6335 /* ??? Maybe we could save some switching if we can move a mode switch
6336 that already happens to be at the function start into the prologue. */
6337 if (target_flags != save_flags && ! current_function_interrupt)
6338 emit_insn (gen_toggle_sz ());
6342 int offset_base, offset;
6344 int offset_in_r0 = -1;
6346 int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
6347 int total_size, save_size;
6348 save_schedule schedule;
6352 if (call_really_used_regs[R0_REG] && ! fixed_regs[R0_REG]
6353 && ! current_function_interrupt)
6354 r0 = gen_rtx_REG (Pmode, R0_REG);
/* D is the actual number of bytes that we need for saving registers;
6357 however, in initial_elimination_offset we have committed to using
6358 an additional TREGS_SPACE amount of bytes - in order to keep both
6359 addresses to arguments supplied by the caller and local variables
6360 valid, we must keep this gap. Place it between the incoming
6361 arguments and the actually saved registers in a bid to optimize
6362 locality of reference. */
6363 total_size = d + tregs_space;
6364 total_size += rounded_frame_size (total_size);
6365 save_size = total_size - rounded_frame_size (d);
6366 if (save_size % (STACK_BOUNDARY / BITS_PER_UNIT))
6367 d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
6368 - save_size % (STACK_BOUNDARY / BITS_PER_UNIT));
6370 /* If adjusting the stack in a single step costs nothing extra, do so.
6371 I.e. either if a single addi is enough, or we need a movi anyway,
6372 and we don't exceed the maximum offset range (the test for the
6373 latter is conservative for simplicity). */
6375 && (CONST_OK_FOR_I10 (-total_size)
6376 || (! CONST_OK_FOR_I10 (-(save_size + d_rounding))
6377 && total_size <= 2044)))
6378 d_rounding = total_size - save_size;
6380 offset_base = d + d_rounding;
6382 output_stack_adjust (-(save_size + d_rounding), stack_pointer_rtx,
6385 sh5_schedule_saves (&live_regs_mask, &schedule, offset_base);
6386 tmp_pnt = schedule.temps;
6387 for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
6389 enum machine_mode mode = entry->mode;
6390 unsigned int reg = entry->reg;
6391 rtx reg_rtx, mem_rtx, pre_dec = NULL_RTX;
6394 offset = entry->offset;
6396 reg_rtx = gen_rtx_REG (mode, reg);
6398 mem_rtx = gen_frame_mem (mode,
6399 gen_rtx_PLUS (Pmode,
6403 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_pre_dec);
6410 if (HAVE_PRE_DECREMENT
6411 && (offset_in_r0 - offset == GET_MODE_SIZE (mode)
6412 || mem_rtx == NULL_RTX
6413 || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
6415 pre_dec = gen_frame_mem (mode, gen_rtx_PRE_DEC (Pmode, r0));
6417 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (pre_dec, 0),
6426 offset += GET_MODE_SIZE (mode);
6430 if (mem_rtx != NULL_RTX)
6433 if (offset_in_r0 == -1)
6435 emit_move_insn (r0, GEN_INT (offset));
6436 offset_in_r0 = offset;
6438 else if (offset != offset_in_r0)
6443 GEN_INT (offset - offset_in_r0)));
6444 offset_in_r0 += offset - offset_in_r0;
6447 if (pre_dec != NULL_RTX)
6453 (Pmode, r0, stack_pointer_rtx));
6457 offset -= GET_MODE_SIZE (mode);
6458 offset_in_r0 -= GET_MODE_SIZE (mode);
6463 mem_rtx = gen_frame_mem (mode, r0);
6465 mem_rtx = gen_frame_mem (mode,
6466 gen_rtx_PLUS (Pmode,
6470 /* We must not use an r0-based address for target-branch
6471 registers or for special registers without pre-dec
6472 memory addresses, since we store their values in r0
6474 gcc_assert (!TARGET_REGISTER_P (reg)
6475 && ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
6476 || mem_rtx == pre_dec));
6479 orig_reg_rtx = reg_rtx;
6480 if (TARGET_REGISTER_P (reg)
6481 || ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
6482 && mem_rtx != pre_dec))
6484 rtx tmp_reg = gen_rtx_REG (GET_MODE (reg_rtx), *tmp_pnt);
6486 emit_move_insn (tmp_reg, reg_rtx);
6488 if (REGNO (tmp_reg) == R0_REG)
6492 gcc_assert (!refers_to_regno_p
6493 (R0_REG, R0_REG+1, mem_rtx, (rtx *) 0));
6496 if (*++tmp_pnt <= 0)
6497 tmp_pnt = schedule.temps;
/* Mark as interesting for the DWARF CFI generator.  */
6505 insn = emit_move_insn (mem_rtx, reg_rtx);
6506 RTX_FRAME_RELATED_P (insn) = 1;
6507 /* If we use an intermediate register for the save, we can't
6508 describe this exactly in cfi as a copy of the to-be-saved
6509 register into the temporary register and then the temporary
6510 register on the stack, because the temporary register can
6511 have a different natural size than the to-be-saved register.
6512 Thus, we gloss over the intermediate copy and pretend we do
6513 a direct save from the to-be-saved register. */
6514 if (REGNO (reg_rtx) != reg)
6518 set = gen_rtx_SET (VOIDmode, mem_rtx, orig_reg_rtx);
6519 note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
6521 REG_NOTES (insn) = note_rtx;
6524 if (TARGET_SHCOMPACT && (offset_in_r0 != -1))
6526 rtx reg_rtx = gen_rtx_REG (mode, reg);
6528 rtx mem_rtx = gen_frame_mem (mode,
6529 gen_rtx_PLUS (Pmode,
6533 set = gen_rtx_SET (VOIDmode, mem_rtx, reg_rtx);
6534 note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
6536 REG_NOTES (insn) = note_rtx;
6541 gcc_assert (entry->offset == d_rounding);
6544 push_regs (&live_regs_mask, current_function_interrupt);
6546 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
6547 emit_insn (gen_GOTaddr2picreg ());
6549 if (SHMEDIA_REGS_STACK_ADJUST ())
6551 /* This must NOT go through the PLT, otherwise mach and macl
6552 may be clobbered. */
6553 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6555 ? "__GCC_push_shmedia_regs"
6556 : "__GCC_push_shmedia_regs_nofpu"), SFUNC_GOT);
6557 emit_insn (gen_shmedia_save_restore_regs_compact
6558 (GEN_INT (-SHMEDIA_REGS_STACK_ADJUST ())));
6561 if (target_flags != save_flags && ! current_function_interrupt)
6562 emit_insn (gen_toggle_sz ());
6564 target_flags = save_flags;
6566 output_stack_adjust (-rounded_frame_size (d) + d_rounding,
6567 stack_pointer_rtx, 0, NULL);
6569 if (frame_pointer_needed)
6570 frame_insn (GEN_MOV (hard_frame_pointer_rtx, stack_pointer_rtx));
6572 if (TARGET_SHCOMPACT
6573 && (crtl->args.info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
6575 /* This must NOT go through the PLT, otherwise mach and macl
6576 may be clobbered. */
6577 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6578 "__GCC_shcompact_incoming_args", SFUNC_GOT);
6579 emit_insn (gen_shcompact_incoming_args ());
6584 sh_expand_epilogue (bool sibcall_p)
6586 HARD_REG_SET live_regs_mask;
6590 int save_flags = target_flags;
6591 int frame_size, save_size;
6592 int fpscr_deferred = 0;
6593 int e = sibcall_p ? -1 : 1;
6595 d = calc_live_regs (&live_regs_mask);
6598 frame_size = rounded_frame_size (d);
6602 int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
6604 if (d % (STACK_BOUNDARY / BITS_PER_UNIT))
6605 d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
6606 - d % (STACK_BOUNDARY / BITS_PER_UNIT));
6608 total_size = d + tregs_space;
6609 total_size += rounded_frame_size (total_size);
6610 save_size = total_size - frame_size;
6612 /* If adjusting the stack in a single step costs nothing extra, do so.
6613 I.e. either if a single addi is enough, or we need a movi anyway,
6614 and we don't exceed the maximum offset range (the test for the
6615 latter is conservative for simplicity). */
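/* Worked example (illustrative numbers): with save_size + d_rounding
   == 520 (not an I10 constant, so a movi is needed anyway) and
   frame_size == 100, total_size == 620 <= 2044, so the test below
   folds both adjustments into one instead of emitting two.  */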
6617 && ! frame_pointer_needed
6618 && (CONST_OK_FOR_I10 (total_size)
6619 || (! CONST_OK_FOR_I10 (save_size + d_rounding)
6620 && total_size <= 2044)))
6621 d_rounding = frame_size;
6623 frame_size -= d_rounding;
6626 if (frame_pointer_needed)
6628 /* We must avoid scheduling the epilogue with previous basic blocks
6629 when exception handling is enabled. See PR/18032. */
6630 if (flag_exceptions)
6631 emit_insn (gen_blockage ());
6632 output_stack_adjust (frame_size, hard_frame_pointer_rtx, e,
6635 /* We must avoid moving the stack pointer adjustment past code
6636 which reads from the local frame, else an interrupt could
6637 occur after the SP adjustment and clobber data in the local frame. */
6639 emit_insn (gen_blockage ());
6640 emit_insn (GEN_MOV (stack_pointer_rtx, hard_frame_pointer_rtx));
6642 else if (frame_size)
6644 /* We must avoid moving the stack pointer adjustment past code
6645 which reads from the local frame, else an interrupt could
6646 occur after the SP adjustment and clobber data in the local frame. */
6648 emit_insn (gen_blockage ());
6649 output_stack_adjust (frame_size, stack_pointer_rtx, e, &live_regs_mask);
6652 if (SHMEDIA_REGS_STACK_ADJUST ())
6654 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6656 ? "__GCC_pop_shmedia_regs"
6657 : "__GCC_pop_shmedia_regs_nofpu"), SFUNC_GOT);
6658 /* This must NOT go through the PLT, otherwise mach and macl
6659 may be clobbered. */
6660 emit_insn (gen_shmedia_save_restore_regs_compact
6661 (GEN_INT (SHMEDIA_REGS_STACK_ADJUST ())));
6664 /* Pop all the registers. */
6666 if (target_flags != save_flags && ! current_function_interrupt)
6667 emit_insn (gen_toggle_sz ());
6670 int offset_base, offset;
6671 int offset_in_r0 = -1;
6673 rtx r0 = gen_rtx_REG (Pmode, R0_REG);
6674 save_schedule schedule;
6678 entry = sh5_schedule_saves (&live_regs_mask, &schedule, d_rounding);
6679 offset_base = -entry[1].offset + d_rounding;
6680 tmp_pnt = schedule.temps;
6681 for (; entry->mode != VOIDmode; entry--)
6683 enum machine_mode mode = entry->mode;
6684 int reg = entry->reg;
6685 rtx reg_rtx, mem_rtx, post_inc = NULL_RTX, insn;
6687 offset = offset_base + entry->offset;
6688 reg_rtx = gen_rtx_REG (mode, reg);
6690 mem_rtx = gen_frame_mem (mode,
6691 gen_rtx_PLUS (Pmode,
6695 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_post_inc);
6701 if (HAVE_POST_INCREMENT
6702 && (offset == offset_in_r0
6703 || (offset + GET_MODE_SIZE (mode) != d + d_rounding
6704 && mem_rtx == NULL_RTX)
6705 || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
6707 post_inc = gen_frame_mem (mode, gen_rtx_POST_INC (Pmode, r0));
6709 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (post_inc, 0),
6712 post_inc = NULL_RTX;
6721 if (mem_rtx != NULL_RTX)
6724 if (offset_in_r0 == -1)
6726 emit_move_insn (r0, GEN_INT (offset));
6727 offset_in_r0 = offset;
6729 else if (offset != offset_in_r0)
6734 GEN_INT (offset - offset_in_r0)));
6735 offset_in_r0 += offset - offset_in_r0;
6738 if (post_inc != NULL_RTX)
6744 (Pmode, r0, stack_pointer_rtx));
6750 offset_in_r0 += GET_MODE_SIZE (mode);
6753 mem_rtx = gen_frame_mem (mode, r0);
6755 mem_rtx = gen_frame_mem (mode,
6756 gen_rtx_PLUS (Pmode,
6760 gcc_assert ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
6761 || mem_rtx == post_inc);
6764 if ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
6765 && mem_rtx != post_inc)
6767 insn = emit_move_insn (r0, mem_rtx);
6770 else if (TARGET_REGISTER_P (reg))
6772 rtx tmp_reg = gen_rtx_REG (mode, *tmp_pnt);
6774 /* Give the scheduler a bit of freedom by using up to
6775 MAX_TEMPS registers in a round-robin fashion. */
6776 insn = emit_move_insn (tmp_reg, mem_rtx);
6779 tmp_pnt = schedule.temps;
6782 insn = emit_move_insn (reg_rtx, mem_rtx);
6785 gcc_assert (entry->offset + offset_base == d + d_rounding);
6787 else /* ! TARGET_SH5 */
6792 /* For an ISR with RESBANK attribute assigned, don't pop the PR register. */
6794 if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG)
6795 && !sh_cfun_resbank_handler_p ())
6797 if (!frame_pointer_needed)
6798 emit_insn (gen_blockage ());
6802 /* Banked registers are popped first to avoid being scheduled in the
6803 delay slot. RTE switches banks before the ds instruction. */
6804 if (current_function_interrupt)
6806 for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
6807 if (TEST_HARD_REG_BIT (live_regs_mask, i))
6808 pop (LAST_BANKED_REG - i);
6810 last_reg = FIRST_PSEUDO_REGISTER - LAST_BANKED_REG - 1;
6813 last_reg = FIRST_PSEUDO_REGISTER;
6815 for (i = 0; i < last_reg; i++)
6817 int j = (FIRST_PSEUDO_REGISTER - 1) - i;
6819 if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
6820 && hard_reg_set_intersect_p (live_regs_mask,
6821 reg_class_contents[DF_REGS]))
6823 /* For an ISR with RESBANK attribute assigned, don't pop
6824 the following registers: R0-R14, MACH, MACL and GBR. */
6825 else if (j != PR_REG && TEST_HARD_REG_BIT (live_regs_mask, j)
6826 && ! (sh_cfun_resbank_handler_p ()
6827 && ((j >= FIRST_GENERAL_REG
6828 && j < LAST_GENERAL_REG)
6834 if (j == FIRST_FP_REG && fpscr_deferred)
6838 if (target_flags != save_flags && ! current_function_interrupt)
6839 emit_insn (gen_toggle_sz ());
6840 target_flags = save_flags;
6842 output_stack_adjust (crtl->args.pretend_args_size
6843 + save_size + d_rounding
6844 + crtl->args.info.stack_regs * 8,
6845 stack_pointer_rtx, e, NULL);
6847 if (crtl->calls_eh_return)
6848 emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
6849 EH_RETURN_STACKADJ_RTX));
6851 /* Switch back to the normal stack if necessary. */
6852 if (lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl)))
6853 emit_insn (gen_sp_switch_2 ());
6855 /* Tell flow the insn that pops PR isn't dead. */
6856 /* PR_REG will never be live in SHmedia mode, and we don't need to
6857 USE PR_MEDIA_REG, since it will be explicitly copied to TR0_REG
6858 by the return pattern. */
6859 if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
6860 emit_use (gen_rtx_REG (SImode, PR_REG));
6863 static int sh_need_epilogue_known = 0;
6866 sh_need_epilogue (void)
6868 if (! sh_need_epilogue_known)
6873 sh_expand_epilogue (0);
6874 epilogue = get_insns ();
6876 sh_need_epilogue_known = (epilogue == NULL ? -1 : 1);
6878 return sh_need_epilogue_known > 0;
6881 /* Emit code to change the current function's return address to RA.
6882 TEMP is available as a scratch register, if needed. */
6885 sh_set_return_address (rtx ra, rtx tmp)
6887 HARD_REG_SET live_regs_mask;
6889 int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
6892 d = calc_live_regs (&live_regs_mask);
6894 /* If pr_reg isn't live, we can set it (or the register given in
6895 sh_media_register_for_return) directly. */
6896 if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
6902 int rr_regno = sh_media_register_for_return ();
6907 rr = gen_rtx_REG (DImode, rr_regno);
6910 rr = gen_rtx_REG (SImode, pr_reg);
6912 emit_insn (GEN_MOV (rr, ra));
6913 /* Tell flow the register for return isn't dead. */
6921 save_schedule schedule;
6924 entry = sh5_schedule_saves (&live_regs_mask, &schedule, 0);
6925 offset = entry[1].offset;
6926 for (; entry->mode != VOIDmode; entry--)
6927 if (entry->reg == pr_reg)
6930 /* We can't find the pr register. */
6934 offset = entry->offset - offset;
6935 pr_offset = (rounded_frame_size (d) + offset
6936 + SHMEDIA_REGS_STACK_ADJUST ());
6939 pr_offset = rounded_frame_size (d);
6941 emit_insn (GEN_MOV (tmp, GEN_INT (pr_offset)));
6942 emit_insn (GEN_ADD3 (tmp, tmp, hard_frame_pointer_rtx));
6944 tmp = gen_frame_mem (Pmode, tmp);
6945 emit_insn (GEN_MOV (tmp, ra));
6948 /* Clear variables at function end. */
6951 sh_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6952 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6954 sh_need_epilogue_known = 0;
6958 sh_builtin_saveregs (void)
6960 /* First unnamed integer register. */
6961 int first_intreg = crtl->args.info.arg_count[(int) SH_ARG_INT];
6962 /* Number of integer registers we need to save. */
6963 int n_intregs = MAX (0, NPARM_REGS (SImode) - first_intreg);
6964 /* First unnamed SFmode float reg */
6965 int first_floatreg = crtl->args.info.arg_count[(int) SH_ARG_FLOAT];
6966 /* Number of SFmode float regs to save. */
6967 int n_floatregs = MAX (0, NPARM_REGS (SFmode) - first_floatreg);
6970 alias_set_type alias_set;
6976 int pushregs = n_intregs;
6978 while (pushregs < NPARM_REGS (SImode) - 1
6979 && (CALL_COOKIE_INT_REG_GET
6980 (crtl->args.info.call_cookie,
6981 NPARM_REGS (SImode) - pushregs)
6984 crtl->args.info.call_cookie
6985 &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
6990 if (pushregs == NPARM_REGS (SImode))
6991 crtl->args.info.call_cookie
6992 |= (CALL_COOKIE_INT_REG (0, 1)
6993 | CALL_COOKIE_STACKSEQ (pushregs - 1));
6995 crtl->args.info.call_cookie
6996 |= CALL_COOKIE_STACKSEQ (pushregs);
6998 crtl->args.pretend_args_size += 8 * n_intregs;
7000 if (TARGET_SHCOMPACT)
7004 if (! TARGET_SH2E && ! TARGET_SH4 && ! TARGET_SH5)
7006 error ("__builtin_saveregs not supported by this subtarget");
7013 /* Allocate block of memory for the regs. */
7014 /* ??? If n_intregs + n_floatregs == 0, should we allocate at least 1 byte?
7015 Or can assign_stack_local accept a 0 SIZE argument? */
7016 bufsize = (n_intregs * UNITS_PER_WORD) + (n_floatregs * UNITS_PER_WORD);
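/* Worked example (illustrative): with n_intregs == 3, n_floatregs == 5
   and UNITS_PER_WORD == 4, bufsize is 3*4 + 5*4 == 32 bytes.  */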
7019 regbuf = gen_frame_mem (BLKmode, gen_rtx_REG (Pmode, ARG_POINTER_REGNUM));
7020 else if (n_floatregs & 1)
7024 regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
7025 addr = copy_to_mode_reg (Pmode, XEXP (regbuf, 0));
7026 emit_insn (gen_iorsi3 (addr, addr, GEN_INT (UNITS_PER_WORD)));
7027 regbuf = change_address (regbuf, BLKmode, addr);
7029 else if (STACK_BOUNDARY < 64 && TARGET_FPU_DOUBLE && n_floatregs)
7033 regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
7034 addr = copy_to_mode_reg (Pmode, plus_constant (XEXP (regbuf, 0), 4));
7035 mask = copy_to_mode_reg (Pmode, GEN_INT (-8));
7036 emit_insn (gen_andsi3 (addr, addr, mask));
7037 regbuf = change_address (regbuf, BLKmode, addr);
7040 regbuf = assign_stack_local (BLKmode, bufsize, TARGET_FPU_DOUBLE ? 64 : 0);
7041 alias_set = get_varargs_alias_set ();
7042 set_mem_alias_set (regbuf, alias_set);
7044 /* Save int args.
7045 This is optimized to only save the regs that are necessary. Explicitly
7046 named args need not be saved. */
7048 move_block_from_reg (BASE_ARG_REG (SImode) + first_intreg,
7049 adjust_address (regbuf, BLKmode,
7050 n_floatregs * UNITS_PER_WORD),
7054 /* Return the address of the regbuf. */
7055 return XEXP (regbuf, 0);
7057 /* Save FP args.
7058 This is optimized to only save the regs that are necessary. Explicitly
7059 named args need not be saved.
7060 We explicitly build a pointer to the buffer because it halves the insn
7061 count when not optimizing (otherwise the pointer is built for each reg saved).
7063 We emit the moves in reverse order so that we can use predecrement. */
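/* Sketch of the resulting sequence (illustrative, two SFmode regs):
       fpregs = buffer_base + n_floatregs * UNITS_PER_WORD;
       fpregs -= 4;  store fr5 at *fpregs;
       fpregs -= 4;  store fr4 at *fpregs;
   i.e. each save first decrements the pointer, which matches a
   pre-decrement addressing pattern.  */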
7065 fpregs = copy_to_mode_reg (Pmode,
7066 plus_constant (XEXP (regbuf, 0),
7067 n_floatregs * UNITS_PER_WORD));
7068 if (TARGET_SH4 || TARGET_SH2A_DOUBLE)
7071 for (regno = NPARM_REGS (DFmode) - 2; regno >= first_floatreg; regno -= 2)
7073 emit_insn (gen_addsi3 (fpregs, fpregs,
7074 GEN_INT (-2 * UNITS_PER_WORD)));
7075 mem = change_address (regbuf, DFmode, fpregs);
7076 emit_move_insn (mem,
7077 gen_rtx_REG (DFmode, BASE_ARG_REG (DFmode) + regno));
7079 regno = first_floatreg;
7082 emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
7083 mem = change_address (regbuf, SFmode, fpregs);
7084 emit_move_insn (mem,
7085 gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno
7086 - (TARGET_LITTLE_ENDIAN != 0)));
7090 for (regno = NPARM_REGS (SFmode) - 1; regno >= first_floatreg; regno--)
7094 emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
7095 mem = change_address (regbuf, SFmode, fpregs);
7096 emit_move_insn (mem,
7097 gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno));
7100 /* Return the address of the regbuf. */
7101 return XEXP (regbuf, 0);
7104 /* Define the `__builtin_va_list' type for the ABI. */
7107 sh_build_builtin_va_list (void)
7109 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7112 if (TARGET_SH5 || (! TARGET_SH2E && ! TARGET_SH4)
7113 || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
7114 return ptr_type_node;
7116 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
7118 f_next_o = build_decl (FIELD_DECL, get_identifier ("__va_next_o"),
7120 f_next_o_limit = build_decl (FIELD_DECL,
7121 get_identifier ("__va_next_o_limit"),
7123 f_next_fp = build_decl (FIELD_DECL, get_identifier ("__va_next_fp"),
7125 f_next_fp_limit = build_decl (FIELD_DECL,
7126 get_identifier ("__va_next_fp_limit"),
7128 f_next_stack = build_decl (FIELD_DECL, get_identifier ("__va_next_stack"),
7131 DECL_FIELD_CONTEXT (f_next_o) = record;
7132 DECL_FIELD_CONTEXT (f_next_o_limit) = record;
7133 DECL_FIELD_CONTEXT (f_next_fp) = record;
7134 DECL_FIELD_CONTEXT (f_next_fp_limit) = record;
7135 DECL_FIELD_CONTEXT (f_next_stack) = record;
7137 TYPE_FIELDS (record) = f_next_o;
7138 TREE_CHAIN (f_next_o) = f_next_o_limit;
7139 TREE_CHAIN (f_next_o_limit) = f_next_fp;
7140 TREE_CHAIN (f_next_fp) = f_next_fp_limit;
7141 TREE_CHAIN (f_next_fp_limit) = f_next_stack;
7143 layout_type (record);
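/* For illustration (a sketch, assuming the pointer-typed fields that
   sh_va_start below manipulates), the record built above corresponds
   roughly to:

       struct __va_list_tag
       {
         void *__va_next_o;           next overflow (integer) argument
         void *__va_next_o_limit;     end of the integer register-save area
         void *__va_next_fp;          next floating-point argument
         void *__va_next_fp_limit;    end of the FP register-save area
         void *__va_next_stack;       next stack-passed argument
       };  */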
7148 /* Implement `va_start' for varargs and stdarg. */
7151 sh_va_start (tree valist, rtx nextarg)
7153 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7154 tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
7160 expand_builtin_saveregs ();
7161 std_expand_builtin_va_start (valist, nextarg);
7165 if ((! TARGET_SH2E && ! TARGET_SH4)
7166 || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
7168 std_expand_builtin_va_start (valist, nextarg);
7172 f_next_o = TYPE_FIELDS (va_list_type_node);
7173 f_next_o_limit = TREE_CHAIN (f_next_o);
7174 f_next_fp = TREE_CHAIN (f_next_o_limit);
7175 f_next_fp_limit = TREE_CHAIN (f_next_fp);
7176 f_next_stack = TREE_CHAIN (f_next_fp_limit);
7178 next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
7180 next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
7181 valist, f_next_o_limit, NULL_TREE);
7182 next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp,
7184 next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
7185 valist, f_next_fp_limit, NULL_TREE);
7186 next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
7187 valist, f_next_stack, NULL_TREE);
7189 /* Call __builtin_saveregs. */
7190 u = make_tree (sizetype, expand_builtin_saveregs ());
7191 u = fold_convert (ptr_type_node, u);
7192 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp, u);
7193 TREE_SIDE_EFFECTS (t) = 1;
7194 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7196 nfp = crtl->args.info.arg_count[SH_ARG_FLOAT];
7201 u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
7202 size_int (UNITS_PER_WORD * nfp));
7203 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp_limit, u);
7204 TREE_SIDE_EFFECTS (t) = 1;
7205 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7207 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_o, u);
7208 TREE_SIDE_EFFECTS (t) = 1;
7209 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7211 nint = crtl->args.info.arg_count[SH_ARG_INT];
7216 u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
7217 size_int (UNITS_PER_WORD * nint));
7218 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_o_limit, u);
7219 TREE_SIDE_EFFECTS (t) = 1;
7220 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7222 u = make_tree (ptr_type_node, nextarg);
7223 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_stack, u);
7224 TREE_SIDE_EFFECTS (t) = 1;
7225 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7228 /* TYPE is a RECORD_TYPE. If there is only a single nonzero-sized
7229 member, return it. */
7231 find_sole_member (tree type)
7233 tree field, member = NULL_TREE;
7235 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
7237 if (TREE_CODE (field) != FIELD_DECL)
7239 if (!DECL_SIZE (field))
7241 if (integer_zerop (DECL_SIZE (field)))
7249 /* Implement `va_arg'. */
7252 sh_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
7253 tree *post_p ATTRIBUTE_UNUSED)
7255 HOST_WIDE_INT size, rsize;
7256 tree tmp, pptr_type_node;
7257 tree addr, lab_over = NULL, result = NULL;
7258 int pass_by_ref = targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
7262 type = build_pointer_type (type);
7264 size = int_size_in_bytes (type);
7265 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
7266 pptr_type_node = build_pointer_type (ptr_type_node);
7268 if (! TARGET_SH5 && (TARGET_SH2E || TARGET_SH4)
7269 && ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
7271 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7272 tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
7277 f_next_o = TYPE_FIELDS (va_list_type_node);
7278 f_next_o_limit = TREE_CHAIN (f_next_o);
7279 f_next_fp = TREE_CHAIN (f_next_o_limit);
7280 f_next_fp_limit = TREE_CHAIN (f_next_fp);
7281 f_next_stack = TREE_CHAIN (f_next_fp_limit);
7283 next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
7285 next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
7286 valist, f_next_o_limit, NULL_TREE);
7287 next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp),
7288 valist, f_next_fp, NULL_TREE);
7289 next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
7290 valist, f_next_fp_limit, NULL_TREE);
7291 next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
7292 valist, f_next_stack, NULL_TREE);
7294 /* Structures with a single member with a distinct mode are passed
7295 like their member. This is relevant if the latter has a REAL_TYPE
7296 or COMPLEX_TYPE type. */
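/* E.g. (illustrative) struct { float f; } is passed like a plain float,
   and struct { _Complex double c; } like a _Complex double, so such
   single-member wrappers can still travel in floating-point registers.  */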
7298 while (TREE_CODE (eff_type) == RECORD_TYPE
7299 && (member = find_sole_member (eff_type))
7300 && (TREE_CODE (TREE_TYPE (member)) == REAL_TYPE
7301 || TREE_CODE (TREE_TYPE (member)) == COMPLEX_TYPE
7302 || TREE_CODE (TREE_TYPE (member)) == RECORD_TYPE))
7304 tree field_type = TREE_TYPE (member);
7306 if (TYPE_MODE (eff_type) == TYPE_MODE (field_type))
7307 eff_type = field_type;
7310 gcc_assert ((TYPE_ALIGN (eff_type)
7311 < GET_MODE_ALIGNMENT (TYPE_MODE (field_type)))
7312 || (TYPE_ALIGN (eff_type)
7313 > GET_MODE_BITSIZE (TYPE_MODE (field_type))));
7318 if (TARGET_SH4 || TARGET_SH2A_DOUBLE)
7320 pass_as_float = ((TREE_CODE (eff_type) == REAL_TYPE && size <= 8)
7321 || (TREE_CODE (eff_type) == COMPLEX_TYPE
7322 && TREE_CODE (TREE_TYPE (eff_type)) == REAL_TYPE
7327 pass_as_float = (TREE_CODE (eff_type) == REAL_TYPE && size == 4);
7330 addr = create_tmp_var (pptr_type_node, NULL);
7331 lab_false = create_artificial_label ();
7332 lab_over = create_artificial_label ();
7334 valist = build1 (INDIRECT_REF, ptr_type_node, addr);
7338 tree next_fp_tmp = create_tmp_var (TREE_TYPE (f_next_fp), NULL);
7340 bool is_double = size == 8 && TREE_CODE (eff_type) == REAL_TYPE;
7342 tmp = build1 (ADDR_EXPR, pptr_type_node, next_fp);
7343 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
7344 gimplify_and_add (tmp, pre_p);
7346 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp_tmp, valist);
7347 gimplify_and_add (tmp, pre_p);
7348 tmp = next_fp_limit;
7349 if (size > 4 && !is_double)
7350 tmp = build2 (POINTER_PLUS_EXPR, TREE_TYPE (tmp), tmp,
7351 size_int (4 - size));
7352 tmp = build2 (GE_EXPR, boolean_type_node, next_fp_tmp, tmp);
7353 cmp = build3 (COND_EXPR, void_type_node, tmp,
7354 build1 (GOTO_EXPR, void_type_node, lab_false),
7357 gimplify_and_add (cmp, pre_p);
7359 if (TYPE_ALIGN (eff_type) > BITS_PER_WORD
7360 || (is_double || size == 16))
7362 tmp = fold_convert (sizetype, next_fp_tmp);
7363 tmp = build2 (BIT_AND_EXPR, sizetype, tmp,
7364 size_int (UNITS_PER_WORD));
7365 tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node,
7367 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node,
7369 gimplify_and_add (tmp, pre_p);
7372 gimplify_and_add (cmp, pre_p);
7374 #ifdef FUNCTION_ARG_SCmode_WART
7375 if (TYPE_MODE (eff_type) == SCmode
7376 && TARGET_SH4 && TARGET_LITTLE_ENDIAN)
7378 tree subtype = TREE_TYPE (eff_type);
7382 = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
7383 imag = get_initialized_tmp_var (imag, pre_p, NULL);
7386 = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
7387 real = get_initialized_tmp_var (real, pre_p, NULL);
7389 result = build2 (COMPLEX_EXPR, type, real, imag);
7390 result = get_initialized_tmp_var (result, pre_p, NULL);
7392 #endif /* FUNCTION_ARG_SCmode_WART */
7394 tmp = build1 (GOTO_EXPR, void_type_node, lab_over);
7395 gimplify_and_add (tmp, pre_p);
7397 tmp = build1 (LABEL_EXPR, void_type_node, lab_false);
7398 gimplify_and_add (tmp, pre_p);
7400 tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
7401 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
7402 gimplify_and_add (tmp, pre_p);
7403 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp_tmp, valist);
7404 gimplify_and_add (tmp, pre_p);
7406 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, valist, next_fp_tmp);
7407 gimplify_and_add (tmp, post_p);
7408 valist = next_fp_tmp;
7412 tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node, next_o,
7414 tmp = build2 (GT_EXPR, boolean_type_node, tmp, next_o_limit);
7415 tmp = build3 (COND_EXPR, void_type_node, tmp,
7416 build1 (GOTO_EXPR, void_type_node, lab_false),
7418 gimplify_and_add (tmp, pre_p);
7420 tmp = build1 (ADDR_EXPR, pptr_type_node, next_o);
7421 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
7422 gimplify_and_add (tmp, pre_p);
7424 tmp = build1 (GOTO_EXPR, void_type_node, lab_over);
7425 gimplify_and_add (tmp, pre_p);
7427 tmp = build1 (LABEL_EXPR, void_type_node, lab_false);
7428 gimplify_and_add (tmp, pre_p);
7430 if (size > 4 && ! (TARGET_SH4 || TARGET_SH2A))
7432 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node,
7433 next_o, next_o_limit);
7434 gimplify_and_add (tmp, pre_p);
7437 tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
7438 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
7439 gimplify_and_add (tmp, pre_p);
7444 tmp = build1 (LABEL_EXPR, void_type_node, lab_over);
7445 gimplify_and_add (tmp, pre_p);
7449 /* ??? In va-sh.h, there had been code to make values larger than
7450 size 8 indirect. This does not match the FUNCTION_ARG macros. */
7452 tmp = std_gimplify_va_arg_expr (valist, type, pre_p, NULL);
7455 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, result, tmp);
7456 gimplify_and_add (tmp, pre_p);
7458 tmp = build1 (LABEL_EXPR, void_type_node, lab_over);
7459 gimplify_and_add (tmp, pre_p);
7465 result = build_va_arg_indirect_ref (result);
7471 sh_promote_prototypes (const_tree type)
7477 return ! sh_attr_renesas_p (type);
7480 /* Whether an argument must be passed by reference. On SHcompact, we
7481 pretend arguments wider than 32 bits that would have been passed in
7482 registers are passed by reference, so that an SHmedia trampoline
7483 loads them into the full 64-bit registers. */
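/* Illustrative case: a 16-byte struct that would have occupied integer
   argument registers is instead passed by reference, so that an SHmedia
   trampoline can reload the value into full 64-bit registers on the
   callee side.  */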
7486 shcompact_byref (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
7487 const_tree type, bool named)
7489 unsigned HOST_WIDE_INT size;
7492 size = int_size_in_bytes (type);
7494 size = GET_MODE_SIZE (mode);
7496 if (cum->arg_count[SH_ARG_INT] < NPARM_REGS (SImode)
7498 || GET_SH_ARG_CLASS (mode) == SH_ARG_INT
7499 || (GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT
7500 && cum->arg_count[SH_ARG_FLOAT] >= NPARM_REGS (SFmode)))
7502 && !SHCOMPACT_FORCE_ON_STACK (mode, type)
7503 && !SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
7510 sh_pass_by_reference (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7511 const_tree type, bool named)
7513 if (targetm.calls.must_pass_in_stack (mode, type))
7516 /* ??? std_gimplify_va_arg_expr passes NULL for cum. That function
7517 wants to know about pass-by-reference semantics for incoming
7522 if (TARGET_SHCOMPACT)
7524 cum->byref = shcompact_byref (cum, mode, type, named);
7525 return cum->byref != 0;
7532 sh_callee_copies (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7533 const_tree type, bool named ATTRIBUTE_UNUSED)
7535 /* ??? How can it possibly be correct to return true only on the
7536 caller side of the equation? Is there someplace else in the
7537 sh backend that's magically producing the copies? */
7538 return (cum->outgoing
7539 && ((mode == BLKmode ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode))
7540 % SH_MIN_ALIGN_FOR_CALLEE_COPY == 0));
7544 sh_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7545 tree type, bool named ATTRIBUTE_UNUSED)
7550 && PASS_IN_REG_P (*cum, mode, type)
7551 && !(TARGET_SH4 || TARGET_SH2A_DOUBLE)
7552 && (ROUND_REG (*cum, mode)
7554 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
7555 : ROUND_ADVANCE (int_size_in_bytes (type)))
7556 > NPARM_REGS (mode)))
7557 words = NPARM_REGS (mode) - ROUND_REG (*cum, mode);
7559 else if (!TARGET_SHCOMPACT
7560 && SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
7561 words = NPARM_REGS (SImode) - cum->arg_count[SH_ARG_INT];
7563 return words * UNITS_PER_WORD;
7567 /* Define where to put the arguments to a function.
7568 Value is zero to push the argument on the stack,
7569 or a hard register in which to store the argument.
7571 MODE is the argument's machine mode.
7572 TYPE is the data type of the argument (as a tree).
7573 This is null for libcalls where that information may not be available.
7575 CUM is a variable of type CUMULATIVE_ARGS which gives info about
7576 the preceding args and about the function being called.
7577 NAMED is nonzero if this argument is a named parameter
7578 (otherwise it is an extra parameter matching an ellipsis).
7580 On SH the first args are normally in registers
7581 and the rest are pushed. Any arg that starts within the first
7582 NPARM_REGS words is at least partially passed in a register unless
7583 its data type forbids. */
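/* For example (illustrative, non-SH5 GCC ABI on SH4): the first four
   int-sized arguments go in r4..r7 and SFmode arguments in fr4..fr11;
   a fifth integer argument starts on the stack.  */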
7587 sh_function_arg (CUMULATIVE_ARGS *ca, enum machine_mode mode,
7588 tree type, int named)
7590 if (! TARGET_SH5 && mode == VOIDmode)
7591 return GEN_INT (ca->renesas_abi ? 1 : 0);
7594 && PASS_IN_REG_P (*ca, mode, type)
7595 && (named || ! (TARGET_HITACHI || ca->renesas_abi)))
7599 if (mode == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN
7600 && (! FUNCTION_ARG_SCmode_WART || (ROUND_REG (*ca, mode) & 1)))
7602 rtx r1 = gen_rtx_EXPR_LIST (VOIDmode,
7603 gen_rtx_REG (SFmode,
7605 + (ROUND_REG (*ca, mode) ^ 1)),
7607 rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
7608 gen_rtx_REG (SFmode,
7610 + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
7612 return gen_rtx_PARALLEL (SCmode, gen_rtvec (2, r1, r2));
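/* Illustrative effect of the PARALLEL above: with ROUND_REG == 0 the
   first SFmode half of the SCmode value is placed in fr5 and the
   second in fr4, i.e. the registers within each pair are swapped to
   compensate for little-endian word order.  */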
7615 /* If the alignment of a DF value causes an SF register to be
7616 skipped, we will use that skipped register for the next SF value. */
7618 if ((TARGET_HITACHI || ca->renesas_abi)
7619 && ca->free_single_fp_reg
7621 return gen_rtx_REG (mode, ca->free_single_fp_reg);
7623 regno = (BASE_ARG_REG (mode) + ROUND_REG (*ca, mode))
7624 ^ (mode == SFmode && TARGET_SH4
7625 && TARGET_LITTLE_ENDIAN != 0
7626 && ! TARGET_HITACHI && ! ca->renesas_abi);
7627 return gen_rtx_REG (mode, regno);
7633 if (mode == VOIDmode && TARGET_SHCOMPACT)
7634 return GEN_INT (ca->call_cookie);
7636 /* The following test assumes unnamed arguments are promoted to DFmode. */
7638 if (mode == SFmode && ca->free_single_fp_reg)
7639 return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode, ca->free_single_fp_reg);
7641 if ((GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT)
7642 && (named || ! ca->prototype_p)
7643 && ca->arg_count[(int) SH_ARG_FLOAT] < NPARM_REGS (SFmode))
7645 if (! ca->prototype_p && TARGET_SHMEDIA)
7646 return SH5_PROTOTYPELESS_FLOAT_ARG (*ca, mode);
7648 return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode,
7650 + ca->arg_count[(int) SH_ARG_FLOAT]);
7653 if (ca->arg_count[(int) SH_ARG_INT] < NPARM_REGS (SImode)
7654 && (! TARGET_SHCOMPACT
7655 || (! SHCOMPACT_FORCE_ON_STACK (mode, type)
7656 && ! SH5_WOULD_BE_PARTIAL_NREGS (*ca, mode,
7659 return gen_rtx_REG (mode, (FIRST_PARM_REG
7660 + ca->arg_count[(int) SH_ARG_INT]));
7669 /* Update the data in CUM to advance over an argument
7670 of mode MODE and data type TYPE.
7671 (TYPE is null for libcalls where that information may not be available.) */
7675 sh_function_arg_advance (CUMULATIVE_ARGS *ca, enum machine_mode mode,
7676 tree type, int named)
7680 else if (TARGET_SH5)
7682 tree type2 = (ca->byref && type
7685 enum machine_mode mode2 = (ca->byref && type
7688 int dwords = ((ca->byref
7691 ? int_size_in_bytes (type2)
7692 : GET_MODE_SIZE (mode2)) + 7) / 8;
7693 int numregs = MIN (dwords, NPARM_REGS (SImode)
7694 - ca->arg_count[(int) SH_ARG_INT]);
7698 ca->arg_count[(int) SH_ARG_INT] += numregs;
7699 if (TARGET_SHCOMPACT
7700 && SHCOMPACT_FORCE_ON_STACK (mode2, type2))
7703 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7705 /* N.B. We want this also for outgoing. */
7706 ca->stack_regs += numregs;
7711 ca->stack_regs += numregs;
7712 ca->byref_regs += numregs;
7716 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7720 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7723 else if (dwords > numregs)
7725 int pushregs = numregs;
7727 if (TARGET_SHCOMPACT)
7728 ca->stack_regs += numregs;
7729 while (pushregs < NPARM_REGS (SImode) - 1
7730 && (CALL_COOKIE_INT_REG_GET
7732 NPARM_REGS (SImode) - pushregs)
7736 &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
7740 if (numregs == NPARM_REGS (SImode))
7742 |= CALL_COOKIE_INT_REG (0, 1)
7743 | CALL_COOKIE_STACKSEQ (numregs - 1);
7746 |= CALL_COOKIE_STACKSEQ (numregs);
7749 if (GET_SH_ARG_CLASS (mode2) == SH_ARG_FLOAT
7750 && (named || ! ca->prototype_p))
7752 if (mode2 == SFmode && ca->free_single_fp_reg)
7753 ca->free_single_fp_reg = 0;
7754 else if (ca->arg_count[(int) SH_ARG_FLOAT]
7755 < NPARM_REGS (SFmode))
7758 = MIN ((GET_MODE_SIZE (mode2) + 7) / 8 * 2,
7760 - ca->arg_count[(int) SH_ARG_FLOAT]);
7762 ca->arg_count[(int) SH_ARG_FLOAT] += numfpregs;
7764 if (TARGET_SHCOMPACT && ! ca->prototype_p)
7766 if (ca->outgoing && numregs > 0)
7770 |= (CALL_COOKIE_INT_REG
7771 (ca->arg_count[(int) SH_ARG_INT]
7772 - numregs + ((numfpregs - 2) / 2),
7773 4 + (ca->arg_count[(int) SH_ARG_FLOAT]
7776 while (numfpregs -= 2);
7778 else if (mode2 == SFmode && (named)
7779 && (ca->arg_count[(int) SH_ARG_FLOAT]
7780 < NPARM_REGS (SFmode)))
7781 ca->free_single_fp_reg
7782 = FIRST_FP_PARM_REG - numfpregs
7783 + ca->arg_count[(int) SH_ARG_FLOAT] + 1;
7789 if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
7791 /* Note that we've used the skipped register. */
7792 if (mode == SFmode && ca->free_single_fp_reg)
7794 ca->free_single_fp_reg = 0;
7797 /* When we have a DF after an SF, there's an SF register that gets
7798 skipped in order to align the DF value. We note this skipped
7799 register, because the next SF value will use it, and not the
7800 SF that follows the DF. */
7802 && ROUND_REG (*ca, DFmode) != ROUND_REG (*ca, SFmode))
7804 ca->free_single_fp_reg = (ROUND_REG (*ca, SFmode)
7805 + BASE_ARG_REG (mode));
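/* Worked example (illustrative): for f (float a, double b, float c)
   under the Renesas ABI with a double-precision FPU, a goes in fr4,
   b in the aligned pair fr6/fr7 (skipping fr5), and the note taken
   here lets c reuse fr5.  */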
7809 if (! ((TARGET_SH4 || TARGET_SH2A) || ca->renesas_abi)
7810 || PASS_IN_REG_P (*ca, mode, type))
7811 (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
7812 = (ROUND_REG (*ca, mode)
7814 ? ROUND_ADVANCE (int_size_in_bytes (type))
7815 : ROUND_ADVANCE (GET_MODE_SIZE (mode)))));
7818 /* The Renesas calling convention doesn't quite fit into this scheme since
7819 the address is passed like an invisible argument, but one that is always
7820 passed in memory. */
7822 sh_struct_value_rtx (tree fndecl, int incoming ATTRIBUTE_UNUSED)
7824 if (TARGET_HITACHI || sh_attr_renesas_p (fndecl))
7826 return gen_rtx_REG (Pmode, 2);
7829 /* Worker function for TARGET_RETURN_IN_MEMORY. */
7832 sh_return_in_memory (const_tree type, const_tree fndecl)
7836 if (TYPE_MODE (type) == BLKmode)
7837 return ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)) > 8;
7839 return GET_MODE_SIZE (TYPE_MODE (type)) > 8;
7843 return (TYPE_MODE (type) == BLKmode
7844 || ((TARGET_HITACHI || sh_attr_renesas_p (fndecl))
7845 && TREE_CODE (type) == RECORD_TYPE));
7849 /* We actually emit the code in sh_expand_prologue. We used to use
7850 a static variable to flag that we need to emit this code, but that
7851 doesn't work when inlining, when functions are deferred and then emitted
7852 later. Fortunately, we already have two flags that are part of struct
7853 function that tell if a function uses varargs or stdarg. */
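/* Worked example (illustrative): for int f (int a, ...) with four
   SImode parameter registers, one register is named, so
   anon_parm_regs == 3 and the pretend-args area is 3 * 4 == 12 bytes.  */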
7855 sh_setup_incoming_varargs (CUMULATIVE_ARGS *ca,
7856 enum machine_mode mode,
7858 int *pretend_arg_size,
7859 int second_time ATTRIBUTE_UNUSED)
7861 gcc_assert (cfun->stdarg);
7862 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
7864 int named_parm_regs, anon_parm_regs;
7866 named_parm_regs = (ROUND_REG (*ca, mode)
7868 ? ROUND_ADVANCE (int_size_in_bytes (type))
7869 : ROUND_ADVANCE (GET_MODE_SIZE (mode))));
7870 anon_parm_regs = NPARM_REGS (SImode) - named_parm_regs;
7871 if (anon_parm_regs > 0)
7872 *pretend_arg_size = anon_parm_regs * 4;
7877 sh_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
7883 sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *ca)
7885 return ! (TARGET_HITACHI || ca->renesas_abi) && ! TARGET_SH5;
7889 /* Define the offset between two registers, one to be eliminated, and
7890 the other its replacement, at the start of a routine. */
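/* E.g. (illustrative): with 8 bytes of saved registers and a 16-byte
   rounded frame, eliminating ARG_POINTER to STACK_POINTER below yields
   8 + 16 == 24, plus any SHcompact by-reference register area.  */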
7893 initial_elimination_offset (int from, int to)
7896 int regs_saved_rounding = 0;
7897 int total_saved_regs_space;
7898 int total_auto_space;
7899 int save_flags = target_flags;
7901 HARD_REG_SET live_regs_mask;
7903 shmedia_space_reserved_for_target_registers = false;
7904 regs_saved = calc_live_regs (&live_regs_mask);
7905 regs_saved += SHMEDIA_REGS_STACK_ADJUST ();
7907 if (shmedia_reserve_space_for_target_registers_p (regs_saved, &live_regs_mask))
7909 shmedia_space_reserved_for_target_registers = true;
7910 regs_saved += shmedia_target_regs_stack_adjust (&live_regs_mask);
7913 if (TARGET_SH5 && regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT))
7914 regs_saved_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
7915 - regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT));
7917 total_auto_space = rounded_frame_size (regs_saved) - regs_saved_rounding;
7918 copy_flags = target_flags;
7919 target_flags = save_flags;
7921 total_saved_regs_space = regs_saved + regs_saved_rounding;
7923 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7924 return total_saved_regs_space + total_auto_space
7925 + crtl->args.info.byref_regs * 8;
7927 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7928 return total_saved_regs_space + total_auto_space
7929 + crtl->args.info.byref_regs * 8;
7931 /* Initial gap between fp and sp is 0. */
7932 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7935 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7936 return rounded_frame_size (0);
7938 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7939 return rounded_frame_size (0);
7941 gcc_assert (from == RETURN_ADDRESS_POINTER_REGNUM
7942 && (to == HARD_FRAME_POINTER_REGNUM
7943 || to == STACK_POINTER_REGNUM));
7946 int n = total_saved_regs_space;
7947 int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
7948 save_schedule schedule;
7951 n += total_auto_space;
7953 /* If it wasn't saved, there's not much we can do. */
7954 if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
7957 target_flags = copy_flags;
7959 sh5_schedule_saves (&live_regs_mask, &schedule, n);
7960 for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
7961 if (entry->reg == pr_reg)
7963 target_flags = save_flags;
7964 return entry->offset;
7969 return total_auto_space;
7972 /* Parse the -mfixed-range= option string. */
7974 sh_fix_range (const char *const_str)
7977 char *str, *dash, *comma;
7979 /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
7980 REG2 are either register names or register numbers. The effect
7981 of this option is to mark the registers in the range from REG1 to
7982 REG2 as ``fixed'' so they won't be used by the compiler. */
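/* Usage sketch (illustrative): -mfixed-range=r4-r7,fr12-fr15 marks
   r4..r7 and fr12..fr15 as fixed and call-used, so the register
   allocator will not use them.  */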
7984 i = strlen (const_str);
7985 str = (char *) alloca (i + 1);
7986 memcpy (str, const_str, i + 1);
7990 dash = strchr (str, '-');
7993 warning (0, "value of -mfixed-range must have form REG1-REG2");
7997 comma = strchr (dash + 1, ',');
8001 first = decode_reg_name (str);
8004 warning (0, "unknown register name: %s", str);
8008 last = decode_reg_name (dash + 1);
8011 warning (0, "unknown register name: %s", dash + 1);
8019 warning (0, "%s-%s is an empty range", str, dash + 1);
8023 for (i = first; i <= last; ++i)
8024 fixed_regs[i] = call_used_regs[i] = 1;
8034 /* Insert any deferred function attributes from earlier pragmas. */
8036 sh_insert_attributes (tree node, tree *attributes)
8040 if (TREE_CODE (node) != FUNCTION_DECL)
8043 /* We are only interested in fields. */
8047 /* Append the attributes to the deferred attributes. */
8048 *sh_deferred_function_attributes_tail = *attributes;
8049 attrs = sh_deferred_function_attributes;
8053 /* Some attributes imply or require the interrupt attribute. */
8054 if (!lookup_attribute ("interrupt_handler", attrs)
8055 && !lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (node)))
8057 /* If we have a trapa_handler, but no interrupt_handler attribute,
8058 insert an interrupt_handler attribute. */
8059 if (lookup_attribute ("trapa_handler", attrs) != NULL_TREE)
8060 /* We can't use sh_pr_interrupt here because that's not in the
8063 = tree_cons (get_identifier("interrupt_handler"), NULL_TREE, attrs);
8064 /* However, for sp_switch, trap_exit, nosave_low_regs and resbank,
8065 if the interrupt attribute is missing, we ignore the attribute and warn. */
8067 else if (lookup_attribute ("sp_switch", attrs)
8068 || lookup_attribute ("trap_exit", attrs)
8069 || lookup_attribute ("nosave_low_regs", attrs)
8070 || lookup_attribute ("resbank", attrs))
8074 for (tail = attributes; attrs; attrs = TREE_CHAIN (attrs))
8076 if (is_attribute_p ("sp_switch", TREE_PURPOSE (attrs))
8077 || is_attribute_p ("trap_exit", TREE_PURPOSE (attrs))
8078 || is_attribute_p ("nosave_low_regs", TREE_PURPOSE (attrs))
8079 || is_attribute_p ("resbank", TREE_PURPOSE (attrs)))
8080 warning (OPT_Wattributes,
8081 "%qs attribute only applies to interrupt functions",
8082 IDENTIFIER_POINTER (TREE_PURPOSE (attrs)));
8085 *tail = tree_cons (TREE_PURPOSE (attrs), NULL_TREE,
8087 tail = &TREE_CHAIN (*tail);
8090 attrs = *attributes;
8094 /* Install the processed list. */
8095 *attributes = attrs;
8097 /* Clear deferred attributes. */
8098 sh_deferred_function_attributes = NULL_TREE;
8099 sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
8104 /* Supported attributes:
8106 interrupt_handler -- specifies this function is an interrupt handler.
8108 trapa_handler - like above, but don't save all registers.
8110 sp_switch -- specifies an alternate stack for an interrupt handler to run on.
8113 trap_exit -- use a trapa to exit an interrupt function instead of an rte instruction.
8116 nosave_low_regs - don't save r0..r7 in an interrupt handler.
8117 This is useful on the SH3 and upwards,
8118 which have a separate set of low regs for User and Supervisor modes.
8119 This should only be used for the lowest level of interrupts. Higher levels
8120 of interrupts must save the registers in case they themselves are interrupted.
8123 renesas -- use Renesas calling/layout conventions (functions and
8126 resbank -- In case of an ISR, use a register bank to save registers
8127 R0-R14, MACH, MACL, GBR and PR. This is useful only on SH2A targets. */
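/* Usage sketch (illustrative only; names and argument values are made up):

     void isr (void) __attribute__ ((interrupt_handler));
     void isr2 (void) __attribute__ ((interrupt_handler,
                                      sp_switch ("alt_stack"),
                                      trap_exit (11)));
     void bank_isr (void) __attribute__ ((interrupt_handler, resbank));  */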
8130 const struct attribute_spec sh_attribute_table[] =
8132 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
8133 { "interrupt_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
8134 { "sp_switch", 1, 1, true, false, false, sh_handle_sp_switch_attribute },
8135 { "trap_exit", 1, 1, true, false, false, sh_handle_trap_exit_attribute },
8136 { "renesas", 0, 0, false, true, false, sh_handle_renesas_attribute },
8137 { "trapa_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
8138 { "nosave_low_regs", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
8139 { "resbank", 0, 0, true, false, false, sh_handle_resbank_handler_attribute },
8140 { "function_vector", 1, 1, true, false, false, sh2a_handle_function_vector_handler_attribute },
8142 /* Symbian support adds three new attributes:
8143 dllexport - for exporting a function/variable that will live in a dll
8144 dllimport - for importing a function/variable from a dll
8146 Microsoft allows multiple declspecs in one __declspec, separating
8147 them with spaces. We do NOT support this. Instead, use __declspec multiple times. */
8149 { "dllimport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
8150 { "dllexport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
8152 { NULL, 0, 0, false, false, false, NULL }
8155 /* Handle a 'resbank' attribute. */
8157 sh_handle_resbank_handler_attribute (tree * node, tree name,
8158 tree args ATTRIBUTE_UNUSED,
8159 int flags ATTRIBUTE_UNUSED,
8160 bool * no_add_attrs)
8164 warning (OPT_Wattributes, "%qs attribute is supported only for SH2A",
8165 IDENTIFIER_POINTER (name));
8166 *no_add_attrs = true;
8168 if (TREE_CODE (*node) != FUNCTION_DECL)
8170 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8171 IDENTIFIER_POINTER (name));
8172 *no_add_attrs = true;
8178 /* Handle an "interrupt_handler" attribute; arguments as in
8179 struct attribute_spec.handler. */
8181 sh_handle_interrupt_handler_attribute (tree *node, tree name,
8182 tree args ATTRIBUTE_UNUSED,
8183 int flags ATTRIBUTE_UNUSED,
8186 if (TREE_CODE (*node) != FUNCTION_DECL)
8188 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8189 IDENTIFIER_POINTER (name));
8190 *no_add_attrs = true;
8192 else if (TARGET_SHCOMPACT)
8194 error ("attribute interrupt_handler is not compatible with -m5-compact");
8195 *no_add_attrs = true;
8201 /* Handle a 'function_vector' attribute; arguments as in
8202 struct attribute_spec.handler. */
8204 sh2a_handle_function_vector_handler_attribute (tree * node, tree name,
8205 tree args ATTRIBUTE_UNUSED,
8206 int flags ATTRIBUTE_UNUSED,
8207 bool * no_add_attrs)
8211 warning (OPT_Wattributes, "%qs attribute only applies to SH2A",
8212 IDENTIFIER_POINTER (name));
8213 *no_add_attrs = true;
8215 else if (TREE_CODE (*node) != FUNCTION_DECL)
8217 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8218 IDENTIFIER_POINTER (name));
8219 *no_add_attrs = true;
8221 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
8223 /* The argument must be a constant integer. */
8224 warning (OPT_Wattributes,
8225 "`%s' attribute argument not an integer constant",
8226 IDENTIFIER_POINTER (name));
8227 *no_add_attrs = true;
8229 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
8231 /* The argument value must be between 0 and 255. */
8232 warning (OPT_Wattributes,
8233 "`%s' attribute argument should be between 0 and 255",
8234 IDENTIFIER_POINTER (name));
8235 *no_add_attrs = true;
8240 /* Returns 1 if the function whose SYMBOL_REF is X has been assigned
8241 the attribute 'function_vector'. */
8243 sh2a_is_function_vector_call (rtx x)
8245 if (GET_CODE (x) == SYMBOL_REF
8246 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
8248 tree tr = SYMBOL_REF_DECL (x);
8250 if (sh2a_function_vector_p (tr))
8257 /* Returns the function vector number, if the attribute
8258 'function_vector' is assigned, otherwise returns zero. */
8260 sh2a_get_function_vector_number (rtx x)
8265 if ((GET_CODE (x) == SYMBOL_REF)
8266 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
8268 t = SYMBOL_REF_DECL (x);
8270 if (TREE_CODE (t) != FUNCTION_DECL)
8273 list = SH_ATTRIBUTES (t);
8276 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
8278 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
8282 list = TREE_CHAIN (list);
8291 /* Handle an "sp_switch" attribute; arguments as in
8292 struct attribute_spec.handler. */
8294 sh_handle_sp_switch_attribute (tree *node, tree name, tree args,
8295 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
8297 if (TREE_CODE (*node) != FUNCTION_DECL)
8299 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8300 IDENTIFIER_POINTER (name));
8301 *no_add_attrs = true;
8303 else if (TREE_CODE (TREE_VALUE (args)) != STRING_CST)
8305 /* The argument must be a constant string. */
8306 warning (OPT_Wattributes, "%qs attribute argument not a string constant",
8307 IDENTIFIER_POINTER (name));
8308 *no_add_attrs = true;
8314 /* Handle a "trap_exit" attribute; arguments as in
8315 struct attribute_spec.handler. */
8317 sh_handle_trap_exit_attribute (tree *node, tree name, tree args,
8318 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
8320 if (TREE_CODE (*node) != FUNCTION_DECL)
8322 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8323 IDENTIFIER_POINTER (name));
8324 *no_add_attrs = true;
8326 /* The argument specifies a trap number to be used in a trapa instruction
8327 at function exit (instead of an rte instruction). */
8328 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
8330 /* The argument must be a constant integer. */
8331 warning (OPT_Wattributes, "%qs attribute argument not an "
8332 "integer constant", IDENTIFIER_POINTER (name));
8333 *no_add_attrs = true;
8340 sh_handle_renesas_attribute (tree *node ATTRIBUTE_UNUSED,
8341 tree name ATTRIBUTE_UNUSED,
8342 tree args ATTRIBUTE_UNUSED,
8343 int flags ATTRIBUTE_UNUSED,
8344 bool *no_add_attrs ATTRIBUTE_UNUSED)
8349 /* True if __attribute__((renesas)) or -mrenesas. */
8351 sh_attr_renesas_p (const_tree td)
8358 td = TREE_TYPE (td);
8359 if (td == error_mark_node)
8361 return (lookup_attribute ("renesas", TYPE_ATTRIBUTES (td))
8365 /* True if __attribute__((renesas)) or -mrenesas, for the current
8368 sh_cfun_attr_renesas_p (void)
8370 return sh_attr_renesas_p (current_function_decl);
8374 sh_cfun_interrupt_handler_p (void)
8376 return (lookup_attribute ("interrupt_handler",
8377 DECL_ATTRIBUTES (current_function_decl))
8381 /* Returns 1 if FUNC has been assigned the attribute
8382 "function_vector". */
8384 sh2a_function_vector_p (tree func)
8387 if (TREE_CODE (func) != FUNCTION_DECL)
8390 list = SH_ATTRIBUTES (func);
8393 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
8396 list = TREE_CHAIN (list);
8401 /* Returns TRUE if given tree has the "resbank" attribute. */
8404 sh_cfun_resbank_handler_p (void)
8406 return ((lookup_attribute ("resbank",
8407 DECL_ATTRIBUTES (current_function_decl))
8409 && (lookup_attribute ("interrupt_handler",
8410 DECL_ATTRIBUTES (current_function_decl))
8411 != NULL_TREE) && TARGET_SH2A);
8414 /* Implement TARGET_CHECK_PCH_TARGET_FLAGS. */
8417 sh_check_pch_target_flags (int old_flags)
8419 if ((old_flags ^ target_flags) & (MASK_SH1 | MASK_SH2 | MASK_SH3
8420 | MASK_SH_E | MASK_HARD_SH4
8421 | MASK_FPU_SINGLE | MASK_SH4))
8422 return _("created and used with different architectures / ABIs");
8423 if ((old_flags ^ target_flags) & MASK_HITACHI)
8424 return _("created and used with different ABIs");
8425 if ((old_flags ^ target_flags) & MASK_LITTLE_ENDIAN)
8426 return _("created and used with different endianness");
8430 /* Predicates used by the templates. */
8432 /* Returns 1 if OP is MACL, MACH or PR. The input must be a REG rtx.
8433 Used only in general_movsrc_operand. */
8436 system_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8448 /* Nonzero if OP is a floating point value with value 0.0. */
8451 fp_zero_operand (rtx op)
8455 if (GET_MODE (op) != SFmode)
8458 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
8459 return REAL_VALUES_EQUAL (r, dconst0) && ! REAL_VALUE_MINUS_ZERO (r);
8462 /* Nonzero if OP is a floating point value with value 1.0. */
8465 fp_one_operand (rtx op)
8469 if (GET_MODE (op) != SFmode)
8472 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
8473 return REAL_VALUES_EQUAL (r, dconst1);
8476 /* For -m4 and -m4-single-only, mode switching is used. If we are
8477 compiling without -mfmovd, movsf_ie isn't taken into account for
8478 mode switching. We could check in machine_dependent_reorg for
8479 cases where we know we are in single precision mode, but there is
8480 no interface to find that out during reload, so we must avoid
8481 choosing an fldi alternative during reload and thus failing to
8482 allocate a scratch register for the constant loading. */
8486 return ! TARGET_SH4 || TARGET_FMOVD || reload_completed;
8490 tertiary_reload_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8492 enum rtx_code code = GET_CODE (op);
8493 return code == MEM || (TARGET_SH4 && code == CONST_DOUBLE);
8496 /* Return the TLS type for TLS symbols, 0 for otherwise. */
8498 tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8500 if (GET_CODE (op) != SYMBOL_REF)
8502 return SYMBOL_REF_TLS_MODEL (op);
8505 /* Return the destination address of a branch. */
8508 branch_dest (rtx branch)
8510 rtx dest = SET_SRC (PATTERN (branch));
8513 if (GET_CODE (dest) == IF_THEN_ELSE)
8514 dest = XEXP (dest, 1);
8515 dest = XEXP (dest, 0);
8516 dest_uid = INSN_UID (dest);
8517 return INSN_ADDRESSES (dest_uid);
8520 /* Return nonzero if REG is not used after INSN.
8521 We assume REG is a reload reg, and therefore does
8522 not live past labels. It may live past calls or jumps though. */
8524 reg_unused_after (rtx reg, rtx insn)
8529 /* If the reg is set by this instruction, then it is safe for our
8530 case. Disregard the case where this is a store to memory, since
8531 we are checking a register used in the store address. */
8532 set = single_set (insn);
8533 if (set && GET_CODE (SET_DEST (set)) != MEM
8534 && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8537 while ((insn = NEXT_INSN (insn)))
8543 code = GET_CODE (insn);
8546 /* If this is a label that existed before reload, then the register
8547 is dead here. However, if this is a label added by reorg, then
8548 the register may still be live here. We can't tell the difference,
8549 so we just ignore labels completely. */
8550 if (code == CODE_LABEL)
8555 if (code == JUMP_INSN)
8558 /* If this is a sequence, we must handle them all at once.
8559 We could have for instance a call that sets the target register,
8560 and an insn in a delay slot that uses the register. In this case,
8561 we must return 0. */
8562 else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
8567 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8569 rtx this_insn = XVECEXP (PATTERN (insn), 0, i);
8570 rtx set = single_set (this_insn);
8572 if (GET_CODE (this_insn) == CALL_INSN)
8574 else if (GET_CODE (this_insn) == JUMP_INSN)
8576 if (INSN_ANNULLED_BRANCH_P (this_insn))
8581 if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
8583 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8585 if (GET_CODE (SET_DEST (set)) != MEM)
8591 && reg_overlap_mentioned_p (reg, PATTERN (this_insn)))
8596 else if (code == JUMP_INSN)
8600 set = single_set (insn);
8601 if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
8603 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8604 return GET_CODE (SET_DEST (set)) != MEM;
8605 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
8608 if (code == CALL_INSN && call_really_used_regs[REGNO (reg)])
8616 static GTY(()) rtx fpscr_rtx;
8618 get_fpscr_rtx (void)
8622 fpscr_rtx = gen_rtx_REG (PSImode, FPSCR_REG);
8623 REG_USERVAR_P (fpscr_rtx) = 1;
8624 mark_user_reg (fpscr_rtx);
8626 if (! reload_completed || mdep_reorg_phase != SH_AFTER_MDEP_REORG)
8627 mark_user_reg (fpscr_rtx);
8631 static GTY(()) tree fpscr_values;
8634 emit_fpu_switch (rtx scratch, int index)
8638 if (fpscr_values == NULL)
8642 t = build_index_type (integer_one_node);
8643 t = build_array_type (integer_type_node, t);
8644 t = build_decl (VAR_DECL, get_identifier ("__fpscr_values"), t);
8645 DECL_ARTIFICIAL (t) = 1;
8646 DECL_IGNORED_P (t) = 1;
8647 DECL_EXTERNAL (t) = 1;
8648 TREE_STATIC (t) = 1;
8649 TREE_PUBLIC (t) = 1;
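/* A sketch of the declaration built above (assuming no qualifiers
   beyond the flags set here):
       extern int __fpscr_values[2];
   i.e. a two-element table holding the FPSCR settings for the two
   FP modes; the load below indexes it by INDEX.  */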
8655 src = DECL_RTL (fpscr_values);
8656 if (!can_create_pseudo_p ())
8658 emit_move_insn (scratch, XEXP (src, 0));
8660 emit_insn (gen_addsi3 (scratch, scratch, GEN_INT (index * 4)));
8661 src = adjust_automodify_address (src, PSImode, scratch, index * 4);
8664 src = adjust_address (src, PSImode, index * 4);
8666 dst = get_fpscr_rtx ();
8667 emit_move_insn (dst, src);
8671 emit_sf_insn (rtx pat)
8677 emit_df_insn (rtx pat)
8683 expand_sf_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
8685 emit_sf_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
8689 expand_sf_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
8691 emit_sf_insn ((*fun) (operands[0], operands[1], operands[2],
8696 expand_df_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
8698 emit_df_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
8702 expand_df_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
8704 emit_df_insn ((*fun) (operands[0], operands[1], operands[2],
8708 static rtx get_free_reg (HARD_REG_SET);
8710 /* This function returns a register to use to load the address from which
8711 to load the fpscr. Currently it always returns r1 or r7, but when we are
8712 able to use pseudo registers after combine, or have a better mechanism
8713 for choosing a register, it should be done here. */
8714 /* REGS_LIVE is the liveness information for the point for which we
8715 need this allocation. In some bare-bones exit blocks, r1 is live at the
8716 start. We can even have all of r0..r3 being live:
8717 __complex__ long long f (double d) { if (d == 0) return 2; else return 3; }
8718 The INSN before which new insns are placed will clobber the register
8719 we return. If a basic block consists only of setting the return value
8720 register to a pseudo and using that register, the return value is not
8721 live before or after this block, yet we'll insert our insns right in
8725 get_free_reg (HARD_REG_SET regs_live)
8727 if (! TEST_HARD_REG_BIT (regs_live, 1))
8728 return gen_rtx_REG (Pmode, 1);
8730 /* Hard reg 1 is live; since this is a SMALL_REGISTER_CLASSES target,
8731 there shouldn't be anything but a jump before the function end. */
8732 gcc_assert (!TEST_HARD_REG_BIT (regs_live, 7));
8733 return gen_rtx_REG (Pmode, 7);
8736 /* This function will set the fpscr from memory.
8737 MODE is the mode we are setting it to. */
8739 fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
8741 enum attr_fp_mode fp_mode = mode;
8742 enum attr_fp_mode norm_mode = ACTUAL_NORMAL_MODE (FP_MODE);
8745 addr_reg = !can_create_pseudo_p () ? get_free_reg (regs_live) : NULL_RTX;
8746 emit_fpu_switch (addr_reg, fp_mode == norm_mode);
8749 /* Is the given character a logical line separator for the assembler? */
8750 #ifndef IS_ASM_LOGICAL_LINE_SEPARATOR
8751 #define IS_ASM_LOGICAL_LINE_SEPARATOR(C, STR) ((C) == ';')
8755 sh_insn_length_adjustment (rtx insn)
8757 /* Instructions with unfilled delay slots take up an extra two bytes for
8758 the nop in the delay slot. */
8759 if (((GET_CODE (insn) == INSN
8760 && GET_CODE (PATTERN (insn)) != USE
8761 && GET_CODE (PATTERN (insn)) != CLOBBER)
8762 || GET_CODE (insn) == CALL_INSN
8763 || (GET_CODE (insn) == JUMP_INSN
8764 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
8765 && GET_CODE (PATTERN (insn)) != ADDR_VEC))
8766 && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE
8767 && get_attr_needs_delay_slot (insn) == NEEDS_DELAY_SLOT_YES)
8770 /* SH2e has a bug that prevents the use of annulled branches, so if
8771 the delay slot is not filled, we'll have to put a NOP in it. */
8772 if (sh_cpu == CPU_SH2E
8773 && GET_CODE (insn) == JUMP_INSN
8774 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
8775 && GET_CODE (PATTERN (insn)) != ADDR_VEC
8776 && get_attr_type (insn) == TYPE_CBRANCH
8777 && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE)
8780 /* sh-dsp parallel processing insns take four bytes instead of two. */
8782 if (GET_CODE (insn) == INSN)
8785 rtx body = PATTERN (insn);
8786 const char *template;
8788 int maybe_label = 1;
8790 if (GET_CODE (body) == ASM_INPUT)
8791 template = XSTR (body, 0);
8792 else if (asm_noperands (body) >= 0)
8794 = decode_asm_operands (body, NULL, NULL, NULL, NULL, NULL);
8803 while (c == ' ' || c == '\t');
8804 /* All sh-dsp parallel-processing insns start with p.
8805 The only non-ppi sh insn starting with p is pref.
8806 The only ppi starting with pr is prnd. */
8807 if ((c == 'p' || c == 'P') && strncasecmp ("re", template, 2))
8809 /* The repeat pseudo-insn expands to three insns, a total of
8810 six bytes in size. */
8811 else if ((c == 'r' || c == 'R')
8812 && ! strncasecmp ("epeat", template, 5))
8814 while (c && c != '\n'
8815 && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c, template))
8817 /* If this is a label, it is obviously not a ppi insn. */
8818 if (c == ':' && maybe_label)
8823 else if (c == '\'' || c == '"')
8828 maybe_label = c != ':';
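/* Illustration only: an asm template such as "padd x0,y0,a0" is taken as
   a parallel-processing insn by the leading-'p' check above and counts
   four bytes, while a "repeat" pseudo-insn counts six.  */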
8836 /* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
8837 isn't protected by a PIC unspec. */
8839 nonpic_symbol_mentioned_p (rtx x)
8841 register const char *fmt;
8844 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF
8845 || GET_CODE (x) == PC)
8848 /* We don't want to look into the possible MEM location of a
8849 CONST_DOUBLE, since we're not going to use it, in general. */
8850 if (GET_CODE (x) == CONST_DOUBLE)
8853 if (GET_CODE (x) == UNSPEC
8854 && (XINT (x, 1) == UNSPEC_PIC
8855 || XINT (x, 1) == UNSPEC_GOT
8856 || XINT (x, 1) == UNSPEC_GOTOFF
8857 || XINT (x, 1) == UNSPEC_GOTPLT
8858 || XINT (x, 1) == UNSPEC_GOTTPOFF
8859 || XINT (x, 1) == UNSPEC_DTPOFF
8860 || XINT (x, 1) == UNSPEC_PLT))
8863 fmt = GET_RTX_FORMAT (GET_CODE (x));
8864 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8870 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8871 if (nonpic_symbol_mentioned_p (XVECEXP (x, i, j)))
8874 else if (fmt[i] == 'e' && nonpic_symbol_mentioned_p (XEXP (x, i)))
8881 /* Convert a non-PIC address in `orig' to a PIC address using @GOT or
8882 @GOTOFF in `reg'. */
8884 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
8887 if (tls_symbolic_operand (orig, Pmode))
8890 if (GET_CODE (orig) == LABEL_REF
8891 || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (orig)))
8894 reg = gen_reg_rtx (Pmode);
8896 emit_insn (gen_symGOTOFF2reg (reg, orig));
8899 else if (GET_CODE (orig) == SYMBOL_REF)
8902 reg = gen_reg_rtx (Pmode);
8904 emit_insn (gen_symGOT2reg (reg, orig));
8910 /* Mark the use of a constant in the literal table. If the constant
8911 has multiple labels, make it unique. */
8913 mark_constant_pool_use (rtx x)
8915 rtx insn, lab, pattern;
8920 switch (GET_CODE (x))
8930 /* Get the first label in the list of labels for the same constant
8931 and delete the other labels in the list. */
8933 for (insn = PREV_INSN (x); insn; insn = PREV_INSN (insn))
8935 if (GET_CODE (insn) != CODE_LABEL
8936 || LABEL_REFS (insn) != NEXT_INSN (insn))
8941 for (insn = LABEL_REFS (lab); insn; insn = LABEL_REFS (insn))
8942 INSN_DELETED_P (insn) = 1;
8944 /* Mark constants in a window. */
8945 for (insn = NEXT_INSN (x); insn; insn = NEXT_INSN (insn))
8947 if (GET_CODE (insn) != INSN)
8950 pattern = PATTERN (insn);
8951 if (GET_CODE (pattern) != UNSPEC_VOLATILE)
8954 switch (XINT (pattern, 1))
8956 case UNSPECV_CONST2:
8957 case UNSPECV_CONST4:
8958 case UNSPECV_CONST8:
8959 XVECEXP (pattern, 0, 1) = const1_rtx;
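/* Setting the second element of the unspec marks this constant as used
   within the current window.  */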
8961 case UNSPECV_WINDOW_END:
8962 if (XVECEXP (pattern, 0, 0) == x)
8965 case UNSPECV_CONST_END:
8975 /* Return true if it's possible to redirect BRANCH1 to the destination
8976 of an unconditional jump BRANCH2. We only want to do this if the
8977 resulting branch will have a short displacement. */
8979 sh_can_redirect_branch (rtx branch1, rtx branch2)
8981 if (flag_expensive_optimizations && simplejump_p (branch2))
8983 rtx dest = XEXP (SET_SRC (single_set (branch2)), 0);
8987 for (distance = 0, insn = PREV_INSN (branch1);
8988 insn && distance < 256;
8989 insn = PREV_INSN (insn))
8994 distance += get_attr_length (insn);
8996 for (distance = 0, insn = NEXT_INSN (branch1);
8997 insn && distance < 256;
8998 insn = NEXT_INSN (insn))
9003 distance += get_attr_length (insn);
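/* The 256-byte windows above approximate the reach of the short
   conditional-branch encoding (8-bit displacement, scaled by 2), so the
   redirection is only accepted while the branch stays short.  */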
9009 /* Return nonzero if register old_reg can be renamed to register new_reg. */
9011 sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
9012 unsigned int new_reg)
9014 /* Interrupt functions can only use registers that have already been
9015 saved by the prologue, even if they would normally be
9018 if (sh_cfun_interrupt_handler_p () && !df_regs_ever_live_p (new_reg))
9024 /* Function to update the integer COST
9025 based on the relationship between INSN that is dependent on
9026 DEP_INSN through the dependence LINK. The default is to make no
9027 adjustment to COST. This can be used for example to specify to
9028 the scheduler that an output- or anti-dependence does not incur
9029 the same cost as a data-dependence. The return value should be
9030 the new value for COST. */
9032 sh_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx dep_insn, int cost)
9038 /* On SHmedia, if the dependence is an anti-dependence or
9039 output-dependence, there is no cost. */
9040 if (REG_NOTE_KIND (link) != 0)
9042 /* However, dependencies between target register loads and
9043 uses of the register in a subsequent block that are separated
9044 by a conditional branch are not modelled - we have to make do with
9045 the anti-dependency between the target register load and the
9046 conditional branch that ends the current block. */
9047 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
9048 && GET_CODE (PATTERN (dep_insn)) == SET
9049 && (get_attr_type (dep_insn) == TYPE_PT_MEDIA
9050 || get_attr_type (dep_insn) == TYPE_PTABS_MEDIA)
9051 && get_attr_type (insn) == TYPE_CBRANCH_MEDIA)
9053 int orig_cost = cost;
9054 rtx note = find_reg_note (insn, REG_BR_PROB, 0);
9055 rtx target = ((! note
9056 || INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
9057 ? insn : JUMP_LABEL (insn));
9058 /* On the likely path, the branch costs 1, on the unlikely path,
9062 target = next_active_insn (target);
9063 while (target && ! flow_dependent_p (target, dep_insn)
9065 /* If two branches are executed in immediate succession, with the
9066 first branch properly predicted, this causes a stall at the
9067 second branch, hence we won't need the target for the
9068 second branch for two cycles after the launch of the first
9070 if (cost > orig_cost - 2)
9071 cost = orig_cost - 2;
9077 else if (get_attr_is_mac_media (insn)
9078 && get_attr_is_mac_media (dep_insn))
9081 else if (! reload_completed
9082 && GET_CODE (PATTERN (insn)) == SET
9083 && GET_CODE (SET_SRC (PATTERN (insn))) == FLOAT
9084 && GET_CODE (PATTERN (dep_insn)) == SET
9085 && fp_arith_reg_operand (SET_SRC (PATTERN (dep_insn)), VOIDmode)
9088 /* Schedule the ptabs for a casesi_jump_media in preference to stuff
9089 that is needed at the target. */
9090 else if (get_attr_type (insn) == TYPE_JUMP_MEDIA
9091 && ! flow_dependent_p (insn, dep_insn))
9094 else if (REG_NOTE_KIND (link) == 0)
9096 enum attr_type type;
9099 if (recog_memoized (insn) < 0
9100 || recog_memoized (dep_insn) < 0)
9103 dep_set = single_set (dep_insn);
9105 /* The latency that we specify in the scheduling description refers
9106 to the actual output, not to an auto-increment register; for that,
9107 the latency is one. */
9108 if (dep_set && MEM_P (SET_SRC (dep_set)) && cost > 1)
9110 rtx set = single_set (insn);
9113 && !reg_mentioned_p (SET_DEST (dep_set), SET_SRC (set))
9114 && (!MEM_P (SET_DEST (set))
9115 || !reg_mentioned_p (SET_DEST (dep_set),
9116 XEXP (SET_DEST (set), 0))))
9119 /* The only input for a call that is timing-critical is the
9120 function's address. */
9121 if (GET_CODE (insn) == CALL_INSN)
9123 rtx call = PATTERN (insn);
9125 if (GET_CODE (call) == PARALLEL)
9126 call = XVECEXP (call, 0, 0);
9127 if (GET_CODE (call) == SET)
9128 call = SET_SRC (call);
9129 if (GET_CODE (call) == CALL && GET_CODE (XEXP (call, 0)) == MEM
9130 /* sibcalli_thunk uses a symbol_ref in an unspec. */
9131 && (GET_CODE (XEXP (XEXP (call, 0), 0)) == UNSPEC
9132 || ! reg_set_p (XEXP (XEXP (call, 0), 0), dep_insn)))
9133 cost -= TARGET_SH4_300 ? 3 : 6;
9135 /* Likewise, the most timing-critical input for an sfunc call
9136 is the function address. However, sfuncs typically start
9137 using their arguments pretty quickly.
9138 Assume a four cycle delay for SH4 before they are needed.
9139 Cached ST40-300 calls are quicker, so assume only a one
9141 ??? Maybe we should encode the delays till input registers
9142 are needed by sfuncs into the sfunc call insn. */
9143 /* All sfunc calls are parallels with at least four components.
9144 Exploit this to avoid unnecessary calls to sfunc_uses_reg. */
9145 else if (GET_CODE (PATTERN (insn)) == PARALLEL
9146 && XVECLEN (PATTERN (insn), 0) >= 4
9147 && (reg = sfunc_uses_reg (insn)))
9149 if (! reg_set_p (reg, dep_insn))
9150 cost -= TARGET_SH4_300 ? 1 : 4;
9152 if (TARGET_HARD_SH4 && !TARGET_SH4_300)
9154 enum attr_type dep_type = get_attr_type (dep_insn);
9156 if (dep_type == TYPE_FLOAD || dep_type == TYPE_PCFLOAD)
9158 else if ((dep_type == TYPE_LOAD_SI || dep_type == TYPE_PCLOAD_SI)
9159 && (type = get_attr_type (insn)) != TYPE_CALL
9160 && type != TYPE_SFUNC)
9162 /* When the preceding instruction loads the shift amount of
9163 the following SHAD/SHLD, the latency of the load is increased
9165 if (get_attr_type (insn) == TYPE_DYN_SHIFT
9166 && get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
9167 && reg_overlap_mentioned_p (SET_DEST (dep_set),
9168 XEXP (SET_SRC (single_set (insn)),
9171 /* When an LS group instruction with a latency of less than
9172 3 cycles is followed by a double-precision floating-point
9173 instruction, FIPR, or FTRV, the latency of the first
9174 instruction is increased to 3 cycles. */
9176 && get_attr_insn_class (dep_insn) == INSN_CLASS_LS_GROUP
9177 && get_attr_dfp_comp (insn) == DFP_COMP_YES)
9179 /* The lsw register of a double-precision computation is ready one
9181 else if (reload_completed
9182 && get_attr_dfp_comp (dep_insn) == DFP_COMP_YES
9183 && (use_pat = single_set (insn))
9184 && ! regno_use_in (REGNO (SET_DEST (single_set (dep_insn))),
9188 if (get_attr_any_fp_comp (dep_insn) == ANY_FP_COMP_YES
9189 && get_attr_late_fp_use (insn) == LATE_FP_USE_YES)
9192 else if (TARGET_SH4_300)
9194 /* Stores need their input register two cycles later. */
9195 if (dep_set && cost >= 1
9196 && ((type = get_attr_type (insn)) == TYPE_STORE
9197 || type == TYPE_PSTORE
9198 || type == TYPE_FSTORE || type == TYPE_MAC_MEM))
9200 rtx set = single_set (insn);
9202 if (!reg_mentioned_p (SET_SRC (set), XEXP (SET_DEST (set), 0))
9203 && rtx_equal_p (SET_SRC (set), SET_DEST (dep_set)))
9206 /* But don't reduce the cost below 1 if the address depends
9207 on a side effect of dep_insn. */
9209 && modified_in_p (XEXP (SET_DEST (set), 0), dep_insn))
9215 /* An anti-dependence penalty of two applies if the first insn is a double
9216 precision fadd / fsub / fmul. */
9217 else if (!TARGET_SH4_300
9218 && REG_NOTE_KIND (link) == REG_DEP_ANTI
9219 && recog_memoized (dep_insn) >= 0
9220 && (get_attr_type (dep_insn) == TYPE_DFP_ARITH
9221 || get_attr_type (dep_insn) == TYPE_DFP_MUL)
9222 /* A lot of alleged anti-flow dependences are fake,
9223 so check this one is real. */
9224 && flow_dependent_p (dep_insn, insn))
9230 /* Check if INSN is flow-dependent on DEP_INSN. Can also be used to check
9231 if DEP_INSN is anti-flow dependent on INSN. */
9233 flow_dependent_p (rtx insn, rtx dep_insn)
9235 rtx tmp = PATTERN (insn);
9237 note_stores (PATTERN (dep_insn), flow_dependent_p_1, &tmp);
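/* note_stores invokes flow_dependent_p_1 for every location stored by
   DEP_INSN; the helper clears TMP when a stored location is referenced
   by INSN's pattern, so a NULL TMP signals a true dependence.  */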
9238 return tmp == NULL_RTX;
9241 /* A helper function for flow_dependent_p called through note_stores. */
9243 flow_dependent_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
9245 rtx * pinsn = (rtx *) data;
9247 if (*pinsn && reg_referenced_p (x, *pinsn))
9251 /* For use by sh_allocate_initial_value. Note that sh.md contains some
9252 'special function' patterns (type sfunc) that clobber pr, but that
9253 do not look like function calls to leaf_function_p. Hence we must
9254 do this extra check. */
9258 return DF_REG_DEF_COUNT (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
9261 /* Return where to allocate pseudo for a given hard register initial
9264 sh_allocate_initial_value (rtx hard_reg)
9268 if (REGNO (hard_reg) == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG))
9270 if (current_function_is_leaf
9271 && ! sh_pr_n_sets ()
9272 && ! (TARGET_SHCOMPACT
9273 && ((crtl->args.info.call_cookie
9274 & ~ CALL_COOKIE_RET_TRAMP (1))
9275 || crtl->saves_all_registers)))
9278 x = gen_frame_mem (Pmode, return_address_pointer_rtx);
9286 /* This function returns "2" to indicate dual issue for the SH4
9287 processor. To be used by the DFA pipeline description. */
9289 sh_issue_rate (void)
9291 if (TARGET_SUPERSCALAR)
9297 /* Functions for ready queue reordering for sched1. */
9299 /* Get weight for mode for a set x. */
9301 find_set_regmode_weight (rtx x, enum machine_mode mode)
9303 if (GET_CODE (x) == CLOBBER && register_operand (SET_DEST (x), mode))
9305 if (GET_CODE (x) == SET && register_operand (SET_DEST (x), mode))
9307 if (GET_CODE (SET_DEST (x)) == REG)
9309 if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
9319 /* Get regmode weight for insn. */
9321 find_insn_regmode_weight (rtx insn, enum machine_mode mode)
9323 short reg_weight = 0;
9326 /* Increment weight for each register born here. */
9328 reg_weight += find_set_regmode_weight (x, mode);
9329 if (GET_CODE (x) == PARALLEL)
9332 for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
9334 x = XVECEXP (PATTERN (insn), 0, j);
9335 reg_weight += find_set_regmode_weight (x, mode);
9338 /* Decrement weight for each register that dies here. */
9339 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
9341 if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED)
9343 rtx note = XEXP (x, 0);
9344 if (GET_CODE (note) == REG && GET_MODE (note) == mode)
9351 /* Calculate regmode weights for all insns of a basic block. */
9353 find_regmode_weight (basic_block b, enum machine_mode mode)
9355 rtx insn, next_tail, head, tail;
9357 get_ebb_head_tail (b, b, &head, &tail);
9358 next_tail = NEXT_INSN (tail);
9360 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
9362 /* Handle register life information. */
9367 INSN_REGMODE_WEIGHT (insn, mode) =
9368 find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DFmode);
9369 else if (mode == SImode)
9370 INSN_REGMODE_WEIGHT (insn, mode) =
9371 find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DImode);
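/* A DFmode value occupies two SFmode registers, and a DImode value two
   SImode registers, hence the factor of two applied above.  */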
9375 /* Comparison function for ready queue sorting. */
9377 rank_for_reorder (const void *x, const void *y)
9379 rtx tmp = *(const rtx *) y;
9380 rtx tmp2 = *(const rtx *) x;
9382 /* The insn in a schedule group should be issued first. */
9383 if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
9384 return SCHED_GROUP_P (tmp2) ? 1 : -1;
9386 /* If insns are equally good, sort by INSN_LUID (original insn order); this
9387 minimizes instruction movement, thus minimizing sched's effect on
9388 register pressure. */
9389 return INSN_LUID (tmp) - INSN_LUID (tmp2);
9392 /* Resort the array A in which only the element at index N may be out of order. */
9394 swap_reorder (rtx *a, int n)
9396 rtx insn = a[n - 1];
9399 while (i >= 0 && rank_for_reorder (a + i, &insn) >= 0)
9407 #define SCHED_REORDER(READY, N_READY) \
9410 if ((N_READY) == 2) \
9411 swap_reorder (READY, N_READY); \
9412 else if ((N_READY) > 2) \
9413 qsort (READY, N_READY, sizeof (rtx), rank_for_reorder); \
9417 /* Sort the ready list READY by ascending priority, using the SCHED_REORDER
9420 ready_reorder (rtx *ready, int nready)
9422 SCHED_REORDER (ready, nready);
9425 /* Count life regions of r0 for a block. */
9427 find_r0_life_regions (basic_block b)
9436 if (REGNO_REG_SET_P (df_get_live_in (b), R0_REG))
9449 r0_reg = gen_rtx_REG (SImode, R0_REG);
9454 if (find_regno_note (insn, REG_DEAD, R0_REG))
9460 && (pset = single_set (insn))
9461 && reg_overlap_mentioned_p (r0_reg, SET_DEST (pset))
9462 && !find_regno_note (insn, REG_UNUSED, R0_REG))
9470 insn = NEXT_INSN (insn);
9475 /* Calculate regmode weights for all insns of all basic blocks. */
9477 sh_md_init_global (FILE *dump ATTRIBUTE_UNUSED,
9478 int verbose ATTRIBUTE_UNUSED,
9483 regmode_weight[0] = (short *) xcalloc (old_max_uid, sizeof (short));
9484 regmode_weight[1] = (short *) xcalloc (old_max_uid, sizeof (short));
9485 r0_life_regions = 0;
9487 FOR_EACH_BB_REVERSE (b)
9489 find_regmode_weight (b, SImode);
9490 find_regmode_weight (b, SFmode);
9491 if (!reload_completed)
9492 r0_life_regions += find_r0_life_regions (b);
9495 CURR_REGMODE_PRESSURE (SImode) = 0;
9496 CURR_REGMODE_PRESSURE (SFmode) = 0;
9502 sh_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
9503 int verbose ATTRIBUTE_UNUSED)
9505 if (regmode_weight[0])
9507 free (regmode_weight[0]);
9508 regmode_weight[0] = NULL;
9510 if (regmode_weight[1])
9512 free (regmode_weight[1]);
9513 regmode_weight[1] = NULL;
9517 /* The set of supported scalar modes differs from the default only in
9518 TImode, for 32-bit SHMEDIA. */
9520 sh_scalar_mode_supported_p (enum machine_mode mode)
9522 if (TARGET_SHMEDIA32 && mode == TImode)
9525 return default_scalar_mode_supported_p (mode);
9528 /* Cache the can_issue_more so that we can return it from reorder2. Also,
9529 keep count of register pressures on SImode and SFmode. */
9531 sh_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
9532 int sched_verbose ATTRIBUTE_UNUSED,
9536 if (GET_CODE (PATTERN (insn)) != USE
9537 && GET_CODE (PATTERN (insn)) != CLOBBER)
9538 cached_can_issue_more = can_issue_more - 1;
9540 cached_can_issue_more = can_issue_more;
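/* USE and CLOBBER insns are bookkeeping only, so above they do not
   consume an issue slot.  */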
9542 if (reload_completed)
9543 return cached_can_issue_more;
9545 CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
9546 CURR_REGMODE_PRESSURE (SFmode) += INSN_REGMODE_WEIGHT (insn, SFmode);
9548 return cached_can_issue_more;
9552 sh_md_init (FILE *dump ATTRIBUTE_UNUSED,
9553 int verbose ATTRIBUTE_UNUSED,
9554 int veclen ATTRIBUTE_UNUSED)
9556 CURR_REGMODE_PRESSURE (SImode) = 0;
9557 CURR_REGMODE_PRESSURE (SFmode) = 0;
9560 /* Some magic numbers. */
9561 /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
9562 functions that already have high pressure on r0. */
9563 #define R0_MAX_LIFE_REGIONS 2
9564 /* Register Pressure thresholds for SImode and SFmode registers. */
9565 #define SIMODE_MAX_WEIGHT 5
9566 #define SFMODE_MAX_WEIGHT 10
9568 /* Return true if the pressure is high for MODE. */
9570 high_pressure (enum machine_mode mode)
9572 /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
9573 functions that already have high pressure on r0. */
9574 if (r0_life_regions >= R0_MAX_LIFE_REGIONS)
9578 return (CURR_REGMODE_PRESSURE (SFmode) > SFMODE_MAX_WEIGHT);
9580 return (CURR_REGMODE_PRESSURE (SImode) > SIMODE_MAX_WEIGHT);
9583 /* Reorder ready queue if register pressure is high. */
9585 sh_reorder (FILE *dump ATTRIBUTE_UNUSED,
9586 int sched_verbose ATTRIBUTE_UNUSED,
9589 int clock_var ATTRIBUTE_UNUSED)
9591 if (reload_completed)
9592 return sh_issue_rate ();
9594 if (high_pressure (SFmode) || high_pressure (SImode))
9596 ready_reorder (ready, *n_readyp);
9599 return sh_issue_rate ();
9602 /* Skip cycles if the current register pressure is high. */
9604 sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
9605 int sched_verbose ATTRIBUTE_UNUSED,
9606 rtx *ready ATTRIBUTE_UNUSED,
9607 int *n_readyp ATTRIBUTE_UNUSED,
9608 int clock_var ATTRIBUTE_UNUSED)
9610 if (reload_completed)
9611 return cached_can_issue_more;
9613 if (high_pressure (SFmode) || high_pressure (SImode))
9616 return cached_can_issue_more;
9619 /* Skip cycles without sorting the ready queue. This will move insns from
9620 Q->R. If this is the last cycle we are skipping, allow sorting of the
9621 ready queue by sh_reorder. */
9623 /* Generally, skipping this many cycles is sufficient for all insns to move
9628 sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED,
9629 int sched_verbose ATTRIBUTE_UNUSED,
9630 rtx insn ATTRIBUTE_UNUSED,
9635 if (reload_completed)
9640 if ((clock_var - last_clock_var) < MAX_SKIPS)
9645 /* If this is the last cycle we are skipping, allow reordering of R. */
9646 if ((clock_var - last_clock_var) == MAX_SKIPS)
9658 /* SHmedia requires registers for branches, so we can't generate new
9659 branches past reload. */
9661 sh_cannot_modify_jumps_p (void)
9663 return (TARGET_SHMEDIA && (reload_in_progress || reload_completed));
9667 sh_target_reg_class (void)
9669 return TARGET_SHMEDIA ? TARGET_REGS : NO_REGS;
9673 sh_optimize_target_register_callee_saved (bool after_prologue_epilogue_gen)
9680 if (! shmedia_space_reserved_for_target_registers)
9682 if (after_prologue_epilogue_gen && ! TARGET_SAVE_ALL_TARGET_REGS)
9684 if (calc_live_regs (&dummy) >= 6 * 8)
9690 sh_ms_bitfield_layout_p (const_tree record_type ATTRIBUTE_UNUSED)
9692 return (TARGET_SH5 || TARGET_HITACHI || sh_attr_renesas_p (record_type));
9696 On the SH1..SH4, the trampoline looks like
9697 2 0002 D202 mov.l l2,r2
9698 1 0000 D301 mov.l l1,r3
9701 5 0008 00000000 l1: .long area
9702 6 000c 00000000 l2: .long function
9704 SH5 (compact) uses r1 instead of r3 for the static chain. */
9707 /* Emit RTL insns to initialize the variable parts of a trampoline.
9708 FNADDR is an RTX for the address of the function's pure code.
9709 CXT is an RTX for the static chain value for the function. */
9712 sh_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
9714 rtx tramp_mem = gen_frame_mem (BLKmode, tramp);
9716 if (TARGET_SHMEDIA64)
9721 rtx movi1 = GEN_INT (0xcc000010);
9722 rtx shori1 = GEN_INT (0xc8000010);
9725 /* The following trampoline works within a +- 128 KB range for cxt:
9726 ptb/u cxt,tr1; movi fnaddr >> 48,r0; shori fnaddr >> 32,r0;
9727 shori fnaddr >> 16,r0; shori fnaddr,r0; ptabs/l r0,tr0
9728 gettr tr1,r1; blink tr0,r63 */
9729 /* Address rounding makes it hard to compute the exact bounds of the
9730 offset for this trampoline, but we have a rather generous offset
9731 range, so frame_offset should do fine as an upper bound. */
9732 if (cxt == virtual_stack_vars_rtx && frame_offset < 0x20000)
9734 /* ??? Could optimize this trampoline initialization
9735 by writing DImode words with two insns each. */
9736 rtx mask = force_reg (DImode, GEN_INT (0x3fffc00));
9737 rtx insn = gen_rtx_MINUS (DImode, cxt, tramp);
9738 insn = gen_rtx_ASHIFT (DImode, insn, GEN_INT (10-2));
9739 insn = gen_rtx_AND (DImode, insn, mask);
9740 /* OR in the ptb/u .,tr1 opcode pattern. */
9741 insn = gen_rtx_IOR (DImode, insn, gen_int_mode (0xec000010, SImode));
9742 insn = force_operand (insn, NULL_RTX);
9743 insn = gen_lowpart (SImode, insn);
9744 emit_move_insn (change_address (tramp_mem, SImode, NULL_RTX), insn);
9745 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (38));
9746 insn = gen_rtx_AND (DImode, insn, mask);
9747 insn = force_operand (gen_rtx_IOR (DImode, movi1, insn), NULL_RTX);
9748 insn = gen_lowpart (SImode, insn);
9749 emit_move_insn (adjust_address (tramp_mem, SImode, 4), insn);
9750 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (22));
9751 insn = gen_rtx_AND (DImode, insn, mask);
9752 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9753 insn = gen_lowpart (SImode, insn);
9754 emit_move_insn (adjust_address (tramp_mem, SImode, 8), insn);
9755 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (6));
9756 insn = gen_rtx_AND (DImode, insn, mask);
9757 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9758 insn = gen_lowpart (SImode, insn);
9759 emit_move_insn (adjust_address (tramp_mem, SImode, 12), insn);
9760 insn = gen_rtx_ASHIFT (DImode, fnaddr, GEN_INT (10));
9761 insn = gen_rtx_AND (DImode, insn, mask);
9762 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9763 insn = gen_lowpart (SImode, insn);
9764 emit_move_insn (adjust_address (tramp_mem, SImode, 16), insn);
9765 emit_move_insn (adjust_address (tramp_mem, SImode, 20),
9766 GEN_INT (0x6bf10600));
9767 emit_move_insn (adjust_address (tramp_mem, SImode, 24),
9768 GEN_INT (0x4415fc10));
9769 emit_move_insn (adjust_address (tramp_mem, SImode, 28),
9770 GEN_INT (0x4401fff0));
9771 emit_insn (gen_ic_invalidate_line (tramp));
9774 tramp_templ = gen_rtx_SYMBOL_REF (Pmode,"__GCC_nested_trampoline");
9775 fixed_len = TRAMPOLINE_SIZE - 2 * GET_MODE_SIZE (Pmode);
9777 tramp_templ = gen_datalabel_ref (tramp_templ);
9779 src = gen_const_mem (BLKmode, tramp_templ);
9780 set_mem_align (dst, 256);
9781 set_mem_align (src, 64);
9782 emit_block_move (dst, src, GEN_INT (fixed_len), BLOCK_OP_NORMAL);
9784 emit_move_insn (adjust_address (tramp_mem, Pmode, fixed_len), fnaddr);
9785 emit_move_insn (adjust_address (tramp_mem, Pmode,
9786 fixed_len + GET_MODE_SIZE (Pmode)),
9788 emit_insn (gen_ic_invalidate_line (tramp));
9791 else if (TARGET_SHMEDIA)
9793 /* movi fnaddr >> 16,r1; shori fnaddr,r1; ptabs/l r1,tr0
9794 movi cxt >> 16,r1; shori cxt,r1; blink tr0,r63 */
9795 rtx quad0 = gen_reg_rtx (DImode), cxtload = gen_reg_rtx (DImode);
9796 rtx quad1 = gen_reg_rtx (DImode), quad2 = gen_reg_rtx (DImode);
9797 /* Opcodes movi 0,r1 (0xcc000010) and shori 0,r1 (0xc8000010) concatenated,
9798 rotated right by 10, with the high 16 bits of every 32 selected. */
9800 = force_reg (V2HImode, (simplify_gen_subreg
9801 (V2HImode, GEN_INT (0x4330432), SImode, 0)));
9802 rtx ptabs = force_reg (DImode, GEN_INT (0x6bf10600));
9803 rtx blink = force_reg (DImode, GEN_INT (0x4401fff0));
9805 tramp = force_reg (Pmode, tramp);
9806 fnaddr = force_reg (SImode, fnaddr);
9807 cxt = force_reg (SImode, cxt);
9808 emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, quad0, 0),
9809 gen_rtx_SUBREG (V2HImode, fnaddr, 0),
9811 emit_insn (gen_rotrdi3_mextr (quad0, quad0,
9812 GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
9813 emit_insn (gen_ashldi3_media (quad0, quad0, const2_rtx));
9814 emit_move_insn (change_address (tramp_mem, DImode, NULL_RTX), quad0);
9815 emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, cxtload, 0),
9816 gen_rtx_SUBREG (V2HImode, cxt, 0),
9818 emit_insn (gen_rotrdi3_mextr (cxtload, cxtload,
9819 GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
9820 emit_insn (gen_ashldi3_media (cxtload, cxtload, const2_rtx));
9821 if (TARGET_LITTLE_ENDIAN)
9823 emit_insn (gen_mshflo_l_di (quad1, ptabs, cxtload));
9824 emit_insn (gen_mextr4 (quad2, cxtload, blink));
9828 emit_insn (gen_mextr4 (quad1, cxtload, ptabs));
9829 emit_insn (gen_mshflo_l_di (quad2, blink, cxtload));
9831 emit_move_insn (adjust_address (tramp_mem, DImode, 8), quad1);
9832 emit_move_insn (adjust_address (tramp_mem, DImode, 16), quad2);
9833 emit_insn (gen_ic_invalidate_line (tramp));
9836 else if (TARGET_SHCOMPACT)
9838 emit_insn (gen_initialize_trampoline (tramp, cxt, fnaddr));
9841 emit_move_insn (change_address (tramp_mem, SImode, NULL_RTX),
9842 gen_int_mode (TARGET_LITTLE_ENDIAN ? 0xd301d202 : 0xd202d301,
9844 emit_move_insn (adjust_address (tramp_mem, SImode, 4),
9845 gen_int_mode (TARGET_LITTLE_ENDIAN ? 0x0009422b : 0x422b0009,
9847 emit_move_insn (adjust_address (tramp_mem, SImode, 8), cxt);
9848 emit_move_insn (adjust_address (tramp_mem, SImode, 12), fnaddr);
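/* The stores above fill in the trampoline template shown earlier: the
   two opcode words, then the static chain value (l1, offset 8) and the
   target function address (l2, offset 12).  */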
9851 if (!TARGET_INLINE_IC_INVALIDATE
9852 || (!(TARGET_SH4A_ARCH || TARGET_SH4_300) && TARGET_USERMODE))
9853 emit_library_call (function_symbol (NULL, "__ic_invalidate",
9855 0, VOIDmode, 1, tramp, SImode);
9857 emit_insn (gen_ic_invalidate_line (tramp));
9861 /* FIXME: This is overly conservative. A SHcompact function that
9862 receives arguments ``by reference'' will have them stored in its
9863 own stack frame, so it must not pass pointers or references to
9864 these arguments to other functions by means of sibling calls. */
9865 /* If PIC, we cannot make sibling calls to global functions
9866 because the PLT requires r12 to be live. */
9868 sh_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
9871 && (! TARGET_SHCOMPACT
9872 || crtl->args.info.stack_regs == 0)
9873 && ! sh_cfun_interrupt_handler_p ()
9875 || (decl && ! TREE_PUBLIC (decl))
9876 || (decl && DECL_VISIBILITY (decl) != VISIBILITY_DEFAULT)));
9879 /* Machine specific built-in functions. */
9881 struct builtin_description
9883 const enum insn_code icode;
9884 const char *const name;
9888 /* Describe number and signedness of arguments; arg[0] == result
9889 (1: unsigned, 2: signed, 4: don't care, 8: pointer, 0: no argument). */
9890 /* 9: 64-bit pointer, 10: 32-bit pointer. */
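/* For illustration (the values here are hypothetical, the real entries
   follow below): a signature of { 2, 2, 2, 0 } would denote a signed
   result with two signed operands, while a leading 0 denotes a builtin
   with no result.  */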
9891 static const char signature_args[][4] =
9893 #define SH_BLTIN_V2SI2 0
9895 #define SH_BLTIN_V4HI2 1
9897 #define SH_BLTIN_V2SI3 2
9899 #define SH_BLTIN_V4HI3 3
9901 #define SH_BLTIN_V8QI3 4
9903 #define SH_BLTIN_MAC_HISI 5
9905 #define SH_BLTIN_SH_HI 6
9907 #define SH_BLTIN_SH_SI 7
9909 #define SH_BLTIN_V4HI2V2SI 8
9911 #define SH_BLTIN_V4HI2V8QI 9
9913 #define SH_BLTIN_SISF 10
9915 #define SH_BLTIN_LDUA_L 11
9917 #define SH_BLTIN_LDUA_Q 12
9919 #define SH_BLTIN_STUA_L 13
9921 #define SH_BLTIN_STUA_Q 14
9923 #define SH_BLTIN_LDUA_L64 15
9925 #define SH_BLTIN_LDUA_Q64 16
9927 #define SH_BLTIN_STUA_L64 17
9929 #define SH_BLTIN_STUA_Q64 18
9931 #define SH_BLTIN_NUM_SHARED_SIGNATURES 19
9932 #define SH_BLTIN_2 19
9933 #define SH_BLTIN_SU 19
9935 #define SH_BLTIN_3 20
9936 #define SH_BLTIN_SUS 20
9938 #define SH_BLTIN_PSSV 21
9940 #define SH_BLTIN_XXUU 22
9941 #define SH_BLTIN_UUUU 22
9943 #define SH_BLTIN_PV 23
9946 /* mcmv: operands considered unsigned. */
9947 /* mmulsum_wq, msad_ubq: result considered unsigned long long. */
9948 /* mperm: control value considered unsigned int. */
9949 /* mshalds, mshard, mshards, mshlld, mshlrd: shift count is unsigned int. */
9950 /* mshards_q: returns signed short. */
9951 /* nsb: takes long long arg, returns unsigned char. */
9952 static const struct builtin_description bdesc[] =
9954 { CODE_FOR_absv2si2, "__builtin_absv2si2", SH_BLTIN_V2SI2 },
9955 { CODE_FOR_absv4hi2, "__builtin_absv4hi2", SH_BLTIN_V4HI2 },
9956 { CODE_FOR_addv2si3, "__builtin_addv2si3", SH_BLTIN_V2SI3 },
9957 { CODE_FOR_addv4hi3, "__builtin_addv4hi3", SH_BLTIN_V4HI3 },
9958 { CODE_FOR_ssaddv2si3,"__builtin_ssaddv2si3", SH_BLTIN_V2SI3 },
9959 { CODE_FOR_usaddv8qi3,"__builtin_usaddv8qi3", SH_BLTIN_V8QI3 },
9960 { CODE_FOR_ssaddv4hi3,"__builtin_ssaddv4hi3", SH_BLTIN_V4HI3 },
9961 { CODE_FOR_alloco_i, "__builtin_sh_media_ALLOCO", SH_BLTIN_PV },
9962 { CODE_FOR_negcmpeqv8qi,"__builtin_sh_media_MCMPEQ_B", SH_BLTIN_V8QI3 },
9963 { CODE_FOR_negcmpeqv2si,"__builtin_sh_media_MCMPEQ_L", SH_BLTIN_V2SI3 },
9964 { CODE_FOR_negcmpeqv4hi,"__builtin_sh_media_MCMPEQ_W", SH_BLTIN_V4HI3 },
9965 { CODE_FOR_negcmpgtuv8qi,"__builtin_sh_media_MCMPGT_UB", SH_BLTIN_V8QI3 },
9966 { CODE_FOR_negcmpgtv2si,"__builtin_sh_media_MCMPGT_L", SH_BLTIN_V2SI3 },
9967 { CODE_FOR_negcmpgtv4hi,"__builtin_sh_media_MCMPGT_W", SH_BLTIN_V4HI3 },
9968 { CODE_FOR_mcmv, "__builtin_sh_media_MCMV", SH_BLTIN_UUUU },
9969 { CODE_FOR_mcnvs_lw, "__builtin_sh_media_MCNVS_LW", SH_BLTIN_3 },
9970 { CODE_FOR_mcnvs_wb, "__builtin_sh_media_MCNVS_WB", SH_BLTIN_V4HI2V8QI },
9971 { CODE_FOR_mcnvs_wub, "__builtin_sh_media_MCNVS_WUB", SH_BLTIN_V4HI2V8QI },
9972 { CODE_FOR_mextr1, "__builtin_sh_media_MEXTR1", SH_BLTIN_V8QI3 },
9973 { CODE_FOR_mextr2, "__builtin_sh_media_MEXTR2", SH_BLTIN_V8QI3 },
9974 { CODE_FOR_mextr3, "__builtin_sh_media_MEXTR3", SH_BLTIN_V8QI3 },
9975 { CODE_FOR_mextr4, "__builtin_sh_media_MEXTR4", SH_BLTIN_V8QI3 },
9976 { CODE_FOR_mextr5, "__builtin_sh_media_MEXTR5", SH_BLTIN_V8QI3 },
9977 { CODE_FOR_mextr6, "__builtin_sh_media_MEXTR6", SH_BLTIN_V8QI3 },
9978 { CODE_FOR_mextr7, "__builtin_sh_media_MEXTR7", SH_BLTIN_V8QI3 },
9979 { CODE_FOR_mmacfx_wl, "__builtin_sh_media_MMACFX_WL", SH_BLTIN_MAC_HISI },
9980 { CODE_FOR_mmacnfx_wl,"__builtin_sh_media_MMACNFX_WL", SH_BLTIN_MAC_HISI },
9981 { CODE_FOR_mulv2si3, "__builtin_mulv2si3", SH_BLTIN_V2SI3, },
9982 { CODE_FOR_mulv4hi3, "__builtin_mulv4hi3", SH_BLTIN_V4HI3 },
9983 { CODE_FOR_mmulfx_l, "__builtin_sh_media_MMULFX_L", SH_BLTIN_V2SI3 },
9984 { CODE_FOR_mmulfx_w, "__builtin_sh_media_MMULFX_W", SH_BLTIN_V4HI3 },
9985 { CODE_FOR_mmulfxrp_w,"__builtin_sh_media_MMULFXRP_W", SH_BLTIN_V4HI3 },
9986 { CODE_FOR_mmulhi_wl, "__builtin_sh_media_MMULHI_WL", SH_BLTIN_V4HI2V2SI },
9987 { CODE_FOR_mmullo_wl, "__builtin_sh_media_MMULLO_WL", SH_BLTIN_V4HI2V2SI },
9988 { CODE_FOR_mmulsum_wq,"__builtin_sh_media_MMULSUM_WQ", SH_BLTIN_XXUU },
9989 { CODE_FOR_mperm_w, "__builtin_sh_media_MPERM_W", SH_BLTIN_SH_HI },
9990 { CODE_FOR_msad_ubq, "__builtin_sh_media_MSAD_UBQ", SH_BLTIN_XXUU },
9991 { CODE_FOR_mshalds_l, "__builtin_sh_media_MSHALDS_L", SH_BLTIN_SH_SI },
9992 { CODE_FOR_mshalds_w, "__builtin_sh_media_MSHALDS_W", SH_BLTIN_SH_HI },
9993 { CODE_FOR_ashrv2si3, "__builtin_ashrv2si3", SH_BLTIN_SH_SI },
9994 { CODE_FOR_ashrv4hi3, "__builtin_ashrv4hi3", SH_BLTIN_SH_HI },
9995 { CODE_FOR_mshards_q, "__builtin_sh_media_MSHARDS_Q", SH_BLTIN_SUS },
9996 { CODE_FOR_mshfhi_b, "__builtin_sh_media_MSHFHI_B", SH_BLTIN_V8QI3 },
9997 { CODE_FOR_mshfhi_l, "__builtin_sh_media_MSHFHI_L", SH_BLTIN_V2SI3 },
9998 { CODE_FOR_mshfhi_w, "__builtin_sh_media_MSHFHI_W", SH_BLTIN_V4HI3 },
9999 { CODE_FOR_mshflo_b, "__builtin_sh_media_MSHFLO_B", SH_BLTIN_V8QI3 },
10000 { CODE_FOR_mshflo_l, "__builtin_sh_media_MSHFLO_L", SH_BLTIN_V2SI3 },
10001 { CODE_FOR_mshflo_w, "__builtin_sh_media_MSHFLO_W", SH_BLTIN_V4HI3 },
10002 { CODE_FOR_ashlv2si3, "__builtin_ashlv2si3", SH_BLTIN_SH_SI },
10003 { CODE_FOR_ashlv4hi3, "__builtin_ashlv4hi3", SH_BLTIN_SH_HI },
10004 { CODE_FOR_lshrv2si3, "__builtin_lshrv2si3", SH_BLTIN_SH_SI },
10005 { CODE_FOR_lshrv4hi3, "__builtin_lshrv4hi3", SH_BLTIN_SH_HI },
10006 { CODE_FOR_subv2si3, "__builtin_subv2si3", SH_BLTIN_V2SI3 },
10007 { CODE_FOR_subv4hi3, "__builtin_subv4hi3", SH_BLTIN_V4HI3 },
10008 { CODE_FOR_sssubv2si3,"__builtin_sssubv2si3", SH_BLTIN_V2SI3 },
10009 { CODE_FOR_ussubv8qi3,"__builtin_ussubv8qi3", SH_BLTIN_V8QI3 },
10010 { CODE_FOR_sssubv4hi3,"__builtin_sssubv4hi3", SH_BLTIN_V4HI3 },
10011 { CODE_FOR_fcosa_s, "__builtin_sh_media_FCOSA_S", SH_BLTIN_SISF },
10012 { CODE_FOR_fsina_s, "__builtin_sh_media_FSINA_S", SH_BLTIN_SISF },
10013 { CODE_FOR_fipr, "__builtin_sh_media_FIPR_S", SH_BLTIN_3 },
10014 { CODE_FOR_ftrv, "__builtin_sh_media_FTRV_S", SH_BLTIN_3 },
10015 { CODE_FOR_mac_media, "__builtin_sh_media_FMAC_S", SH_BLTIN_3 },
10016 { CODE_FOR_sqrtdf2, "__builtin_sh_media_FSQRT_D", SH_BLTIN_2 },
10017 { CODE_FOR_sqrtsf2, "__builtin_sh_media_FSQRT_S", SH_BLTIN_2 },
10018 { CODE_FOR_fsrra_s, "__builtin_sh_media_FSRRA_S", SH_BLTIN_2 },
10019 { CODE_FOR_ldhi_l, "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L },
10020 { CODE_FOR_ldhi_q, "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q },
10021 { CODE_FOR_ldlo_l, "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L },
10022 { CODE_FOR_ldlo_q, "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q },
10023 { CODE_FOR_sthi_l, "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L },
10024 { CODE_FOR_sthi_q, "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q },
10025 { CODE_FOR_stlo_l, "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L },
10026 { CODE_FOR_stlo_q, "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q },
10027 { CODE_FOR_ldhi_l64, "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L64 },
10028 { CODE_FOR_ldhi_q64, "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q64 },
10029 { CODE_FOR_ldlo_l64, "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L64 },
10030 { CODE_FOR_ldlo_q64, "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q64 },
10031 { CODE_FOR_sthi_l64, "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L64 },
10032 { CODE_FOR_sthi_q64, "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q64 },
10033 { CODE_FOR_stlo_l64, "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L64 },
10034 { CODE_FOR_stlo_q64, "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q64 },
10035 { CODE_FOR_nsb, "__builtin_sh_media_NSB", SH_BLTIN_SU },
10036 { CODE_FOR_byterev, "__builtin_sh_media_BYTEREV", SH_BLTIN_2 },
10037 { CODE_FOR_prefetch, "__builtin_sh_media_PREFO", SH_BLTIN_PSSV },
10041 sh_media_init_builtins (void)
10043 tree shared[SH_BLTIN_NUM_SHARED_SIGNATURES];
10044 const struct builtin_description *d;
10046 memset (shared, 0, sizeof shared);
10047 for (d = bdesc; d - bdesc < (int) ARRAY_SIZE (bdesc); d++)
10049 tree type, arg_type = 0;
10050 int signature = d->signature;
10053 if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES && shared[signature])
10054 type = shared[signature];
10057 int has_result = signature_args[signature][0] != 0;
10059 if ((signature_args[signature][1] & 8)
10060 && (((signature_args[signature][1] & 1) && TARGET_SHMEDIA32)
10061 || ((signature_args[signature][1] & 2) && TARGET_SHMEDIA64)))
10063 if (! TARGET_FPU_ANY
10064 && FLOAT_MODE_P (insn_data[d->icode].operand[0].mode))
10066 type = void_list_node;
10069 int arg = signature_args[signature][i];
10070 int opno = i - 1 + has_result;
10073 arg_type = ptr_type_node;
10075 arg_type = (*lang_hooks.types.type_for_mode)
10076 (insn_data[d->icode].operand[opno].mode,
10081 arg_type = void_type_node;
10084 type = tree_cons (NULL_TREE, arg_type, type);
10086 type = build_function_type (arg_type, type);
10087 if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES)
10088 shared[signature] = type;
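/* Function types for signatures below SH_BLTIN_NUM_SHARED_SIGNATURES are
   cached in SHARED, so each shared type is built only once.  */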
10090 add_builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
10095 /* Implements target hook vector_mode_supported_p. */
10097 sh_vector_mode_supported_p (enum machine_mode mode)
10100 && ((mode == V2SFmode)
10101 || (mode == V4SFmode)
10102 || (mode == V16SFmode)))
10105 else if (TARGET_SHMEDIA
10106 && ((mode == V8QImode)
10107 || (mode == V2HImode)
10108 || (mode == V4HImode)
10109 || (mode == V2SImode)))
10115 /* Implements target hook dwarf_calling_convention. Return an enum
10116 of dwarf_calling_convention. */
10118 sh_dwarf_calling_convention (const_tree func)
10120 if (sh_attr_renesas_p (func))
10121 return DW_CC_GNU_renesas_sh;
10123 return DW_CC_normal;
10127 sh_init_builtins (void)
10129 if (TARGET_SHMEDIA)
10130 sh_media_init_builtins ();
10133 /* Expand an expression EXP that calls a built-in function,
10134 with result going to TARGET if that's convenient
10135 (and in mode MODE if that's convenient).
10136 SUBTARGET may be used as the target for computing one of EXP's operands.
10137 IGNORE is nonzero if the value is to be ignored. */
10140 sh_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10141 enum machine_mode mode ATTRIBUTE_UNUSED, int ignore)
10143 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10144 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10145 const struct builtin_description *d = &bdesc[fcode];
10146 enum insn_code icode = d->icode;
10147 int signature = d->signature;
10148 enum machine_mode tmode = VOIDmode;
10153 if (signature_args[signature][0])
10158 tmode = insn_data[icode].operand[0].mode;
10160 || GET_MODE (target) != tmode
10161 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10162 target = gen_reg_rtx (tmode);
10163 op[nop++] = target;
10168 for (i = 1; i <= 3; i++, nop++)
10171 enum machine_mode opmode, argmode;
10174 if (! signature_args[signature][i])
10176 arg = CALL_EXPR_ARG (exp, i - 1);
10177 if (arg == error_mark_node)
10179 if (signature_args[signature][i] & 8)
10182 optype = ptr_type_node;
10186 opmode = insn_data[icode].operand[nop].mode;
10187 optype = (*lang_hooks.types.type_for_mode) (opmode, 0);
10189 argmode = TYPE_MODE (TREE_TYPE (arg));
10190 if (argmode != opmode)
10191 arg = build1 (NOP_EXPR, optype, arg);
10192 op[nop] = expand_expr (arg, NULL_RTX, opmode, 0);
10193 if (! (*insn_data[icode].operand[nop].predicate) (op[nop], opmode))
10194 op[nop] = copy_to_mode_reg (opmode, op[nop]);
10200 pat = (*insn_data[d->icode].genfun) (op[0]);
10203 pat = (*insn_data[d->icode].genfun) (op[0], op[1]);
10206 pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2]);
10209 pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2], op[3]);
10212 gcc_unreachable ();
10221 sh_expand_unop_v2sf (enum rtx_code code, rtx op0, rtx op1)
10223 rtx sel0 = const0_rtx;
10224 rtx sel1 = const1_rtx;
10225 rtx (*fn) (rtx, rtx, rtx, rtx, rtx) = gen_unary_sf_op;
10226 rtx op = gen_rtx_fmt_e (code, SFmode, op1);
10228 emit_insn ((*fn) (op0, op1, op, sel0, sel0));
10229 emit_insn ((*fn) (op0, op1, op, sel1, sel1));
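/* The V2SF operation is emulated lane by lane: the scalar SFmode
   operation is emitted once per lane, selected by sel0 and sel1.  */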
10233 sh_expand_binop_v2sf (enum rtx_code code, rtx op0, rtx op1, rtx op2)
10235 rtx op = gen_rtx_fmt_ee (code, SFmode, op1, op2);
10237 emit_insn (gen_binary_sf_op0 (op0, op1, op2, op));
10238 emit_insn (gen_binary_sf_op1 (op0, op1, op2, op));
10241 /* Return the class of registers for which a mode change from FROM to TO
10244 sh_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
10245 enum reg_class class)
10247 /* We want to enable the use of SUBREGs as a means to
10248 VEC_SELECT a single element of a vector. */
10249 if (to == SFmode && VECTOR_MODE_P (from) && GET_MODE_INNER (from) == SFmode)
10250 return (reg_classes_intersect_p (GENERAL_REGS, class));
10252 if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
10254 if (TARGET_LITTLE_ENDIAN)
10256 if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
10257 return reg_classes_intersect_p (DF_REGS, class);
10261 if (GET_MODE_SIZE (from) < 8)
10262 return reg_classes_intersect_p (DF_HI_REGS, class);
10269 /* If ADDRESS refers to a CODE_LABEL, add NUSES to the number of times
10270 that label is used. */
10273 sh_mark_label (rtx address, int nuses)
10275 if (GOTOFF_P (address))
10277 /* Extract the label or symbol. */
10278 address = XEXP (address, 0);
10279 if (GET_CODE (address) == PLUS)
10280 address = XEXP (address, 0);
10281 address = XVECEXP (address, 0, 0);
10283 if (GET_CODE (address) == LABEL_REF
10284 && GET_CODE (XEXP (address, 0)) == CODE_LABEL)
10285 LABEL_NUSES (XEXP (address, 0)) += nuses;
10288 /* Compute extra cost of moving data between one register class
10291 /* If SECONDARY*_RELOAD_CLASS says something about the src/dst pair, regclass
10292 uses this information. Hence, the general register <-> floating point
10293 register information here is not used for SFmode. */
10296 sh_register_move_cost (enum machine_mode mode,
10297 enum reg_class srcclass, enum reg_class dstclass)
10299 if (dstclass == T_REGS || dstclass == PR_REGS)
10302 if (dstclass == MAC_REGS && srcclass == MAC_REGS)
10305 if (mode == SImode && ! TARGET_SHMEDIA && TARGET_FMOVD
10306 && REGCLASS_HAS_FP_REG (srcclass)
10307 && REGCLASS_HAS_FP_REG (dstclass))
10310 if (REGCLASS_HAS_FP_REG (dstclass) && srcclass == T_REGS)
10311 return ((TARGET_HARD_SH4 && !optimize_size) ? 10 : 7);
10313 if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
10314 || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
10317 if ((REGCLASS_HAS_FP_REG (dstclass)
10318 && REGCLASS_HAS_GENERAL_REG (srcclass))
10319 || (REGCLASS_HAS_GENERAL_REG (dstclass)
10320 && REGCLASS_HAS_FP_REG (srcclass)))
10321 return ((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12)
10322 * ((GET_MODE_SIZE (mode) + 7) / 8U));
10324 if ((dstclass == FPUL_REGS
10325 && REGCLASS_HAS_GENERAL_REG (srcclass))
10326 || (srcclass == FPUL_REGS
10327 && REGCLASS_HAS_GENERAL_REG (dstclass)))
10330 if ((dstclass == FPUL_REGS
10331 && (srcclass == PR_REGS || srcclass == MAC_REGS || srcclass == T_REGS))
10332 || (srcclass == FPUL_REGS
10333 && (dstclass == PR_REGS || dstclass == MAC_REGS)))
10336 if ((srcclass == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
10337 || ((dstclass) == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
10340 /* ??? ptabs faults on (value & 0x3) == 0x3 */
10342 && ((srcclass) == TARGET_REGS || (srcclass) == SIBCALL_REGS))
10344 if (sh_gettrcost >= 0)
10345 return sh_gettrcost;
10346 else if (!TARGET_PT_FIXED)
10350 if ((srcclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
10351 || (dstclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
10356 && ! REGCLASS_HAS_GENERAL_REG (srcclass)
10357 && ! REGCLASS_HAS_GENERAL_REG (dstclass)))
10358 return 2 * ((GET_MODE_SIZE (mode) + 7) / 8U);
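/* Otherwise, charge two cost units per 4-byte word moved.  */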
10360 return 2 * ((GET_MODE_SIZE (mode) + 3) / 4U);
10363 static rtx emit_load_ptr (rtx, rtx);
10366 emit_load_ptr (rtx reg, rtx addr)
10368 rtx mem = gen_const_mem (ptr_mode, addr);
10370 if (Pmode != ptr_mode)
10371 mem = gen_rtx_SIGN_EXTEND (Pmode, mem);
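/* When Pmode is wider than ptr_mode (as on SHmedia with 32-bit
   pointers), the loaded pointer is sign-extended to Pmode.  */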
10372 return emit_move_insn (reg, mem);
10376 sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
10377 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10380 CUMULATIVE_ARGS cum;
10381 int structure_value_byref = 0;
10382 rtx this, this_value, sibcall, insns, funexp;
10383 tree funtype = TREE_TYPE (function);
10384 int simple_add = CONST_OK_FOR_ADD (delta);
10386 rtx scratch0, scratch1, scratch2;
10389 reload_completed = 1;
10390 epilogue_completed = 1;
10391 current_function_uses_only_leaf_regs = 1;
10393 emit_note (NOTE_INSN_PROLOGUE_END);
10395 /* Find the "this" pointer. We have such a wide range of ABIs for the
10396 SH that it's best to do this completely machine independently.
10397 "this" is passed as first argument, unless a structure return pointer
10398 comes first, in which case "this" comes second. */
10399 INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
10400 #ifndef PCC_STATIC_STRUCT_RETURN
10401 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
10402 structure_value_byref = 1;
10403 #endif /* not PCC_STATIC_STRUCT_RETURN */
10404 if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
10406 tree ptype = build_pointer_type (TREE_TYPE (funtype));
10408 FUNCTION_ARG_ADVANCE (cum, Pmode, ptype, 1);
10410 this = FUNCTION_ARG (cum, Pmode, ptr_type_node, 1);
10412 /* For SHcompact, we only have r0 for a scratch register: r1 is the
10413 static chain pointer (even if you can't have nested virtual functions
10414 right now, someone might implement them sometime), and the rest of the
10415 registers are used for argument passing, are callee-saved, or reserved. */
10416 /* We need to check call_used_regs / fixed_regs in case -fcall-saved-reg /
10417 -ffixed-reg has been used. */
10418 if (! call_used_regs[0] || fixed_regs[0])
10419 error ("r0 needs to be available as a call-clobbered register");
10420 scratch0 = scratch1 = scratch2 = gen_rtx_REG (Pmode, 0);
10423 if (call_used_regs[1] && ! fixed_regs[1])
10424 scratch1 = gen_rtx_REG (ptr_mode, 1);
10425 /* N.B., if not TARGET_HITACHI, register 2 is used to pass the pointer
10426 indicating where to return struct values. */
10427 if (call_used_regs[3] && ! fixed_regs[3])
10428 scratch2 = gen_rtx_REG (Pmode, 3);
10430 else if (TARGET_SHMEDIA)
10432 for (i = FIRST_GENERAL_REG; i <= LAST_GENERAL_REG; i++)
10433 if (i != REGNO (scratch0) &&
10434 call_used_regs[i] && ! fixed_regs[i] && ! FUNCTION_ARG_REGNO_P (i))
10436 scratch1 = gen_rtx_REG (ptr_mode, i);
10439 if (scratch1 == scratch0)
10440 error ("need a second call-clobbered general purpose register");
10441 for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
10442 if (call_used_regs[i] && ! fixed_regs[i])
10444 scratch2 = gen_rtx_REG (Pmode, i);
10447 if (scratch2 == scratch0)
10448 error ("need a call-clobbered target register");
10451 this_value = plus_constant (this, delta);
10453 && (simple_add || scratch0 != scratch1)
10454 && strict_memory_address_p (ptr_mode, this_value))
10456 emit_load_ptr (scratch0, this_value);
10461 ; /* Do nothing. */
10462 else if (simple_add)
10463 emit_move_insn (this, this_value);
10466 emit_move_insn (scratch1, GEN_INT (delta));
10467 emit_insn (gen_add2_insn (this, scratch1));
10475 emit_load_ptr (scratch0, this);
10477 offset_addr = plus_constant (scratch0, vcall_offset);
10478 if (strict_memory_address_p (ptr_mode, offset_addr))
10479 ; /* Do nothing. */
10480 else if (! TARGET_SH5 && scratch0 != scratch1)
10482 /* scratch0 != scratch1, and we have indexed loads. Get a better
10483 schedule by loading the offset into r1 and using an indexed
10484 load - then the load of r1 can issue before the load from
10485 (this + delta) finishes. */
10486 emit_move_insn (scratch1, GEN_INT (vcall_offset));
10487 offset_addr = gen_rtx_PLUS (Pmode, scratch0, scratch1);
10489 else if (CONST_OK_FOR_ADD (vcall_offset))
10491 emit_insn (gen_add2_insn (scratch0, GEN_INT (vcall_offset)));
10492 offset_addr = scratch0;
10494 else if (scratch0 != scratch1)
10496 emit_move_insn (scratch1, GEN_INT (vcall_offset));
10497 emit_insn (gen_add2_insn (scratch0, scratch1));
10498 offset_addr = scratch0;
10501 gcc_unreachable (); /* FIXME */
10502 emit_load_ptr (scratch0, offset_addr);
10504 if (Pmode != ptr_mode)
10505 scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
10506 emit_insn (gen_add2_insn (this, scratch0));
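/* THIS has now been adjusted by DELTA and, when VCALL_OFFSET is set, by
   *(*(this + delta) + vcall_offset) as well.  */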
10509 /* Generate a tail call to the target function. */
10510 if (! TREE_USED (function))
10512 assemble_external (function);
10513 TREE_USED (function) = 1;
10515 funexp = XEXP (DECL_RTL (function), 0);
10516 /* If the function is overridden, so is the thunk, hence we don't
10517 need GOT addressing even if this is a public symbol. */
10519 if (TARGET_SH1 && ! flag_weak)
10520 sibcall = gen_sibcalli_thunk (funexp, const0_rtx);
10523 if (TARGET_SH2 && flag_pic)
10525 sibcall = gen_sibcall_pcrel (funexp, const0_rtx);
10526 XEXP (XVECEXP (sibcall, 0, 2), 0) = scratch2;
10530 if (TARGET_SHMEDIA && flag_pic)
10532 funexp = gen_sym2PIC (funexp);
10533 PUT_MODE (funexp, Pmode);
10535 emit_move_insn (scratch2, funexp);
10536 funexp = gen_rtx_MEM (FUNCTION_MODE, scratch2);
10537 sibcall = gen_sibcall (funexp, const0_rtx, NULL_RTX);
10539 sibcall = emit_call_insn (sibcall);
10540 SIBLING_CALL_P (sibcall) = 1;
10541 use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this);
10544 /* Run just enough of rest_of_compilation to do scheduling and get
10545 the insns emitted. Note that use_thunk calls
10546 assemble_start_function and assemble_end_function. */
10548 insn_locators_alloc ();
10549 insns = get_insns ();
10554 /* Initialize the bitmap obstacks. */
10555 bitmap_obstack_initialize (NULL);
10556 bitmap_obstack_initialize (&reg_obstack);
10559 rtl_register_cfg_hooks ();
10560 init_rtl_bb_info (ENTRY_BLOCK_PTR);
10561 init_rtl_bb_info (EXIT_BLOCK_PTR);
10562 ENTRY_BLOCK_PTR->flags |= BB_RTL;
10563 EXIT_BLOCK_PTR->flags |= BB_RTL;
10564 find_basic_blocks (insns);
10566 if (flag_schedule_insns_after_reload)
10568 life_analysis (PROP_FINAL);
10570 split_all_insns (1);
10574 /* We must split the jmp insn in the PIC case. */
10576 split_all_insns_noflow ();
10583 split_all_insns_noflow ();
10589 if (optimize > 0 && flag_delayed_branch)
10590 dbr_schedule (insns);
10592 shorten_branches (insns);
10593 final_start_function (insns, file, 1);
10594 final (insns, file, 1);
10595 final_end_function ();
10596 free_after_compilation (cfun);
10598 reload_completed = 0;
10599 epilogue_completed = 0;
10603 function_symbol (rtx target, const char *name, enum sh_function_kind kind)
10607 /* If this is not an ordinary function, the name usually comes from a
10608 string literal or an sprintf buffer. Make sure we use the same
10609 string consistently, so that cse will be able to unify address loads. */
10610 if (kind != FUNCTION_ORDINARY)
10611 name = IDENTIFIER_POINTER (get_identifier (name));
10612 sym = gen_rtx_SYMBOL_REF (Pmode, name);
10613 SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_FUNCTION;
10617 case FUNCTION_ORDINARY:
10621 rtx reg = target ? target : gen_reg_rtx (Pmode);
10623 emit_insn (gen_symGOT2reg (reg, sym));
10629 /* ??? To allow cse to work, we use GOTOFF relocations.
10630 We could add combiner patterns to transform this into
10631 straight pc-relative calls with sym2PIC / bsrf when
10632 label load and function call are still 1:1 and in the
10633 same basic block during combine. */
10634 rtx reg = target ? target : gen_reg_rtx (Pmode);
10636 emit_insn (gen_symGOTOFF2reg (reg, sym));
10641 if (target && sym != target)
10643 emit_move_insn (target, sym);
10649 /* Find the number of a general purpose register in S. */
10651 scavenge_reg (HARD_REG_SET *s)
10654 for (r = FIRST_GENERAL_REG; r <= LAST_GENERAL_REG; r++)
10655 if (TEST_HARD_REG_BIT (*s, r))
10661 sh_get_pr_initial_val (void)
10665 /* ??? Unfortunately, get_hard_reg_initial_val doesn't always work for the
10666 PR register on SHcompact, because it might be clobbered by the prologue.
10667 We check first if that is known to be the case. */
10668 if (TARGET_SHCOMPACT
10669 && ((crtl->args.info.call_cookie
10670 & ~ CALL_COOKIE_RET_TRAMP (1))
10671 || crtl->saves_all_registers))
10672 return gen_frame_mem (SImode, return_address_pointer_rtx);
10674 /* If we haven't finished rtl generation, there might be a nonlocal label
10675 that we haven't seen yet.
10676 ??? get_hard_reg_initial_val fails if it is called after register
10677 allocation has started, unless it has been called before for the
10678 same register. And even then, we end up in trouble if we didn't use
10679 the register in the same basic block before. So call
10680 get_hard_reg_initial_val now and wrap it in an unspec if we might
10681 need to replace it. */
10682 /* ??? We also must do this for TARGET_SH1 in general, because otherwise
10683 combine can put the pseudo returned by get_hard_reg_initial_val into
10684 instructions that need a general purpose register, which will fail to
10685 be recognized when the pseudo becomes allocated to PR. */
10687 = get_hard_reg_initial_val (Pmode, TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
10689 return gen_rtx_UNSPEC (SImode, gen_rtvec (1, val), UNSPEC_RA);
10694 sh_expand_t_scc (enum rtx_code code, rtx target)
10696 rtx result = target;
10699 if (GET_CODE (sh_compare_op0) != REG || REGNO (sh_compare_op0) != T_REG
10700 || GET_CODE (sh_compare_op1) != CONST_INT)
10702 if (GET_CODE (result) != REG)
10703 result = gen_reg_rtx (SImode);
10704 val = INTVAL (sh_compare_op1);
10705 if ((code == EQ && val == 1) || (code == NE && val == 0))
10706 emit_insn (gen_movt (result));
10707 else if (TARGET_SH2A && ((code == EQ && val == 0)
10708 || (code == NE && val == 1)))
10709 emit_insn (gen_movrt (result));
10710 else if ((code == EQ && val == 0) || (code == NE && val == 1))
10712 emit_clobber (result);
10713 emit_insn (gen_subc (result, result, result));
10714 emit_insn (gen_addsi3 (result, result, const1_rtx));
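/* The subc above computes result - result - T = -T; adding 1 then
   yields 1 - T, i.e. the complement of the T bit.  */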
10716 else if (code == EQ || code == NE)
10717 emit_insn (gen_move_insn (result, GEN_INT (code == NE)));
10720 if (result != target)
10721 emit_move_insn (target, result);
10725 /* INSN is an sfunc; return the rtx that describes the address used. */
10727 extract_sfunc_addr (rtx insn)
10729 rtx pattern, part = NULL_RTX;
10732 pattern = PATTERN (insn);
10733 len = XVECLEN (pattern, 0);
10734 for (i = 0; i < len; i++)
10736 part = XVECEXP (pattern, 0, i);
10737 if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == Pmode
10738 && GENERAL_REGISTER_P (true_regnum (XEXP (part, 0))))
10739 return XEXP (part, 0);
10741 gcc_assert (GET_CODE (XVECEXP (pattern, 0, 0)) == UNSPEC_VOLATILE);
10742 return XVECEXP (XVECEXP (pattern, 0, 0), 0, 1);
10745 /* Verify that the register in use_sfunc_addr still agrees with the address
10746 used in the sfunc. This prevents fill_slots_from_thread from changing
10748 INSN is the use_sfunc_addr instruction, and REG is the register it
10751 check_use_sfunc_addr (rtx insn, rtx reg)
10753 /* Search for the sfunc. It should really come right after INSN. */
10754 while ((insn = NEXT_INSN (insn)))
10756 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
10758 if (! INSN_P (insn))
10761 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10762 insn = XVECEXP (PATTERN (insn), 0, 0);
10763 if (GET_CODE (PATTERN (insn)) != PARALLEL
10764 || get_attr_type (insn) != TYPE_SFUNC)
10766 return rtx_equal_p (extract_sfunc_addr (insn), reg);
10768 gcc_unreachable ();
10771 /* This function returns a constant rtx that represents 2**15 / pi in
10772 SFmode. It's used to scale SFmode angles, in radians, to a
10773 fixed-point signed 16.16-bit fraction of a full circle (i.e., 2*pi
10774 maps to 0x10000). */
10776 static GTY(()) rtx sh_fsca_sf2int_rtx;
10779 sh_fsca_sf2int (void)
10781 if (! sh_fsca_sf2int_rtx)
10783 REAL_VALUE_TYPE rv;
10785 real_from_string (&rv, "10430.378350470453");
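/* 10430.378350470453 is 2**15 / pi to the precision needed here.  */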
10786 sh_fsca_sf2int_rtx = const_double_from_real_value (rv, SFmode);
10789 return sh_fsca_sf2int_rtx;
10792 /* This function returns a constant rtx that represents 2**15 / pi in
10793 DFmode. It's used to scale DFmode angles, in radians, to a
10794 fixed-point signed 16.16-bit fraction of a full circle (i.e., 2*pi
10795 maps to 0x10000). */
10797 static GTY(()) rtx sh_fsca_df2int_rtx;
10800 sh_fsca_df2int (void)
10802 if (! sh_fsca_df2int_rtx)
10804 REAL_VALUE_TYPE rv;
10806 real_from_string (&rv, "10430.378350470453");
10807 sh_fsca_df2int_rtx = const_double_from_real_value (rv, DFmode);
10810 return sh_fsca_df2int_rtx;
/* This function returns a constant rtx that represents pi / 2**15 in
   SFmode.  It's used to scale a fixed-point signed 16.16-bit fraction
   of a full circle back to an SFmode value, i.e., 0x10000 maps to
   2*pi.  */

static GTY(()) rtx sh_fsca_int2sf_rtx;

rtx
sh_fsca_int2sf (void)
{
  if (! sh_fsca_int2sf_rtx)
    {
      REAL_VALUE_TYPE rv;

      real_from_string (&rv, "9.587379924285257e-5");
      sh_fsca_int2sf_rtx = const_double_from_real_value (rv, SFmode);
    }

  return sh_fsca_int2sf_rtx;
}

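/* Note that 9.587379924285257e-5 = pi / 2**15 is the reciprocal of the
   10430.378... constant used by sh_fsca_sf2int, so converting an angle
   to the fixed-point form and back is an identity up to rounding.  */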
/* Initialize the CUMULATIVE_ARGS structure.  */

void
sh_init_cumulative_args (CUMULATIVE_ARGS * pcum,
			 tree fntype,
			 rtx libname ATTRIBUTE_UNUSED,
			 tree fndecl,
			 signed int n_named_args,
			 enum machine_mode mode)
{
  pcum->arg_count [(int) SH_ARG_FLOAT] = 0;
  pcum->free_single_fp_reg = 0;
  pcum->stack_regs = 0;
  pcum->byref_regs = 0;
  pcum->byref = 0;
  pcum->outgoing = (n_named_args == -1) ? 0 : 1;

  /* XXX - Should we check TARGET_HITACHI here ???  */
  pcum->renesas_abi = sh_attr_renesas_p (fntype) ? 1 : 0;

  if (fntype)
    {
      pcum->force_mem = ((TARGET_HITACHI || pcum->renesas_abi)
			 && aggregate_value_p (TREE_TYPE (fntype), fndecl));
      pcum->prototype_p = TYPE_ARG_TYPES (fntype) ? TRUE : FALSE;
      pcum->arg_count [(int) SH_ARG_INT]
	= TARGET_SH5 && aggregate_value_p (TREE_TYPE (fntype), fndecl);

      pcum->call_cookie
	= CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
				 && pcum->arg_count [(int) SH_ARG_INT] == 0
				 && (TYPE_MODE (TREE_TYPE (fntype)) == BLKmode
				     ? int_size_in_bytes (TREE_TYPE (fntype))
				     : GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (fntype)))) > 4
				 && (BASE_RETURN_VALUE_REG (TYPE_MODE (TREE_TYPE (fntype)))
				     == FIRST_RET_REG));
    }
  else
    {
      pcum->arg_count [(int) SH_ARG_INT] = 0;
      pcum->prototype_p = FALSE;
      if (mode != VOIDmode)
	{
	  pcum->call_cookie =
	    CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
				   && GET_MODE_SIZE (mode) > 4
				   && BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG);

	  /* If the default ABI is the Renesas ABI then all library
	     calls must assume that the library will be using the
	     Renesas ABI.  So if the function would return its result
	     in memory then we must force the address of this memory
	     block onto the stack.  Ideally we would like to call
	     targetm.calls.return_in_memory() here but we do not have
	     the TYPE or the FNDECL available so we synthesize the
	     contents of that function as best we can.  */
	  pcum->force_mem =
	    (TARGET_DEFAULT & MASK_HITACHI)
	    && (mode == BLKmode
		|| (GET_MODE_SIZE (mode) > 4
		    && !(mode == DFmode
			 && TARGET_FPU_DOUBLE)));
	}
      else
	{
	  pcum->call_cookie = 0;
	  pcum->force_mem = FALSE;
	}
    }
}

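/* For instance, for an outgoing libcall (no FNTYPE or FNDECL available)
   that returns a DImode value, the heuristic above sets force_mem
   whenever the default ABI is the Renesas one, while a DFmode return is
   kept out of memory when the FPU handles doubles directly.  */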
/* Replace any occurrence of FROM(n) in X with TO(n).  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.

   This is like replace_rtx, except that we operate on N_REPLACEMENTS
   replacements simultaneously - FROM(n) is replacements[n*2] and TO(n) is
   replacements[n*2+1] - and that we take mode changes into account.

   If a replacement is ambiguous, return NULL_RTX.

   If MODIFY is zero, don't modify any rtl in place,
   just return zero or nonzero for failure / success.  */
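/* For example (with hypothetical operands), given
     rtx repl[4] = { from0, to0, from1, to1 };
   a call replace_n_hard_rtx (pat, repl, 2, 1) rewrites every occurrence
   of FROM0 as TO0 and of FROM1 as TO1 throughout PAT in a single
   walk.  */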
rtx
replace_n_hard_rtx (rtx x, rtx *replacements, int n_replacements, int modify)
{
  int i, j;
  const char *fmt;

  /* The following prevents loops when we change a MEM inside a
     CONST_DOUBLE into the same CONST_DOUBLE.  */
  if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
    return x;

  for (i = n_replacements - 1; i >= 0 ; i--)
    if (x == replacements[i*2] && GET_MODE (x) == GET_MODE (replacements[i*2+1]))
      return replacements[i*2+1];

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new = replace_n_hard_rtx (SUBREG_REG (x), replacements,
				    n_replacements, modify);

      if (GET_CODE (new) == CONST_INT)
	{
	  x = simplify_subreg (GET_MODE (x), new,
			       GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
	  gcc_assert (x);
	}
      else if (modify)
	SUBREG_REG (x) = new;

      return x;
    }
  else if (GET_CODE (x) == REG)
    {
      unsigned regno = REGNO (x);
      unsigned nregs = (regno < FIRST_PSEUDO_REGISTER
			? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
      rtx result = NULL_RTX;

      for (i = n_replacements - 1; i >= 0; i--)
	{
	  rtx from = replacements[i*2];
	  rtx to = replacements[i*2+1];
	  unsigned from_regno, from_nregs, to_regno, new_regno;

	  if (GET_CODE (from) != REG)
	    continue;
	  from_regno = REGNO (from);
	  from_nregs = (from_regno < FIRST_PSEUDO_REGISTER
			? HARD_REGNO_NREGS (from_regno, GET_MODE (from)) : 1);
	  if (regno < from_regno + from_nregs && regno + nregs > from_regno)
	    {
	      if (regno < from_regno
		  || regno + nregs > from_regno + nregs
		  || GET_CODE (to) != REG
		  || result)
		return NULL_RTX;
	      to_regno = REGNO (to);
	      if (to_regno < FIRST_PSEUDO_REGISTER)
		{
		  new_regno = regno + to_regno - from_regno;
		  if ((unsigned) HARD_REGNO_NREGS (new_regno, GET_MODE (x))
		      != nregs)
		    return NULL_RTX;
		  result = gen_rtx_REG (GET_MODE (x), new_regno);
		}
	      else if (GET_MODE (x) <= GET_MODE (to))
		result = gen_lowpart_common (GET_MODE (x), to);
	      else
		result = gen_lowpart_SUBREG (GET_MODE (x), to);
	    }
	}
      return result ? result : x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new = replace_n_hard_rtx (XEXP (x, 0), replacements,
				    n_replacements, modify);

      if (GET_CODE (new) == CONST_INT)
	{
	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					new, GET_MODE (XEXP (x, 0)));
	  gcc_assert (x);
	}
      else if (modify)
	XEXP (x, 0) = new;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      rtx new;

      if (fmt[i] == 'e')
	{
	  new = replace_n_hard_rtx (XEXP (x, i), replacements,
				    n_replacements, modify);
	  if (!new)
	    return NULL_RTX;
	  if (modify)
	    XEXP (x, i) = new;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  {
	    new = replace_n_hard_rtx (XVECEXP (x, i, j), replacements,
				      n_replacements, modify);
	    if (!new)
	      return NULL_RTX;
	    if (modify)
	      XVECEXP (x, i, j) = new;
	  }
    }

  return x;
}

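/* Generate a TRUNCATE of X to MODE, folding away a surrounding sign or
   zero extension where possible.  If NEED_SIGN_EXT is nonzero, the result
   must preserve the sign of the truncated value, so only a SIGN_EXTEND
   may be substituted for the truncation.  */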
rtx
sh_gen_truncate (enum machine_mode mode, rtx x, int need_sign_ext)
{
  enum rtx_code code = TRUNCATE;

  if (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
    {
      rtx inner = XEXP (x, 0);
      enum machine_mode inner_mode = GET_MODE (inner);

      if (inner_mode == mode)
	return inner;
      else if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
	x = inner;
      else if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode)
	       && (! need_sign_ext || GET_CODE (x) == SIGN_EXTEND))
	{
	  code = GET_CODE (x);
	  x = inner;
	}
    }
  return gen_rtx_fmt_e (code, mode, x);
}

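/* For instance, asking for an SImode truncation of
   (sign_extend:DI (reg:HI r)) with NEED_SIGN_EXT set yields
   (sign_extend:SI (reg:HI r)) rather than a truncation of the DImode
   extension, since HImode is narrower than SImode and the extension is
   already a sign extension.  */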
/* Called via for_each_rtx after reload, to clean up truncates of
   registers that span multiple actual hard registers.  */
int
shmedia_cleanup_truncate (rtx *p, void *n_changes)
{
  rtx x = *p, reg;

  if (GET_CODE (x) != TRUNCATE)
    return 0;
  reg = XEXP (x, 0);
  if (GET_MODE_SIZE (GET_MODE (reg)) > 8 && GET_CODE (reg) == REG)
    {
      enum machine_mode reg_mode = GET_MODE (reg);
      XEXP (x, 0) = simplify_subreg (DImode, reg, reg_mode,
				     subreg_lowpart_offset (DImode, reg_mode));
      *(int*) n_changes += 1;
      return -1;
    }
  return 0;
}

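/* The effect is to rewrite, e.g., (truncate:SI (reg:TI r)) so that it
   truncates only the low DImode part of R, which corresponds to a single
   64-bit hard register once registers have been allocated.  Returning -1
   tells for_each_rtx not to descend into the rewritten expression.  */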
/* Load and store depend on the highpart of the address.  However,
   set_attr_alternative does not give well-defined results before reload,
   so we must look at the rtl ourselves to see if any of the feeding
   registers is used in a memref.  */

/* Called by sh_contains_memref_p via for_each_rtx.  */
static int
sh_contains_memref_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  return (GET_CODE (*loc) == MEM);
}

/* Return nonzero iff INSN contains a MEM.  */
int
sh_contains_memref_p (rtx insn)
{
  return for_each_rtx (&PATTERN (insn), &sh_contains_memref_p_1, NULL);
}

/* Return nonzero iff INSN loads a banked register.  */
int
sh_loads_bankedreg_p (rtx insn)
{
  if (GET_CODE (PATTERN (insn)) == SET)
    {
      rtx op = SET_DEST (PATTERN (insn));
      if (REG_P (op) && BANKED_REGISTER_P (REGNO (op)))
	return 1;
    }

  return 0;
}

/* FNADDR is the MEM expression from a call expander.  Return an address
   to use in an SHmedia insn pattern.  */
rtx
shmedia_prepare_call_address (rtx fnaddr, int is_sibcall)
{
  int is_sym;

  fnaddr = XEXP (fnaddr, 0);
  is_sym = GET_CODE (fnaddr) == SYMBOL_REF;
  if (flag_pic && is_sym)
    {
      if (! SYMBOL_REF_LOCAL_P (fnaddr))
	{
	  rtx reg = gen_reg_rtx (Pmode);

	  /* We must not use GOTPLT for sibcalls, because PIC_REG
	     must be restored before the PLT code gets to run.  */
	  if (is_sibcall)
	    emit_insn (gen_symGOT2reg (reg, fnaddr));
	  else
	    emit_insn (gen_symGOTPLT2reg (reg, fnaddr));
	  fnaddr = reg;
	}
      else
	{
	  fnaddr = gen_sym2PIC (fnaddr);
	  PUT_MODE (fnaddr, Pmode);
	}
    }
  /* If ptabs might trap, make this visible to the rest of the compiler.
     We generally assume that symbols pertain to valid locations, but
     it is possible to generate invalid symbols with asm or linker tricks.
     In a list of functions where each returns its successor, an invalid
     symbol might denote an empty list.  */
  if (!TARGET_PT_FIXED
      && (!is_sym || TARGET_INVALID_SYMBOLS)
      && (!REG_P (fnaddr) || ! TARGET_REGISTER_P (REGNO (fnaddr))))
    {
      rtx tr = gen_reg_rtx (PDImode);

      emit_insn (gen_ptabs (tr, fnaddr));
      fnaddr = tr;
    }
  else if (! target_reg_operand (fnaddr, Pmode))
    fnaddr = copy_to_mode_reg (Pmode, fnaddr);
  return fnaddr;
}

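/* Implement the TARGET_SECONDARY_RELOAD hook: return the class of an
   intermediate register needed to move X into (IN_P nonzero) or out of
   a register of CLASS in MODE, or NO_REGS if none is needed.  Where a
   dedicated reload pattern handles the move instead, SRI->icode is set
   and NO_REGS is returned.  */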
static enum reg_class
sh_secondary_reload (bool in_p, rtx x, enum reg_class class,
		     enum machine_mode mode, secondary_reload_info *sri)
{
  if (in_p)
    {
      if (REGCLASS_HAS_FP_REG (class)
	  && ! TARGET_SHMEDIA
	  && immediate_operand ((x), mode)
	  && ! ((fp_zero_operand (x) || fp_one_operand (x))
		&& mode == SFmode && fldi_ok ()))
	switch (mode)
	  {
	  case SFmode:
	    sri->icode = CODE_FOR_reload_insf__frn;
	    return NO_REGS;
	  case DFmode:
	    sri->icode = CODE_FOR_reload_indf__frn;
	    return NO_REGS;
	  case SImode:
	    /* ??? If we knew that we are in the appropriate mode -
	       single precision - we could use a reload pattern directly.  */
	    return FPUL_REGS;
	  default:
	    gcc_unreachable ();
	  }
      if (class == FPUL_REGS
	  && ((GET_CODE (x) == REG
	       && (REGNO (x) == MACL_REG || REGNO (x) == MACH_REG
		   || REGNO (x) == T_REG))
	      || GET_CODE (x) == PLUS))
	return GENERAL_REGS;
      if (class == FPUL_REGS && immediate_operand (x, mode))
	{
	  if (satisfies_constraint_I08 (x) || fp_zero_operand (x))
	    return GENERAL_REGS;
	  else if (mode == SFmode)
	    return FP_REGS;
	  sri->icode = CODE_FOR_reload_insi__i_fpul;
	  return NO_REGS;
	}
      if (class == FPSCR_REGS
	  && ((GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
	      || (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == PLUS)))
	return GENERAL_REGS;
      if (REGCLASS_HAS_FP_REG (class)
	  && TARGET_SHMEDIA
	  && immediate_operand (x, mode)
	  && x != CONST0_RTX (GET_MODE (x))
	  && GET_MODE (x) != V4SFmode)
	return GENERAL_REGS;
      if ((mode == QImode || mode == HImode)
	  && TARGET_SHMEDIA && inqhi_operand (x, mode))
	{
	  sri->icode = ((mode == QImode)
			? CODE_FOR_reload_inqi : CODE_FOR_reload_inhi);
	  return NO_REGS;
	}
      if (TARGET_SHMEDIA && class == GENERAL_REGS
	  && (GET_CODE (x) == LABEL_REF || PIC_DIRECT_ADDR_P (x)))
	return TARGET_REGS;
    } /* end of input-only processing.  */
  if (((REGCLASS_HAS_FP_REG (class)
	&& (GET_CODE (x) == REG
	    && (GENERAL_OR_AP_REGISTER_P (REGNO (x))
		|| (FP_REGISTER_P (REGNO (x)) && mode == SImode
		    && TARGET_FMOVD))))
       || (REGCLASS_HAS_GENERAL_REG (class)
	   && GET_CODE (x) == REG
	   && FP_REGISTER_P (REGNO (x))))
      && ! TARGET_SHMEDIA
      && (mode == SFmode || mode == SImode))
    return FPUL_REGS;
  if ((class == FPUL_REGS
       || (REGCLASS_HAS_FP_REG (class)
	   && ! TARGET_SHMEDIA && mode == SImode))
      && (GET_CODE (x) == MEM
	  || (GET_CODE (x) == REG
	      && (REGNO (x) >= FIRST_PSEUDO_REGISTER
		  || REGNO (x) == T_REG
		  || system_reg_operand (x, VOIDmode)))))
    {
      if (class == FPUL_REGS)
	return GENERAL_REGS;
      return FPUL_REGS;
    }
  if ((class == TARGET_REGS
       || (TARGET_SHMEDIA && class == SIBCALL_REGS))
      && !satisfies_constraint_Csy (x)
      && (GET_CODE (x) != REG || ! GENERAL_REGISTER_P (REGNO (x))))
    return GENERAL_REGS;
  if ((class == MAC_REGS || class == PR_REGS)
      && GET_CODE (x) == REG && ! GENERAL_REGISTER_P (REGNO (x))
      && class != REGNO_REG_CLASS (REGNO (x)))
    return GENERAL_REGS;
  if (class != GENERAL_REGS && GET_CODE (x) == REG
      && TARGET_REGISTER_P (REGNO (x)))
    return GENERAL_REGS;
  return NO_REGS;
}

enum sh_divide_strategy_e sh_div_strategy = SH_DIV_STRATEGY_DEFAULT;