/* Output routines for GCC for Renesas / SuperH SH.
   Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
   2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
   Contributed by Steve Chamberlain (sac@cygnus.com).
   Improved by Jim Wilson (wilson@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "insn-config.h"
#include "rtl.h"
#include "tree.h"
#include "flags.h"
#include "expr.h"
#include "optabs.h"
#include "function.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "output.h"
#include "insn-attr.h"
#include "toplev.h"
#include "recog.h"
#include "integrate.h"
#include "dwarf2.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "real.h"
#include "langhooks.h"
#include "basic-block.h"
#include "df.h"
#include "cfglayout.h"
#include "intl.h"
#include "sched-int.h"
#include "ggc.h"
#include "tree-gimple.h"
#include "cfgloop.h"
#include "alloc-pool.h"
#include "tm-constrs.h"

int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;

#define MSW (TARGET_LITTLE_ENDIAN ? 1 : 0)
#define LSW (TARGET_LITTLE_ENDIAN ? 0 : 1)
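
/* For example (illustrative): a DImode value held in the register pair
   starting at r4 has its most significant word in register r4 + MSW and
   its least significant word in r4 + LSW, i.e. the pair reads (r5,r4)
   on little endian targets and (r4,r5) on big endian ones.  */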

/* These are some macros to abstract register modes.  */
#define CONST_OK_FOR_ADD(size) \
  (TARGET_SHMEDIA ? CONST_OK_FOR_I10 (size) : CONST_OK_FOR_I08 (size))
#define GEN_MOV (*(TARGET_SHMEDIA64 ? gen_movdi : gen_movsi))
#define GEN_ADD3 (*(TARGET_SHMEDIA64 ? gen_adddi3 : gen_addsi3))
#define GEN_SUB3 (*(TARGET_SHMEDIA64 ? gen_subdi3 : gen_subsi3))
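
/* A quick sanity check of the ranges this implies (illustrative only):
   I08 accepts signed 8-bit constants (-128..127), matching the SH
   "add #imm,Rn" immediate field, while SHmedia's I10 widens this to
   signed 10-bit (-512..511); so CONST_OK_FOR_ADD (256) holds only
   when TARGET_SHMEDIA.  */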

/* Used to simplify the logic below.  Find the attributes wherever
   they may be.  */
#define SH_ATTRIBUTES(decl) \
  (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
		  : DECL_ATTRIBUTES (decl) \
		  ? (DECL_ATTRIBUTES (decl)) \
		  : TYPE_ATTRIBUTES (TREE_TYPE (decl))
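
/* E.g., applied to a FUNCTION_DECL this yields the decl's own attribute
   list when it has one, and otherwise falls back to the attributes of
   the decl's type; applied to a type it uses TYPE_ATTRIBUTES directly
   (illustrative).  */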

/* Set to 1 by expand_prologue() when the function is an interrupt handler.  */
int current_function_interrupt;

tree sh_deferred_function_attributes;
tree *sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;

/* Global variables for machine-dependent things.  */

/* Which cpu are we scheduling for.  */
enum processor_type sh_cpu;

/* Definitions used in ready queue reordering for first scheduling pass.  */

/* Reg weights arrays for modes SFmode and SImode, indexed by insn LUID.  */
static short *regmode_weight[2];

/* Total SFmode and SImode weights of scheduled insns.  */
static int curr_regmode_pressure[2];

/* Number of r0 life regions.  */
static int r0_life_regions;

/* If true, skip cycles for Q -> R movement.  */
static int skip_cycles = 0;

/* Cached value of can_issue_more.  This is cached in sh_variable_issue hook
   and returned from sh_reorder2.  */
static short cached_can_issue_more;

/* Saved operands from the last compare to use when we generate an scc
   or bcc insn.  */

rtx sh_compare_op0;
rtx sh_compare_op1;

/* Provides the class number of the smallest class containing
   reg number.  */

enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  R0_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  FP0_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  NO_REGS, GENERAL_REGS, PR_REGS, T_REGS,
  MAC_REGS, MAC_REGS, FPUL_REGS, FPSCR_REGS,
  GENERAL_REGS, GENERAL_REGS,
};

char sh_register_names[FIRST_PSEUDO_REGISTER] \
  [MAX_REGISTER_NAME_LENGTH + 1] = SH_REGISTER_NAMES_INITIALIZER;

char sh_additional_register_names[ADDREGNAMES_SIZE] \
  [MAX_ADDITIONAL_REGISTER_NAME_LENGTH + 1]
  = SH_ADDITIONAL_REGISTER_NAMES_INITIALIZER;

int assembler_dialect;

static bool shmedia_space_reserved_for_target_registers;

static bool sh_handle_option (size_t, const char *, int);
static void split_branches (rtx);
static int branch_dest (rtx);
static void force_into (rtx, rtx);
static void print_slot (rtx);
static rtx add_constant (rtx, enum machine_mode, rtx);
static void dump_table (rtx, rtx);
static int hi_const (rtx);
static int broken_move (rtx);
static int mova_p (rtx);
static rtx find_barrier (int, rtx, rtx);
static int noncall_uses_reg (rtx, rtx, rtx *);
static rtx gen_block_redirect (rtx, int, int);
static void sh_reorg (void);
static void output_stack_adjust (int, rtx, int, HARD_REG_SET *);
static rtx frame_insn (rtx);
static rtx push (int);
static void pop (int);
static void push_regs (HARD_REG_SET *, int);
static int calc_live_regs (HARD_REG_SET *);
static HOST_WIDE_INT rounded_frame_size (int);
static rtx mark_constant_pool_use (rtx);
const struct attribute_spec sh_attribute_table[];
static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_resbank_handler_attribute (tree *, tree,
						 tree, int, bool *);
static tree sh2a_handle_function_vector_handler_attribute (tree *, tree,
							   tree, int, bool *);
static tree sh_handle_sp_switch_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_trap_exit_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_renesas_attribute (tree *, tree, tree, int, bool *);
static void sh_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void sh_insert_attributes (tree, tree *);
static const char *sh_check_pch_target_flags (int);
static int sh_adjust_cost (rtx, rtx, rtx, int);
static int sh_issue_rate (void);
static int sh_dfa_new_cycle (FILE *, int, rtx, int, int, int *sort_p);
static short find_set_regmode_weight (rtx, enum machine_mode);
static short find_insn_regmode_weight (rtx, enum machine_mode);
static void find_regmode_weight (basic_block, enum machine_mode);
static int find_r0_life_regions (basic_block);
static void sh_md_init_global (FILE *, int, int);
static void sh_md_finish_global (FILE *, int);
static int rank_for_reorder (const void *, const void *);
static void swap_reorder (rtx *, int);
static void ready_reorder (rtx *, int);
static short high_pressure (enum machine_mode);
static int sh_reorder (FILE *, int, rtx *, int *, int);
static int sh_reorder2 (FILE *, int, rtx *, int *, int);
static void sh_md_init (FILE *, int, int);
static int sh_variable_issue (FILE *, int, rtx, int);

static bool sh_function_ok_for_sibcall (tree, tree);

static bool sh_cannot_modify_jumps_p (void);
static int sh_target_reg_class (void);
static bool sh_optimize_target_register_callee_saved (bool);
static bool sh_ms_bitfield_layout_p (const_tree);

static void sh_init_builtins (void);
static void sh_media_init_builtins (void);
static rtx sh_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
static void sh_file_start (void);
static int flow_dependent_p (rtx, rtx);
static void flow_dependent_p_1 (rtx, const_rtx, void *);
static int shiftcosts (rtx);
static int andcosts (rtx);
static int addsubcosts (rtx);
static int multcosts (rtx);
static bool unspec_caller_rtx_p (rtx);
static bool sh_cannot_copy_insn_p (rtx);
static bool sh_rtx_costs (rtx, int, int, int *);
static int sh_address_cost (rtx);
static int sh_pr_n_sets (void);
static rtx sh_allocate_initial_value (rtx);
static int shmedia_target_regs_stack_space (HARD_REG_SET *);
static int shmedia_reserve_space_for_target_registers_p (int, HARD_REG_SET *);
static int shmedia_target_regs_stack_adjust (HARD_REG_SET *);
static int scavenge_reg (HARD_REG_SET *s);
struct save_schedule_s;
static struct save_entry_s *sh5_schedule_saves (HARD_REG_SET *,
						struct save_schedule_s *, int);

static rtx sh_struct_value_rtx (tree, int);
static bool sh_return_in_memory (const_tree, const_tree);
static rtx sh_builtin_saveregs (void);
static void sh_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
static bool sh_strict_argument_naming (CUMULATIVE_ARGS *);
static bool sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *);
static tree sh_build_builtin_va_list (void);
static void sh_va_start (tree, rtx);
static tree sh_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool sh_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				  const_tree, bool);
static bool sh_callee_copies (CUMULATIVE_ARGS *, enum machine_mode,
			      const_tree, bool);
static int sh_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				 tree, bool);
static bool sh_scalar_mode_supported_p (enum machine_mode);
static int sh_dwarf_calling_convention (const_tree);
static void sh_encode_section_info (tree, rtx, int);
static int sh2a_function_vector_p (tree);

/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sh_attribute_table

/* The next two are used for debug info when compiling with -gdwarf.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.ualong\t"

/* These are NULLed out on non-SH5 in OVERRIDE_OPTIONS.  */
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaquad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sh_output_function_epilogue

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sh_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START sh_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sh_handle_option

#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES sh_insert_attributes

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sh_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sh_issue_rate

/* The next 5 hooks have been implemented for reenabling sched1.  With the
   help of these macros we are limiting the movement of insns in sched1 to
   reduce the register pressure.  The overall idea is to keep count of SImode
   and SFmode regs required by already scheduled insns.  When these counts
   cross some threshold values; give priority to insns that free registers.
   The insn that frees registers is most likely to be the insn with lowest
   LUID (original insn order); but such an insn might be there in the stalled
   queue (Q) instead of the ready queue (R).  To solve this, we skip cycles
   up to a max of 8 cycles so that such insns may move from Q -> R.

   The hooks are described below:

   TARGET_SCHED_INIT_GLOBAL: Added a new target hook in the generic
   scheduler; it is called inside the sched_init function just after
   find_insn_reg_weights function call.  It is used to calculate the SImode
   and SFmode weights of insns of basic blocks; much similar to what
   find_insn_reg_weights does.
   TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.

   TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
   indicated by TARGET_SCHED_REORDER2; doing this may move insns from
   Q -> R.

   TARGET_SCHED_REORDER: If the register pressure for SImode or SFmode is
   high; reorder the ready queue so that the insn with lowest LUID will be
   issued next.

   TARGET_SCHED_REORDER2: If the register pressure is high, indicate to
   TARGET_SCHED_DFA_NEW_CYCLE to skip cycles.

   TARGET_SCHED_VARIABLE_ISSUE: Cache the value of can_issue_more so that it
   can be returned from TARGET_SCHED_REORDER2.

   TARGET_SCHED_INIT: Reset the register pressure counting variables.  */
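
/* To make the effect concrete (illustrative only): if insns I1 (LUID 10)
   and I2 (LUID 20) are both ready while the SImode count is past its
   threshold, sh_reorder places I1 first, since the insn earliest in the
   original order is the one most likely to free a register; and when
   such an insn is still sitting in Q, sh_dfa_new_cycle may skip up to 8
   cycles so that it can migrate from Q to R.  */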

#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE sh_dfa_new_cycle

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL sh_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL sh_md_finish_global

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE sh_variable_issue

#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER sh_reorder

#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 sh_reorder2

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sh_md_init

#undef TARGET_CANNOT_MODIFY_JUMPS_P
#define TARGET_CANNOT_MODIFY_JUMPS_P sh_cannot_modify_jumps_p
#undef TARGET_BRANCH_TARGET_REGISTER_CLASS
#define TARGET_BRANCH_TARGET_REGISTER_CLASS sh_target_reg_class
#undef TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED
#define TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED \
  sh_optimize_target_register_callee_saved

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P sh_ms_bitfield_layout_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sh_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sh_expand_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sh_function_ok_for_sibcall

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P sh_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sh_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST sh_address_cost
#undef TARGET_ALLOCATE_INITIAL_VALUE
#define TARGET_ALLOCATE_INITIAL_VALUE sh_allocate_initial_value

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG sh_reorg

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS sh_promote_prototypes
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN sh_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sh_return_in_memory

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sh_builtin_saveregs
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS sh_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sh_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES sh_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sh_arg_partial_bytes

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sh_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sh_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P sh_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sh_vector_mode_supported_p

#undef TARGET_CHECK_PCH_TARGET_FLAGS
#define TARGET_CHECK_PCH_TARGET_FLAGS sh_check_pch_target_flags

#undef TARGET_DWARF_CALLING_CONVENTION
#define TARGET_DWARF_CALLING_CONVENTION sh_dwarf_calling_convention

/* Return regmode weight for insn.  */
#define INSN_REGMODE_WEIGHT(INSN, MODE) regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)]

/* Return current register pressure for regmode.  */
#define CURR_REGMODE_PRESSURE(MODE) curr_regmode_pressure[((MODE) == SImode) ? 0 : 1]
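
/* So, e.g., CURR_REGMODE_PRESSURE (SFmode) expands to
   curr_regmode_pressure[1], matching the SFmode slot of the
   regmode_weight[] arrays declared above.  */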

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO sh_encode_section_info

#ifdef SYMBIAN

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO sh_symbian_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING sh_symbian_strip_name_encoding
#undef TARGET_CXX_IMPORT_EXPORT_CLASS
#define TARGET_CXX_IMPORT_EXPORT_CLASS symbian_import_export_class

#endif /* SYMBIAN */

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sh_secondary_reload

/* Machine-specific symbol_ref flags.  */
#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)

struct gcc_target targetm = TARGET_INITIALIZER;

/* Implement TARGET_HANDLE_OPTION.  */

static bool
sh_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED,
		  int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_m1:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH1;
      return true;

    case OPT_m2:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2;
      return true;

    case OPT_m2a:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A;
      return true;

    case OPT_m2a_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_NOFPU;
      return true;

    case OPT_m2a_single:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE;
      return true;

    case OPT_m2a_single_only:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE_ONLY;
      return true;

    case OPT_m2e:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2E;
      return true;

    case OPT_m3:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3;
      return true;

    case OPT_m3e:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3E;
      return true;

    case OPT_m4:
    case OPT_m4_100:
    case OPT_m4_200:
    case OPT_m4_300:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4;
      return true;

    case OPT_m4_nofpu:
    case OPT_m4_100_nofpu:
    case OPT_m4_200_nofpu:
    case OPT_m4_300_nofpu:
    case OPT_m4_340:
    case OPT_m4_400:
    case OPT_m4_500:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_NOFPU;
      return true;

    case OPT_m4_single:
    case OPT_m4_100_single:
    case OPT_m4_200_single:
    case OPT_m4_300_single:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE;
      return true;

    case OPT_m4_single_only:
    case OPT_m4_100_single_only:
    case OPT_m4_200_single_only:
    case OPT_m4_300_single_only:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE_ONLY;
      return true;

    case OPT_m4a:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A;
      return true;

    case OPT_m4a_nofpu:
    case OPT_m4al:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_NOFPU;
      return true;

    case OPT_m4a_single:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE;
      return true;

    case OPT_m4a_single_only:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE_ONLY;
      return true;

    case OPT_m5_32media:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA;
      return true;

    case OPT_m5_32media_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA_NOFPU;
      return true;

    case OPT_m5_64media:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA;
      return true;

    case OPT_m5_64media_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA_NOFPU;
      return true;

    case OPT_m5_compact:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT;
      return true;

    case OPT_m5_compact_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT_NOFPU;
      return true;

    default:
      return true;
    }
}

/* Print the operand address in x to the stream.  */
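
/* For example (illustrative): (plus (reg:SI 4) (const_int 8)) prints as
   "@(8,r4)", an r0 index as "@(r0,r4)", (pre_dec (reg:SI 15)) as "@-r15",
   and (post_inc (reg:SI 5)) as "@r5+".  */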

static void
print_operand_address (FILE *stream, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
      fprintf (stream, "@%s", reg_names[true_regnum (x)]);
      break;

    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "@(%d,%s)", (int) INTVAL (index),
		     reg_names[true_regnum (base)]);
	    break;

	  case REG:
	  case SUBREG:
	    {
	      int base_num = true_regnum (base);
	      int index_num = true_regnum (index);

	      fprintf (stream, "@(r0,%s)",
		       reg_names[MAX (base_num, index_num)]);
	      break;
	    }

	  default:
	    gcc_unreachable ();
	  }
      }
      break;

    case PRE_DEC:
      fprintf (stream, "@-%s", reg_names[true_regnum (XEXP (x, 0))]);
      break;

    case POST_INC:
      fprintf (stream, "@%s+", reg_names[true_regnum (XEXP (x, 0))]);
      break;

    default:
      x = mark_constant_pool_use (x);
      output_addr_const (stream, x);
      break;
    }
}

/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   '.'  print a .s if insn needs delay slot
   ','  print LOCAL_LABEL_PREFIX
   '@'  print trap, rte or rts depending upon pragma interruptness
   '#'  output a nop if there is nothing to put in the delay slot
   '''  print likelihood suffix (/u for unlikely).
   '>'  print branch target if -fverbose-asm
   'O'  print a constant without the #
   'R'  print the LSW of a dp value - changes if in little endian
   'S'  print the MSW of a dp value - changes if in little endian
   'T'  print the next word of a dp value - same as 'R' in big endian mode.
   'M'  SHMEDIA: print an `x' if `m' will print `base,index'.
        otherwise: print .b / .w / .l / .s / .d suffix if operand is a MEM.
   'N'  print 'r63' if the operand is (const_int 0).
   'd'  print a V2SF reg as dN instead of fpN.
   'm'  print a pair `base,offset' or `base,index', for LD and ST.
   'U'  Likewise for {LD,ST}{HI,LO}.
   'V'  print the position of a single bit set.
   'W'  print the position of a single bit cleared.
   't'  print a memory address which is a register.
   'u'  prints the lowest 16 bits of CONST_INT, as an unsigned value.
   'o'  output an operator.  */
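
/* For instance (illustrative): with a DImode value in the register pair
   r4/r5 on a little endian target, %S0 prints "r5" (the MSW) and %R0
   prints "r4" (the LSW); for a DFmode value in dr4 = fr4/fr5, which as
   a pair is always big endian, %S0 prints "fr4" and %R0 prints "fr5".  */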

static void
print_operand (FILE *stream, rtx x, int code)
{
  int regno;
  enum machine_mode mode;

  switch (code)
    {
      tree trapa_attr;

    case '.':
      if (final_sequence
	  && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
	  && get_attr_length (XVECEXP (final_sequence, 0, 1)))
	fprintf (stream, ASSEMBLER_DIALECT ? "/s" : ".s");
      break;
    case ',':
      fprintf (stream, "%s", LOCAL_LABEL_PREFIX);
      break;
    case '@':
      trapa_attr = lookup_attribute ("trap_exit",
				     DECL_ATTRIBUTES (current_function_decl));
      if (trapa_attr)
	fprintf (stream, "trapa #%ld",
		 (long) TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (trapa_attr))));
      else if (sh_cfun_interrupt_handler_p ())
	{
	  if (sh_cfun_resbank_handler_p ())
	    fprintf (stream, "resbank\n");
	  fprintf (stream, "rte");
	}
      else
	fprintf (stream, "rts");
      break;
    case '#':
      /* Output a nop if there's nothing in the delay slot.  */
      if (dbr_sequence_length () == 0)
	fprintf (stream, "\n\tnop");
      break;
    case '\'':
      {
	rtx note = find_reg_note (current_output_insn, REG_BR_PROB, 0);

	if (note && INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
	  fputs ("/u", stream);
	break;
      }
    case '>':
      if (flag_verbose_asm && JUMP_LABEL (current_output_insn))
	{
	  fputs ("\t! target: ", stream);
	  output_addr_const (stream, JUMP_LABEL (current_output_insn));
	}
      break;
    case 'O':
      x = mark_constant_pool_use (x);
      output_addr_const (stream, x);
      break;
      /* N.B.: %R / %S / %T adjust memory addresses by four.
	 For SHMEDIA, that means they can be used to access the first and
	 second 32 bit part of a 64 bit (or larger) value that
	 might be held in floating point registers or memory.
	 While they can be used to access 64 bit parts of a larger value
	 held in general purpose registers, that won't work with memory -
	 neither for fp registers, since the frxx names are used.  */
    case 'R':
      if (REG_P (x) || GET_CODE (x) == SUBREG)
	{
	  regno = true_regnum (x);
	  regno += FP_REGISTER_P (regno) ? 1 : LSW;
	  fputs (reg_names[regno], (stream));
	}
      else if (GET_CODE (x) == MEM)
	{
	  x = adjust_address (x, SImode, 4 * LSW);
	  print_operand_address (stream, XEXP (x, 0));
	}
      else
	{
	  rtx sub = NULL_RTX;

	  mode = GET_MODE (x);
	  if (mode == VOIDmode)
	    mode = DImode;
	  if (GET_MODE_SIZE (mode) >= 8)
	    sub = simplify_subreg (SImode, x, mode, 4 * LSW);
	  if (sub)
	    print_operand (stream, sub, 0);
	  else
	    output_operand_lossage ("invalid operand to %%R");
	}
      break;
    case 'S':
      if (REG_P (x) || GET_CODE (x) == SUBREG)
	{
	  regno = true_regnum (x);
	  regno += FP_REGISTER_P (regno) ? 0 : MSW;
	  fputs (reg_names[regno], (stream));
	}
      else if (GET_CODE (x) == MEM)
	{
	  x = adjust_address (x, SImode, 4 * MSW);
	  print_operand_address (stream, XEXP (x, 0));
	}
      else
	{
	  rtx sub = NULL_RTX;

	  mode = GET_MODE (x);
	  if (mode == VOIDmode)
	    mode = DImode;
	  if (GET_MODE_SIZE (mode) >= 8)
	    sub = simplify_subreg (SImode, x, mode, 4 * MSW);
	  if (sub)
	    print_operand (stream, sub, 0);
	  else
	    output_operand_lossage ("invalid operand to %%S");
	}
      break;
    case 'T':
      /* Next word of a double.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;
	case MEM:
	  if (GET_CODE (XEXP (x, 0)) != PRE_DEC
	      && GET_CODE (XEXP (x, 0)) != POST_INC)
	    x = adjust_address (x, SImode, 4);
	  print_operand_address (stream, XEXP (x, 0));
	  break;
	default:
	  break;
	}
      break;

    case 't':
      gcc_assert (GET_CODE (x) == MEM);
      x = XEXP (x, 0);
      switch (GET_CODE (x))
	{
	case REG:
	case SUBREG:
	  print_operand (stream, x, 0);
	  break;
	default:
	  break;
	}
      break;

    case 'o':
      switch (GET_CODE (x))
	{
	case PLUS:  fputs ("add", stream); break;
	case MINUS: fputs ("sub", stream); break;
	case MULT:  fputs ("mul", stream); break;
	case DIV:   fputs ("div", stream); break;
	case EQ:    fputs ("eq",  stream); break;
	case NE:    fputs ("ne",  stream); break;
	case GT:  case LT:  fputs ("gt",  stream); break;
	case GE:  case LE:  fputs ("ge",  stream); break;
	case GTU: case LTU: fputs ("gtu", stream); break;
	case GEU: case LEU: fputs ("geu", stream); break;
	default:
	  break;
	}
      break;
    case 'M':
      if (TARGET_SHMEDIA)
	{
	  if (GET_CODE (x) == MEM
	      && GET_CODE (XEXP (x, 0)) == PLUS
	      && (GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
		  || GET_CODE (XEXP (XEXP (x, 0), 1)) == SUBREG))
	    fputc ('x', stream);
	}
      else
	{
	  if (GET_CODE (x) == MEM)
	    {
	      switch (GET_MODE (x))
		{
		case QImode: fputs (".b", stream); break;
		case HImode: fputs (".w", stream); break;
		case SImode: fputs (".l", stream); break;
		case SFmode: fputs (".s", stream); break;
		case DFmode: fputs (".d", stream); break;
		default: gcc_unreachable ();
		}
	    }
	}
      break;

    case 'm':
      gcc_assert (GET_CODE (x) == MEM);
      x = XEXP (x, 0);
      /* Fall through.  */
    case 'U':
      switch (GET_CODE (x))
	{
	case REG:
	case SUBREG:
	  print_operand (stream, x, 0);
	  fputs (", 0", stream);
	  break;

	case PLUS:
	  print_operand (stream, XEXP (x, 0), 0);
	  fputs (", ", stream);
	  print_operand (stream, XEXP (x, 1), 0);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    case 'V':
      {
	int num = exact_log2 (INTVAL (x));
	gcc_assert (num >= 0);
	fprintf (stream, "#%d", num);
      }
      break;

    case 'W':
      {
	int num = exact_log2 (~INTVAL (x));
	gcc_assert (num >= 0);
	fprintf (stream, "#%d", num);
      }
      break;

    case 'd':
      gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == V2SFmode);

      fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
      break;

    case 'N':
      if (x == CONST0_RTX (GET_MODE (x)))
	{
	  fprintf ((stream), "r63");
	  break;
	}
      goto default_output;
    case 'u':
      if (GET_CODE (x) == CONST_INT)
	{
	  fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1));
	  break;
	}
      /* Fall through.  */

    default_output:
    default:
      regno = 0;
      mode = GET_MODE (x);

      switch (GET_CODE (x))
	{
	case TRUNCATE:
	  {
	    rtx inner = XEXP (x, 0);
	    int offset = 0;
	    enum machine_mode inner_mode;

	    /* We might see SUBREGs with vector mode registers inside.  */
	    if (GET_CODE (inner) == SUBREG
		&& (GET_MODE_SIZE (GET_MODE (inner))
		    == GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
		&& subreg_lowpart_p (inner))
	      inner = SUBREG_REG (inner);
	    if (GET_CODE (inner) == CONST_INT)
	      {
		x = GEN_INT (trunc_int_for_mode (INTVAL (inner), GET_MODE (x)));
		goto default_output;
	      }
	    inner_mode = GET_MODE (inner);
	    if (GET_CODE (inner) == SUBREG
		&& (GET_MODE_SIZE (GET_MODE (inner))
		    < GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
		&& GET_CODE (SUBREG_REG (inner)) == REG)
	      {
		offset = subreg_regno_offset (REGNO (SUBREG_REG (inner)),
					      GET_MODE (SUBREG_REG (inner)),
					      SUBREG_BYTE (inner),
					      GET_MODE (inner));
		inner = SUBREG_REG (inner);
	      }
	    if (GET_CODE (inner) != REG || GET_MODE_SIZE (inner_mode) > 8)
	      gcc_unreachable ();
	    /* Floating point register pairs are always big endian;
	       general purpose registers are 64 bit wide.  */
	    regno = REGNO (inner);
	    regno = (HARD_REGNO_NREGS (regno, inner_mode)
		     - HARD_REGNO_NREGS (regno, mode))
		    + offset;
	    x = inner;
	    goto reg;
	  }
	case SIGN_EXTEND:
	  x = XEXP (x, 0);
	  goto reg;
	  /* FIXME: We need this on SHmedia32 because reload generates
	     some sign-extended HI or QI loads into DImode registers
	     but, because Pmode is SImode, the address ends up with a
	     subreg:SI of the DImode register.  Maybe reload should be
	     fixed so as to apply alter_subreg to such loads?  */
	case IF_THEN_ELSE:
	  gcc_assert (trapping_target_operand (x, VOIDmode));
	  x = XEXP (XEXP (x, 2), 0);
	  goto default_output;
	case SUBREG:
	  gcc_assert (SUBREG_BYTE (x) == 0
		      && GET_CODE (SUBREG_REG (x)) == REG);

	  x = SUBREG_REG (x);
	  /* Fall through.  */

	reg:
	case REG:
	  regno += REGNO (x);
	  if (FP_REGISTER_P (regno)
	      && mode == V16SFmode)
	    fprintf ((stream), "mtrx%s", reg_names[regno] + 2);
	  else if (FP_REGISTER_P (REGNO (x))
		   && mode == V4SFmode)
	    fprintf ((stream), "fv%s", reg_names[regno] + 2);
	  else if (GET_CODE (x) == REG
		   && mode == V2SFmode)
	    fprintf ((stream), "fp%s", reg_names[regno] + 2);
	  else if (FP_REGISTER_P (REGNO (x))
		   && GET_MODE_SIZE (mode) > 4)
	    fprintf ((stream), "d%s", reg_names[regno] + 1);
	  else
	    fputs (reg_names[regno], (stream));
	  break;

	case MEM:
	  output_address (XEXP (x, 0));
	  break;

	case CONST:
	  if (TARGET_SHMEDIA
	      && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
		  || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
	      && (GET_MODE (XEXP (x, 0)) == DImode
		  || GET_MODE (XEXP (x, 0)) == SImode)
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == TRUNCATE
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode)
	    {
	      rtx val = XEXP (XEXP (XEXP (x, 0), 0), 0);
	      rtx val2 = val;
	      bool nested_expr = false;

	      fputc ('(', stream);
	      if (GET_CODE (val) == ASHIFTRT)
		{
		  fputc ('(', stream);
		  val2 = XEXP (val, 0);
		}
	      if (GET_CODE (val2) == CONST
		  || GET_RTX_CLASS (GET_CODE (val2)) != RTX_OBJ)
		{
		  fputc ('(', stream);
		  nested_expr = true;
		}
	      output_addr_const (stream, val2);
	      if (nested_expr)
		fputc (')', stream);
	      if (GET_CODE (val) == ASHIFTRT)
		{
		  fputs (" >> ", stream);
		  output_addr_const (stream, XEXP (val, 1));
		  fputc (')', stream);
		}
	      fputs (" & 65535)", stream);
	      break;
	    }

	  /* Fall through.  */
	default:
	  fputc ('#', stream);
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}

/* Encode symbol attributes of a SYMBOL_REF into its
   SYMBOL_REF_FLAGS.  */
static void
sh_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (TREE_CODE (decl) == FUNCTION_DECL
      && sh2a_function_vector_p (decl) && TARGET_SH2A)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FUNCVEC_FUNCTION;
}

/* Like force_operand, but guarantees that VALUE ends up in TARGET.  */
static void
force_into (rtx value, rtx target)
{
  value = force_operand (value, target);
  if (! rtx_equal_p (value, target))
    emit_insn (gen_move_insn (target, value));
}

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */
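
/* For instance (illustrative): a constant 12 byte, word aligned copy on
   SH4 hardware becomes a call to the __movmemSI12_i4 library routine
   below, with the destination address forced into r4 and the source
   address into r5.  */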

int
expand_block_move (rtx *operands)
{
  int align = INTVAL (operands[3]);
  int constp = (GET_CODE (operands[2]) == CONST_INT);
  int bytes = (constp ? INTVAL (operands[2]) : 0);

  if (! constp)
    return 0;

  /* If we could use mov.l to move words and dest is word-aligned, we
     can use movua.l for loads and still generate a relatively short
     and efficient sequence.  */
  if (TARGET_SH4A_ARCH && align < 4
      && MEM_ALIGN (operands[0]) >= 32
      && can_move_by_pieces (bytes, 32))
    {
      rtx dest = copy_rtx (operands[0]);
      rtx src = copy_rtx (operands[1]);
      /* We could use different pseudos for each copied word, but
	 since movua can only load into r0, it's kind of
	 pointless.  */
      rtx temp = gen_reg_rtx (SImode);
      rtx src_addr = copy_addr_to_reg (XEXP (src, 0));
      int copied = 0;

      while (copied + 4 <= bytes)
	{
	  rtx to = adjust_address (dest, SImode, copied);
	  rtx from = adjust_automodify_address (src, BLKmode,
						src_addr, copied);

	  set_mem_size (from, GEN_INT (4));
	  emit_insn (gen_movua (temp, from));
	  emit_move_insn (src_addr, plus_constant (src_addr, 4));
	  emit_move_insn (to, temp);
	  copied += 4;
	}

      if (copied < bytes)
	move_by_pieces (adjust_address (dest, BLKmode, copied),
			adjust_automodify_address (src, BLKmode,
						   src_addr, copied),
			bytes - copied, align, 0);

      return 1;
    }

  /* If it isn't a constant number of bytes, or if it doesn't have 4 byte
     alignment, or if it isn't a multiple of 4 bytes, then fail.  */
  if (align < 4 || (bytes % 4 != 0))
    return 0;

  if (TARGET_HARD_SH4)
    {
      if (bytes < 12)
	return 0;
      else if (bytes == 12)
	{
	  rtx func_addr_rtx = gen_reg_rtx (Pmode);
	  rtx r4 = gen_rtx_REG (SImode, 4);
	  rtx r5 = gen_rtx_REG (SImode, 5);

	  function_symbol (func_addr_rtx, "__movmemSI12_i4", SFUNC_STATIC);
	  force_into (XEXP (operands[0], 0), r4);
	  force_into (XEXP (operands[1], 0), r5);
	  emit_insn (gen_block_move_real_i4 (func_addr_rtx));
	  return 1;
	}
      else if (! TARGET_SMALLCODE)
	{
	  const char *entry_name;
	  rtx func_addr_rtx = gen_reg_rtx (Pmode);
	  int dwords;
	  rtx r4 = gen_rtx_REG (SImode, 4);
	  rtx r5 = gen_rtx_REG (SImode, 5);
	  rtx r6 = gen_rtx_REG (SImode, 6);

	  entry_name = (bytes & 4 ? "__movmem_i4_odd" : "__movmem_i4_even");
	  function_symbol (func_addr_rtx, entry_name, SFUNC_STATIC);
	  force_into (XEXP (operands[0], 0), r4);
	  force_into (XEXP (operands[1], 0), r5);

	  dwords = bytes >> 3;
	  emit_insn (gen_move_insn (r6, GEN_INT (dwords - 1)));
	  emit_insn (gen_block_lump_real_i4 (func_addr_rtx));
	  return 1;
	}
      else
	return 0;
    }
  if (bytes < 64)
    {
      char entry[30];
      rtx func_addr_rtx = gen_reg_rtx (Pmode);
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);

      sprintf (entry, "__movmemSI%d", bytes);
      function_symbol (func_addr_rtx, entry, SFUNC_STATIC);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);
      emit_insn (gen_block_move_real (func_addr_rtx));
      return 1;
    }

  /* This is the same number of bytes as a memcpy call, but to a different
     less common function name, so this will occasionally use more space.  */
  if (! TARGET_SMALLCODE)
    {
      rtx func_addr_rtx = gen_reg_rtx (Pmode);
      int final_switch, while_loop;
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);
      rtx r6 = gen_rtx_REG (SImode, 6);

      function_symbol (func_addr_rtx, "__movmem", SFUNC_STATIC);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);

      /* r6 controls the size of the move.  16 is decremented from it
	 for each 64 bytes moved.  Then the negative bit left over is used
	 as an index into a list of move instructions.  e.g., a 72 byte move
	 would be set up with size(r6) = 14, for one iteration through the
	 big while loop, and a switch of -2 for the last part.  */
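
      /* To see the arithmetic at work on another size (illustrative):
	 a 144 byte move is 36 words, giving final_switch = 16 - (36 % 16)
	 = 12 and while_loop = (36 / 16 - 1) * 16 = 16, so r6 = 28; two
	 trips through the loop leave -4, which selects the last four
	 words.  */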

      final_switch = 16 - ((bytes / 4) % 16);
      while_loop = ((bytes / 4) / 16 - 1) * 16;
      emit_insn (gen_move_insn (r6, GEN_INT (while_loop + final_switch)));
      emit_insn (gen_block_lump_real (func_addr_rtx));
      return 1;
    }

  return 0;
}

/* Prepare operands for a move define_expand; specifically, one of the
   operands must be in a register.  */
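
/* E.g., an SImode memory-to-memory move has its source copied into a
   pseudo first, since the SH has no memory-to-memory mov instruction
   (illustrative).  */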

void
prepare_move_operands (rtx operands[], enum machine_mode mode)
{
  if ((mode == SImode || mode == DImode)
      && flag_pic
      && ! ((mode == Pmode || mode == ptr_mode)
	    && tls_symbolic_operand (operands[1], Pmode) != 0))
    {
      rtx temp;
      if (SYMBOLIC_CONST_P (operands[1]))
	{
	  if (GET_CODE (operands[0]) == MEM)
	    operands[1] = force_reg (Pmode, operands[1]);
	  else if (TARGET_SHMEDIA
		   && GET_CODE (operands[1]) == LABEL_REF
		   && target_reg_operand (operands[0], mode))
	    /* It's ok.  */;
	  else
	    {
	      temp = (!can_create_pseudo_p ()
		      ? operands[0]
		      : gen_reg_rtx (Pmode));
	      operands[1] = legitimize_pic_address (operands[1], mode, temp);
	    }
	}
      else if (GET_CODE (operands[1]) == CONST
	       && GET_CODE (XEXP (operands[1], 0)) == PLUS
	       && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))
	{
	  temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
	  temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
					 mode, temp);
	  operands[1] = expand_binop (mode, add_optab, temp,
				      XEXP (XEXP (operands[1], 0), 1),
				      (!can_create_pseudo_p ()
				       ? temp
				       : gen_reg_rtx (Pmode)),
				      0, OPTAB_LIB_WIDEN);
	}
    }

  if (! reload_in_progress && ! reload_completed)
    {
      /* Copy the source to a register if both operands aren't registers.  */
      if (! register_operand (operands[0], mode)
	  && ! sh_register_operand (operands[1], mode))
	operands[1] = copy_to_mode_reg (mode, operands[1]);

      if (GET_CODE (operands[0]) == MEM && ! memory_operand (operands[0], mode))
	{
	  /* This is like change_address_1 (operands[0], mode, 0, 1) ,
	     except that we can't use that function because it is static.  */
	  rtx new = change_address (operands[0], mode, 0);
	  MEM_COPY_ATTRIBUTES (new, operands[0]);
	  operands[0] = new;
	}

      /* This case can happen while generating code to move the result
	 of a library call to the target.  Reject `st r0,@(rX,rY)' because
	 reload will fail to find a spill register for rX, since r0 is already
	 being used for the source.  */
      else if (TARGET_SH1
	       && refers_to_regno_p (R0_REG, R0_REG + 1, operands[1], (rtx *)0)
	       && GET_CODE (operands[0]) == MEM
	       && GET_CODE (XEXP (operands[0], 0)) == PLUS
	       && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == REG)
	operands[1] = copy_to_mode_reg (mode, operands[1]);
    }

  if (mode == Pmode || mode == ptr_mode)
    {
      rtx op0, op1, opc;
      enum tls_model tls_kind;

      op0 = operands[0];
      op1 = operands[1];
      if (GET_CODE (op1) == CONST
	  && GET_CODE (XEXP (op1, 0)) == PLUS
	  && tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode))
	{
	  opc = XEXP (XEXP (op1, 0), 1);
	  op1 = XEXP (XEXP (op1, 0), 0);
	}
      else
	opc = NULL_RTX;

      if ((tls_kind = tls_symbolic_operand (op1, Pmode)))
	{
	  rtx tga_op1, tga_ret, tmp, tmp2;

	  switch (tls_kind)
	    {
	    case TLS_MODEL_GLOBAL_DYNAMIC:
	      tga_ret = gen_rtx_REG (Pmode, R0_REG);
	      emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
	      op1 = tga_ret;
	      break;

	    case TLS_MODEL_LOCAL_DYNAMIC:
	      tga_ret = gen_rtx_REG (Pmode, R0_REG);
	      emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));

	      tmp = gen_reg_rtx (Pmode);
	      emit_move_insn (tmp, tga_ret);

	      if (register_operand (op0, Pmode))
		tmp2 = op0;
	      else
		tmp2 = gen_reg_rtx (Pmode);

	      emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp));
	      op1 = tmp2;
	      break;

	    case TLS_MODEL_INITIAL_EXEC:
	      if (! flag_pic)
		{
		  /* Don't schedule insns for getting GOT address when
		     the first scheduling is enabled, to avoid spill
		     failures for R0.  */
		  if (flag_schedule_insns)
		    emit_insn (gen_blockage ());
		  emit_insn (gen_GOTaddr2picreg ());
		  emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode,
								 PIC_REG)));
		  if (flag_schedule_insns)
		    emit_insn (gen_blockage ());
		}
	      tga_op1 = !can_create_pseudo_p () ? op0 : gen_reg_rtx (Pmode);
	      tmp = gen_sym2GOTTPOFF (op1);
	      emit_insn (gen_tls_initial_exec (tga_op1, tmp));
	      op1 = tga_op1;
	      break;

	    case TLS_MODEL_LOCAL_EXEC:
	      tmp2 = gen_reg_rtx (Pmode);
	      emit_insn (gen_load_gbr (tmp2));
	      tmp = gen_reg_rtx (Pmode);
	      emit_insn (gen_symTPOFF2reg (tmp, op1));

	      if (register_operand (op0, Pmode))
		op1 = op0;
	      else
		op1 = gen_reg_rtx (Pmode);

	      emit_insn (gen_addsi3 (op1, tmp, tmp2));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  if (opc)
	    emit_insn (gen_addsi3 (op1, op1, force_reg (SImode, opc)));
	  operands[1] = op1;
	}
    }
}

enum rtx_code
prepare_cbranch_operands (rtx *operands, enum machine_mode mode,
			  enum rtx_code comparison)
{
  rtx op1;
  rtx scratch = NULL_RTX;

  if (comparison == CODE_FOR_nothing)
    comparison = GET_CODE (operands[0]);
  else
    scratch = operands[4];
  if (GET_CODE (operands[1]) == CONST_INT
      && GET_CODE (operands[2]) != CONST_INT)
    {
      rtx tmp = operands[1];

      operands[1] = operands[2];
      operands[2] = tmp;
      comparison = swap_condition (comparison);
    }
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (operands[2]);
      if ((val == -1 || val == -0x81)
	  && (comparison == GT || comparison == LE))
	{
	  comparison = (comparison == GT) ? GE : LT;
	  operands[2] = gen_int_mode (val + 1, mode);
	}
      else if ((val == 1 || val == 0x80)
	       && (comparison == GE || comparison == LT))
	{
	  comparison = (comparison == GE) ? GT : LE;
	  operands[2] = gen_int_mode (val - 1, mode);
	}
      else if (val == 1 && (comparison == GEU || comparison == LTU))
	{
	  comparison = (comparison == GEU) ? NE : EQ;
	  operands[2] = CONST0_RTX (mode);
	}
      else if (val == 0x80 && (comparison == GEU || comparison == LTU))
	{
	  comparison = (comparison == GEU) ? GTU : LEU;
	  operands[2] = gen_int_mode (val - 1, mode);
	}
      else if (val == 0 && (comparison == GTU || comparison == LEU))
	comparison = (comparison == GTU) ? NE : EQ;
      else if (mode == SImode
	       && ((val == 0x7fffffff
		    && (comparison == GTU || comparison == LEU))
		   || ((unsigned HOST_WIDE_INT) val
		       == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
		       && (comparison == GEU || comparison == LTU))))
	{
	  comparison = (comparison == GTU || comparison == GEU) ? LT : GE;
	  operands[2] = CONST0_RTX (mode);
	}
    }
  op1 = operands[1];
  if (can_create_pseudo_p ())
    operands[1] = force_reg (mode, op1);
  /* When we are handling DImode comparisons, we want to keep constants so
     that we can optimize the component comparisons; however, memory loads
     are better issued as a whole so that they can be scheduled well.
     SImode equality comparisons allow I08 constants, but only when they
     compare r0.  Hence, if operands[1] has to be loaded from somewhere else
     into a register, that register might as well be r0, and we allow the
     constant.  If it is already in a register, this is likely to be
     allocated to a different hard register, thus we load the constant into
     a register unless it is zero.  */
  if (!REG_P (operands[2])
      && (GET_CODE (operands[2]) != CONST_INT
	  || (mode == SImode && operands[2] != CONST0_RTX (SImode)
	      && ((comparison != EQ && comparison != NE)
		  || (REG_P (op1) && REGNO (op1) != R0_REG)
		  || !satisfies_constraint_I08 (operands[2])))))
    {
      if (scratch && GET_MODE (scratch) == mode)
	{
	  emit_move_insn (scratch, operands[2]);
	  operands[2] = scratch;
	}
      else if (can_create_pseudo_p ())
	operands[2] = force_reg (mode, operands[2]);
    }
  return comparison;
}
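
/* Two instances of the canonicalization above (illustrative): an SImode
   (gtu x 0x7fffffff) test is rewritten as the signed test (lt x 0),
   which needs no constant at all, and (geu x 1) becomes (ne x 0).  */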

void
expand_cbranchsi4 (rtx *operands, enum rtx_code comparison, int probability)
{
  rtx (*branch_expander) (rtx) = gen_branch_true;
  rtx jump;

  comparison = prepare_cbranch_operands (operands, SImode, comparison);
  switch (comparison)
    {
    case NE: case LT: case LE: case LTU: case LEU:
      comparison = reverse_condition (comparison);
      branch_expander = gen_branch_false;
    default: ;
    }
  emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, T_REG),
			  gen_rtx_fmt_ee (comparison, SImode,
					  operands[1], operands[2])));
  jump = emit_jump_insn (branch_expander (operands[3]));
  if (probability >= 0)
    REG_NOTES (jump)
      = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (probability),
			   REG_NOTES (jump));
}

/* ??? How should we distribute probabilities when more than one branch
   is generated?  So far we only have some ad-hoc observations:
   - If the operands are random, they are likely to differ in both parts.
   - If comparing items in a hash chain, the operands are random or equal;
     operation should be EQ or NE.
   - If items are searched in an ordered tree from the root, we can expect
     the highpart to be unequal about half of the time; operation should be
     an inequality comparison, operands non-constant, and overall probability
     about 50%.  Likewise for quicksort.
   - Range checks will be often made against constants.  Even if we assume for
     simplicity an even distribution of the non-constant operand over a
     sub-range here, the same probability could be generated with differently
     wide sub-ranges - as long as the ratio of the part of the subrange that
     is before the threshold to the part that comes after the threshold stays
     the same.  Thus, we can't really tell anything here;
     assuming random distribution is at least simple.  */

bool
expand_cbranchdi4 (rtx *operands, enum rtx_code comparison)
{
  enum rtx_code msw_taken, msw_skip, lsw_taken;
  rtx skip_label = NULL_RTX;
  rtx op1h, op1l, op2h, op2l;
  int num_branches;
  int prob, rev_prob;
  int msw_taken_prob = -1, msw_skip_prob = -1, lsw_taken_prob = -1;
  rtx scratch = operands[4];

  comparison = prepare_cbranch_operands (operands, DImode, comparison);
  op1h = gen_highpart_mode (SImode, DImode, operands[1]);
  op2h = gen_highpart_mode (SImode, DImode, operands[2]);
  op1l = gen_lowpart (SImode, operands[1]);
  op2l = gen_lowpart (SImode, operands[2]);
  msw_taken = msw_skip = lsw_taken = CODE_FOR_nothing;
  prob = split_branch_probability;
  rev_prob = REG_BR_PROB_BASE - prob;
  switch (comparison)
    {
    /* ??? Should we use the cmpeqdi_t pattern for equality comparisons?
       That costs 1 cycle more when the first branch can be predicted taken,
       but saves us mispredicts because only one branch needs prediction.
       It also enables generating the cmpeqdi_t-1 pattern.  */
    case EQ:
      if (TARGET_CMPEQDI_T)
	{
	  emit_insn (gen_cmpeqdi_t (operands[1], operands[2]));
	  emit_jump_insn (gen_branch_true (operands[3]));
	  return true;
	}
      msw_skip = NE;
      lsw_taken = EQ;
      if (prob >= 0)
	{
	  /* If we had more precision, we'd use rev_prob - (rev_prob >> 32) .
	   */
	  msw_skip_prob = rev_prob;
	  if (REG_BR_PROB_BASE <= 65535)
	    lsw_taken_prob = prob ? REG_BR_PROB_BASE : 0;
	  else
	    {
	      gcc_assert (HOST_BITS_PER_WIDEST_INT >= 64);
	      lsw_taken_prob
		= (prob
		   ? (REG_BR_PROB_BASE
		      - ((HOST_WIDEST_INT) REG_BR_PROB_BASE * rev_prob
			 / ((HOST_WIDEST_INT) prob << 32)))
		   : 0);
	    }
	}
      break;
    case NE:
      if (TARGET_CMPEQDI_T)
	{
	  emit_insn (gen_cmpeqdi_t (operands[1], operands[2]));
	  emit_jump_insn (gen_branch_false (operands[3]));
	  return true;
	}
      msw_taken = NE;
      msw_taken_prob = prob;
      lsw_taken = NE;
      lsw_taken_prob = 0;
      break;
    case GTU: case GT:
      msw_taken = comparison;
      if (GET_CODE (op2l) == CONST_INT && INTVAL (op2l) == -1)
	break;
      if (comparison != GTU || op2h != CONST0_RTX (SImode))
	msw_skip = swap_condition (msw_taken);
      lsw_taken = GTU;
      break;
    case GEU: case GE:
      if (op2l == CONST0_RTX (SImode))
	msw_taken = comparison;
      else
	{
	  msw_taken = comparison == GE ? GT : GTU;
	  msw_skip = swap_condition (msw_taken);
	  lsw_taken = GEU;
	}
      break;
    case LTU: case LT:
      msw_taken = comparison;
      if (op2l == CONST0_RTX (SImode))
	break;
      msw_skip = swap_condition (msw_taken);
      lsw_taken = LTU;
      break;
    case LEU: case LE:
      if (GET_CODE (op2l) == CONST_INT && INTVAL (op2l) == -1)
	msw_taken = comparison;
      else
	{
	  lsw_taken = LEU;
	  if (comparison == LE)
	    msw_taken = LT;
	  else if (op2h != CONST0_RTX (SImode))
	    msw_taken = LTU;
	  else
	    break;
	  msw_skip = swap_condition (msw_taken);
	}
      break;
    default: return false;
    }
  num_branches = ((msw_taken != CODE_FOR_nothing)
		  + (msw_skip != CODE_FOR_nothing)
		  + (lsw_taken != CODE_FOR_nothing));
  if (comparison != EQ && comparison != NE && num_branches > 1)
    {
      if (!CONSTANT_P (operands[2])
	  && prob >= (int) (REG_BR_PROB_BASE * 3 / 8U)
	  && prob <= (int) (REG_BR_PROB_BASE * 5 / 8U))
	{
	  msw_taken_prob = prob / 2U;
	  msw_skip_prob
	    = REG_BR_PROB_BASE * rev_prob / (REG_BR_PROB_BASE + rev_prob);
	  lsw_taken_prob = prob;
	}
      else
	{
	  msw_taken_prob = prob;
	  msw_skip_prob = REG_BR_PROB_BASE;
	  /* ??? If we have a constant op2h, should we use that when
	     calculating lsw_taken_prob?  */
	  lsw_taken_prob = prob;
	}
    }
  operands[1] = op1h;
  operands[2] = op2h;
  operands[4] = NULL_RTX;
  if (reload_completed
      && ! arith_reg_or_0_operand (op2h, SImode) && true_regnum (op1h)
      && (msw_taken != CODE_FOR_nothing || msw_skip != CODE_FOR_nothing))
    {
      emit_move_insn (scratch, operands[2]);
      operands[2] = scratch;
    }
  if (msw_taken != CODE_FOR_nothing)
    expand_cbranchsi4 (operands, msw_taken, msw_taken_prob);
  if (msw_skip != CODE_FOR_nothing)
    {
      rtx taken_label = operands[3];

      /* Operands were possibly modified, but msw_skip doesn't expect this.
	 Always use the original ones.  */
      if (msw_taken != CODE_FOR_nothing)
	{
	  operands[1] = op1h;
	  operands[2] = op2h;
	}

      operands[3] = skip_label = gen_label_rtx ();
      expand_cbranchsi4 (operands, msw_skip, msw_skip_prob);
      operands[3] = taken_label;
    }
  operands[1] = op1l;
  operands[2] = op2l;
  if (lsw_taken != CODE_FOR_nothing)
    {
      if (reload_completed
	  && ! arith_reg_or_0_operand (op2l, SImode) && true_regnum (op1l))
	operands[4] = scratch;
      expand_cbranchsi4 (operands, lsw_taken, lsw_taken_prob);
    }
  if (msw_skip != CODE_FOR_nothing)
    emit_label (skip_label);
  return true;
}

/* Prepare the operands for an scc instruction; make sure that the
   compare has been done.  */
rtx
prepare_scc_operands (enum rtx_code code)
{
  rtx t_reg = gen_rtx_REG (SImode, T_REG);
  enum rtx_code oldcode = code;
  enum machine_mode mode;

  /* First need a compare insn.  */
  switch (code)
    {
    case NE:
      /* It isn't possible to handle this case.  */
      gcc_unreachable ();
    case LT:
      code = GT;
      break;
    case LE:
      code = GE;
      break;
    case LTU:
      code = GTU;
      break;
    case LEU:
      code = GEU;
      break;
    default:
      break;
    }
  if (code != oldcode)
    {
      rtx tmp = sh_compare_op0;
      sh_compare_op0 = sh_compare_op1;
      sh_compare_op1 = tmp;
    }

  mode = GET_MODE (sh_compare_op0);
  if (mode == VOIDmode)
    mode = GET_MODE (sh_compare_op1);

  sh_compare_op0 = force_reg (mode, sh_compare_op0);
  if ((code != EQ && code != NE
       && (sh_compare_op1 != const0_rtx
	   || code == GTU || code == GEU || code == LTU || code == LEU))
      || (mode == DImode && sh_compare_op1 != const0_rtx)
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
    sh_compare_op1 = force_reg (mode, sh_compare_op1);

  if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
    (mode == SFmode ? emit_sf_insn : emit_df_insn)
     (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
		gen_rtx_SET (VOIDmode, t_reg,
			     gen_rtx_fmt_ee (code, SImode,
					     sh_compare_op0, sh_compare_op1)),
		gen_rtx_USE (VOIDmode, get_fpscr_rtx ()))));
  else
    emit_insn (gen_rtx_SET (VOIDmode, t_reg,
			    gen_rtx_fmt_ee (code, SImode,
					    sh_compare_op0, sh_compare_op1)));

  return t_reg;
}
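
/* A typical use (illustrative): for an SImode (gt r4 r5) scc, the code
   above emits "cmp/gt r5,r4" to set the T bit, and the caller then
   copies T into the result register with a "movt" insn.  */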

/* Called from the md file, set up the operands of a compare instruction.  */

void
from_compare (rtx *operands, int code)
{
  enum machine_mode mode = GET_MODE (sh_compare_op0);
  rtx insn;
  if (mode == VOIDmode)
    mode = GET_MODE (sh_compare_op1);
  if (code != EQ
      || mode == DImode
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      /* Force args into regs, since we can't use constants here.  */
      sh_compare_op0 = force_reg (mode, sh_compare_op0);
      if (sh_compare_op1 != const0_rtx
	  || code == GTU || code == GEU
	  || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
	sh_compare_op1 = force_reg (mode, sh_compare_op1);
    }
  if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT && code == GE)
    {
      from_compare (operands, GT);
      insn = gen_ieee_ccmpeqsf_t (sh_compare_op0, sh_compare_op1);
    }
  else
    insn = gen_rtx_SET (VOIDmode,
			gen_rtx_REG (SImode, T_REG),
			gen_rtx_fmt_ee (code, SImode,
					sh_compare_op0, sh_compare_op1));
  if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      insn = gen_rtx_PARALLEL (VOIDmode,
		      gen_rtvec (2, insn,
				 gen_rtx_USE (VOIDmode, get_fpscr_rtx ())));
      (mode == SFmode ? emit_sf_insn : emit_df_insn) (insn);
    }
  else
    emit_insn (insn);
}

/* Functions to output assembly code.  */

/* Return a sequence of instructions to perform DI or DF move.

   Since the SH cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
		   enum machine_mode mode)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == MEM
      && GET_CODE (XEXP (dst, 0)) == PRE_DEC)
    return "mov.l %T1,%0\n\tmov.l %1,%0";

  if (register_operand (dst, mode)
      && register_operand (src, mode))
    {
      if (REGNO (src) == MACH_REG)
	return "sts mach,%S0\n\tsts macl,%R0";

      /* When mov.d r1,r2 do r2->r3 then r1->r2;
	 when mov.d r1,r0 do r1->r0 then r2->r1.  */

      if (REGNO (src) + 1 == REGNO (dst))
	return "mov %T1,%T0\n\tmov %1,%0";
      else
	return "mov %1,%0\n\tmov %T1,%T0";
    }
  else if (GET_CODE (src) == CONST_INT)
    {
      if (INTVAL (src) < 0)
	output_asm_insn ("mov #-1,%S0", operands);
      else
	output_asm_insn ("mov #0,%S0", operands);

      return "mov %1,%R0";
    }
  else if (GET_CODE (src) == MEM)
    {
      int ptrreg = -1;
      int dreg = REGNO (dst);
      rtx inside = XEXP (src, 0);

      switch (GET_CODE (inside))
	{
	case REG:
	  ptrreg = REGNO (inside);
	  break;

	case SUBREG:
	  ptrreg = subreg_regno (inside);
	  break;

	case PLUS:
	  ptrreg = REGNO (XEXP (inside, 0));
	  /* ??? A r0+REG address shouldn't be possible here, because it isn't
	     an offsettable address.  Unfortunately, offsettable addresses use
	     QImode to check the offset, and a QImode offsettable address
	     requires r0 for the other operand, which is not currently
	     supported, so we can't use the 'o' constraint.
	     Thus we must check for and handle r0+REG addresses here.
	     We punt for now, since this is likely very rare.  */
	  gcc_assert (GET_CODE (XEXP (inside, 1)) != REG);
	  break;

	case LABEL_REF:
	  return "mov.l %1,%0\n\tmov.l %1+4,%T0";
	case POST_INC:
	  return "mov.l %1,%0\n\tmov.l %1,%T0";
	default:
	  gcc_unreachable ();
	}

      /* Work out the safe way to copy.  Copy into the second half first.  */
      if (dreg == ptrreg)
	return "mov.l %T1,%T0\n\tmov.l %1,%0";
    }

  return "mov.l %1,%0\n\tmov.l %T1,%T0";
}

/* Print an instruction which would have gone into a delay slot after
   another instruction, but couldn't because the other instruction expanded
   into a sequence where putting the slot insn at the end wouldn't work.  */

static void
print_slot (rtx insn)
{
  final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file, optimize, 1, NULL);

  INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
}

static const char *
output_far_jump (rtx insn, rtx op)
{
  struct { rtx lab, reg, op; } this;
  rtx braf_base_lab = NULL_RTX;
  const char *jump;
  int far;
  int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
  rtx prev;

  this.lab = gen_label_rtx ();

  if (TARGET_SH2
      && offset >= -32764
      && offset - get_attr_length (insn) <= 32766)
    {
      far = 0;
      jump = "mov.w %O0,%1; braf %1";
    }
  else
    {
      far = 1;
      if (flag_pic)
	{
	  if (TARGET_SH2)
	    jump = "mov.l %O0,%1; braf %1";
	  else
	    jump = "mov.l r0,@-r15; mova %O0,r0; mov.l @r0,%1; add r0,%1; mov.l @r15+,r0; jmp @%1";
	}
      else
	jump = "mov.l %O0,%1; jmp @%1";
    }
  /* If we have a scratch register available, use it.  */
  if (GET_CODE ((prev = prev_nonnote_insn (insn))) == INSN
      && INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
    {
      this.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
      if (REGNO (this.reg) == R0_REG && flag_pic && ! TARGET_SH2)
	jump = "mov.l r1,@-r15; mova %O0,r0; mov.l @r0,r1; add r1,r0; mov.l @r15+,r1; jmp @%1";
      output_asm_insn (jump, &this.lab);
      if (dbr_sequence_length ())
	print_slot (final_sequence);
      else
	output_asm_insn ("nop", 0);
    }
  else
    {
      /* Output the delay slot insn first if any.  */
      if (dbr_sequence_length ())
	print_slot (final_sequence);

      this.reg = gen_rtx_REG (SImode, 13);
      /* We must keep the stack aligned to 8-byte boundaries on SH5.
	 Fortunately, MACL is fixed and call-clobbered, and we never
	 need its value across jumps, so save r13 in it instead of in
	 the stack.  */
      if (TARGET_SH5)
	output_asm_insn ("lds r13, macl", 0);
      else
	output_asm_insn ("mov.l r13,@-r15", 0);
      output_asm_insn (jump, &this.lab);
      if (TARGET_SH5)
	output_asm_insn ("sts macl, r13", 0);
      else
	output_asm_insn ("mov.l @r15+,r13", 0);
    }
  if (far && flag_pic && TARGET_SH2)
    {
      braf_base_lab = gen_label_rtx ();
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (braf_base_lab));
    }
  if (far)
    output_asm_insn (".align 2", 0);
  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this.lab));
  this.op = op;
  if (far && flag_pic)
    {
      if (TARGET_SH2)
	this.lab = braf_base_lab;
      output_asm_insn (".long %O2-%O0", &this.lab);
    }
  else
    output_asm_insn (far ? ".long %O2" : ".word %O2-%O0", &this.lab);
  return "";
}

/* Local label counter, used for constants in the pool and inside
   pattern branches.  */

static int lf = 100;

/* Output code for ordinary branches.  */

const char *
output_branch (int logic, rtx insn, rtx *operands)
{
  switch (get_attr_length (insn))
    {
    case 6:
      /* This can happen if filling the delay slot has caused a forward
	 branch to exceed its range (we could reverse it, but only
	 when we know we won't overextend other branches; this should
	 best be handled by relaxation).
	 It can also happen when other condbranches hoist delay slot insn
	 from their destination, thus leading to code size increase.
	 But the branch will still be in the range -4092..+4098 bytes.  */
      if (! TARGET_RELAX)
	{
	  int label = lf++;
	  /* The call to print_slot will clobber the operands.  */
	  rtx op0 = operands[0];

	  /* If the instruction in the delay slot is annulled (true), then
	     there is no delay slot where we can put it now.  The only safe
	     place for it is after the label.  final will do that by default.  */

	  if (final_sequence
	      && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
	      && get_attr_length (XVECEXP (final_sequence, 0, 1)))
	    {
	      asm_fprintf (asm_out_file, "\tb%s%ss\t%LLF%d\n", logic ? "f" : "t",
			   ASSEMBLER_DIALECT ? "/" : ".", label);
	      print_slot (final_sequence);
	    }
	  else
	    asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label);

	  output_asm_insn ("bra\t%l0", &op0);
	  fprintf (asm_out_file, "\tnop\n");
	  (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);

	  return "";
	}
      /* When relaxing, handle this like a short branch.  The linker
	 will fix it up if it still doesn't fit after relaxation.  */
    case 2:
      return logic ? "bt%.\t%l0" : "bf%.\t%l0";

      /* These are for SH2e, in which we have to account for the
	 extra nop because of the hardware bug in annulled branches.  */
    case 8:
      if (! TARGET_RELAX)
	{
	  int label = lf++;

	  gcc_assert (!final_sequence
		      || !(INSN_ANNULLED_BRANCH_P
			   (XVECEXP (final_sequence, 0, 0))));
	  asm_fprintf (asm_out_file, "b%s%ss\t%LLF%d\n",
		       logic ? "f" : "t",
		       ASSEMBLER_DIALECT ? "/" : ".", label);
	  fprintf (asm_out_file, "\tnop\n");
	  output_asm_insn ("bra\t%l0", operands);
	  fprintf (asm_out_file, "\tnop\n");
	  (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);

	  return "";
	}
      /* When relaxing, fall through.  */
    case 4:
      {
	char buffer[10];

	sprintf (buffer, "b%s%ss\t%%l0",
		 logic ? "t" : "f",
		 ASSEMBLER_DIALECT ? "/" : ".");
	output_asm_insn (buffer, &operands[0]);
	return "";
      }

    default:
      /* There should be no longer branches now - that would
	 indicate that something has destroyed the branches set
	 up in machine_dependent_reorg.  */
      gcc_unreachable ();
    }
}
2096 /* Output a code sequence for INSN using TEMPLATE with OPERANDS; but before,
2097 fill in operands[9] as a label to the successor insn.
2098 We try to use jump threading where possible.
2099 IF CODE matches the comparison in the IF_THEN_ELSE of a following jump,
2100 we assume the jump is taken. I.e. EQ means follow jmp and bf, NE means
2101 follow jmp and bt, if the address is in range. */
2103 output_branchy_insn (enum rtx_code code, const char *template,
2104 rtx insn, rtx *operands)
2106 rtx next_insn = NEXT_INSN (insn);
2108 if (next_insn && GET_CODE (next_insn) == JUMP_INSN && condjump_p (next_insn))
2110 rtx src = SET_SRC (PATTERN (next_insn));
2111 if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code)
2113 /* Following branch not taken */
2114 operands[9] = gen_label_rtx ();
2115 emit_label_after (operands[9], next_insn);
2116 INSN_ADDRESSES_NEW (operands[9],
2117 INSN_ADDRESSES (INSN_UID (next_insn))
2118 + get_attr_length (next_insn));
2123 int offset = (branch_dest (next_insn)
2124 - INSN_ADDRESSES (INSN_UID (next_insn)) + 4);
2125 if (offset >= -252 && offset <= 258)
2127 if (GET_CODE (src) == IF_THEN_ELSE)
2129 src = XEXP (src, 1);
2135 operands[9] = gen_label_rtx ();
2136 emit_label_after (operands[9], insn);
2137 INSN_ADDRESSES_NEW (operands[9],
2138 INSN_ADDRESSES (INSN_UID (insn))
2139 + get_attr_length (insn));
2144 output_ieee_ccmpeq (rtx insn, rtx *operands)
2146 return output_branchy_insn (NE, "bt\t%l9\n\tfcmp/eq\t%1,%0",
2150 /* Output the start of the assembler file. */
2153 sh_file_start (void)
2155 default_file_start ();
2158 /* Declare the .directive section before it is used. */
2159 fputs ("\t.section .directive, \"SM\", @progbits, 1\n", asm_out_file);
2160 fputs ("\t.asciz \"#<SYMEDIT>#\\n\"\n", asm_out_file);
2164 /* We need to show the text section with the proper
2165 attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
2166 emits it without attributes in TEXT_SECTION_ASM_OP, else GAS
2167 will complain. We can teach GAS specifically about the
2168 default attributes for our choice of text section, but
2169 then we would have to change GAS again if/when we change
2170 the text section name. */
2171 fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP);
2173 /* Switch to the data section so that the coffsem symbol
2174 isn't in the text section. */
2175 switch_to_section (data_section);
2177 if (TARGET_LITTLE_ENDIAN)
2178 fputs ("\t.little\n", asm_out_file);
2182 if (TARGET_SHCOMPACT)
2183 fputs ("\t.mode\tSHcompact\n", asm_out_file);
2184 else if (TARGET_SHMEDIA)
2185 fprintf (asm_out_file, "\t.mode\tSHmedia\n\t.abi\t%i\n",
2186 TARGET_SHMEDIA64 ? 64 : 32);
2190 /* Check if PAT includes UNSPEC_CALLER unspec pattern. */
2193 unspec_caller_rtx_p (rtx pat)
2195 switch (GET_CODE (pat))
2198 return unspec_caller_rtx_p (XEXP (pat, 0));
2201 if (unspec_caller_rtx_p (XEXP (pat, 0)))
2203 return unspec_caller_rtx_p (XEXP (pat, 1));
2205 if (XINT (pat, 1) == UNSPEC_CALLER)
2214 /* Indicate that INSN cannot be duplicated. This is true for insn
2215 that generates a unique label. */
2218 sh_cannot_copy_insn_p (rtx insn)
2222 if (!reload_completed || !flag_pic)
2225 if (GET_CODE (insn) != INSN)
2227 if (asm_noperands (insn) >= 0)
2230 pat = PATTERN (insn);
2231 if (GET_CODE (pat) != SET)
2233 pat = SET_SRC (pat);
2235 if (unspec_caller_rtx_p (pat))
2241 /* Actual number of instructions used to make a shift by N. */
2242 static const char ashiftrt_insns[] =
2243 { 0,1,2,3,4,5,8,8,8,8,8,8,8,8,8,8,2,3,4,5,8,8,8,8,8,8,8,8,8,8,8,2};
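/* E.g. ashiftrt_insns[31] == 2: a 31-bit arithmetic right shift has a
   dedicated two-insn sequence.  The entries of 8 mark amounts where
   expand_ashiftrt below prefers the __ashiftrt_r4_<n> helper call or a
   dynamic shift over an inline sequence.  */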
2245 /* Left shift and logical right shift are the same. */
2246 static const char shift_insns[] =
2247 { 0,1,1,2,2,3,3,4,1,2,2,3,3,4,3,3,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
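/* E.g. shift_insns[10] == 2, realized as shll8 followed by shll2; the
   per-amount decompositions are in shift_amounts below.  */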
2249 /* Individual shift amounts needed to get the above length sequences.
2250 One bit right shifts clobber the T bit, so when possible, put one bit
2251 shifts in the middle of the sequence, so the ends are eligible for
2252 branch delay slots. */
2253 static const short shift_amounts[32][5] = {
2254 {0}, {1}, {2}, {2, 1},
2255 {2, 2}, {2, 1, 2}, {2, 2, 2}, {2, 2, 1, 2},
2256 {8}, {8, 1}, {8, 2}, {8, 1, 2},
2257 {8, 2, 2}, {8, 2, 1, 2}, {8, -2, 8}, {8, -1, 8},
2258 {16}, {16, 1}, {16, 2}, {16, 1, 2},
2259 {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
2260 {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
2261 {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
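/* E.g. shift_amounts[21] == {16, 2, 1, 2}: the amounts sum to 21, matching
   shift_insns[21] == 4, and the T-bit-clobbering one-bit shift sits in the
   middle of the sequence rather than at either end.  */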
2263 /* Likewise, but for shift amounts < 16, up to three highmost bits
2264 might be clobbered. This is typically used when combined with some
2265 kind of sign or zero extension. */
2267 static const char ext_shift_insns[] =
2268 { 0,1,1,2,2,3,2,2,1,2,2,3,3,3,2,2,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
2270 static const short ext_shift_amounts[32][4] = {
2271 {0}, {1}, {2}, {2, 1},
2272 {2, 2}, {2, 1, 2}, {8, -2}, {8, -1},
2273 {8}, {8, 1}, {8, 2}, {8, 1, 2},
2274 {8, 2, 2}, {16, -2, -1}, {16, -2}, {16, -1},
2275 {16}, {16, 1}, {16, 2}, {16, 1, 2},
2276 {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
2277 {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
2278 {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
2280 /* Assuming we have a value that has been sign-extended by at least one bit,
2281 can we use the ext_shift_amounts with the last shift turned to an arithmetic
2282 shift to shift it by N without data loss, and quicker than by other means?
2283 #define EXT_SHIFT_SIGNED(n) (((n) | 8) == 15)
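/* ((N) | 8) == 15 holds exactly for N == 7 and N == 15.  */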
2285 /* This is used in length attributes in sh.md to help compute the length
2286 of arbitrary constant shift instructions. */
2289 shift_insns_rtx (rtx insn)
2291 rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2292 int shift_count = INTVAL (XEXP (set_src, 1));
2293 enum rtx_code shift_code = GET_CODE (set_src);
2298 return ashiftrt_insns[shift_count];
2301 return shift_insns[shift_count];
2307 /* Return the cost of a shift. */
2317 if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
2319 if (GET_MODE (x) == DImode
2320 && GET_CODE (XEXP (x, 1)) == CONST_INT
2321 && INTVAL (XEXP (x, 1)) == 1)
2324 /* Everything else is invalid, because there is no pattern for it. */
2327 /* If shift by a non constant, then this will be expensive. */
2328 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2329 return SH_DYNAMIC_SHIFT_COST;
2331 value = INTVAL (XEXP (x, 1));
2333 /* Otherwise, return the true cost in instructions. */
2334 if (GET_CODE (x) == ASHIFTRT)
2336 int cost = ashiftrt_insns[value];
2337 /* If SH3, then we put the constant in a reg and use shad. */
2338 if (cost > 1 + SH_DYNAMIC_SHIFT_COST)
2339 cost = 1 + SH_DYNAMIC_SHIFT_COST;
2343 return shift_insns[value];
2346 /* Return the cost of an AND operation. */
2353 /* Anding with a register is a single cycle and instruction. */
2354 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2357 i = INTVAL (XEXP (x, 1));
2361 if (satisfies_constraint_I10 (XEXP (x, 1))
2362 || satisfies_constraint_J16 (XEXP (x, 1)))
2365 return 1 + rtx_cost (XEXP (x, 1), AND);
2368 /* These constants are single cycle extu.[bw] instructions. */
2369 if (i == 0xff || i == 0xffff)
2371 /* Constants that can be used in an and immediate instruction in a single
2372 cycle; this requires r0, though, so make it a little more expensive.
2373 if (CONST_OK_FOR_K08 (i))
2375 /* Constants that can be loaded with a mov immediate and an and.
2376 This case is probably unnecessary. */
2377 if (CONST_OK_FOR_I08 (i))
2379 /* Any other constant requires a 2 cycle pc-relative load plus an and.
2380 This case is probably unnecessary. */
2384 /* Return the cost of an addition or a subtraction. */
2389 /* Adding a register is a single cycle insn. */
2390 if (GET_CODE (XEXP (x, 1)) == REG
2391 || GET_CODE (XEXP (x, 1)) == SUBREG)
2394 /* Likewise for small constants. */
2395 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2396 && CONST_OK_FOR_ADD (INTVAL (XEXP (x, 1))))
2400 switch (GET_CODE (XEXP (x, 1)))
2405 return TARGET_SHMEDIA64 ? 5 : 3;
2408 if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1))))
2410 else if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1)) >> 16))
2412 else if (CONST_OK_FOR_I16 ((INTVAL (XEXP (x, 1)) >> 16) >> 16))
2420 /* Any other constant requires a 2 cycle pc-relative load plus an
2425 /* Return the cost of a multiply. */
2427 multcosts (rtx x ATTRIBUTE_UNUSED)
2429 if (sh_multcost >= 0)
2432 /* ??? We have a mul insn, but it has a latency of three, and doesn't
2433 accept constants. Ideally, we would use a cost of one or two and
2434 add the cost of the operand, but disregard the latter when inside loops
2435 and loop invariant code motion is still to follow.
2436 Using a multiply first and splitting it later if it's a loss
2437 doesn't work because of different sign / zero extension semantics
2438 of multiplies vs. shifts. */
2439 return TARGET_SMALLCODE ? 2 : 3;
2443 /* We have a mul insn, so we can never take more than the mul and the
2444 read of the mac reg, but count more because of the latency and extra
2446 if (TARGET_SMALLCODE)
2451 /* If we're aiming at small code, then just count the number of
2452 insns in a multiply call sequence. */
2453 if (TARGET_SMALLCODE)
2456 /* Otherwise count all the insns in the routine we'd be calling too. */
2460 /* Compute a (partial) cost for rtx X. Return true if the complete
2461 cost has been computed, and false if subexpressions should be
2462 scanned. In either case, *TOTAL contains the cost result. */
2465 sh_rtx_costs (rtx x, int code, int outer_code, int *total)
2472 if (INTVAL (x) == 0)
2474 else if (outer_code == AND && and_operand ((x), DImode))
2476 else if ((outer_code == IOR || outer_code == XOR
2477 || outer_code == PLUS)
2478 && CONST_OK_FOR_I10 (INTVAL (x)))
2480 else if (CONST_OK_FOR_I16 (INTVAL (x)))
2481 *total = COSTS_N_INSNS (outer_code != SET);
2482 else if (CONST_OK_FOR_I16 (INTVAL (x) >> 16))
2483 *total = COSTS_N_INSNS ((outer_code != SET) + 1);
2484 else if (CONST_OK_FOR_I16 ((INTVAL (x) >> 16) >> 16))
2485 *total = COSTS_N_INSNS ((outer_code != SET) + 2);
2487 *total = COSTS_N_INSNS ((outer_code != SET) + 3);
2490 if (CONST_OK_FOR_I08 (INTVAL (x)))
2492 else if ((outer_code == AND || outer_code == IOR || outer_code == XOR)
2493 && CONST_OK_FOR_K08 (INTVAL (x)))
2495 /* prepare_cmp_insn will force costly constants into registers before
2496 the cbranch[sd]i4 patterns can see them, so preserve potentially
2497 interesting ones not covered by I08 above. */
2498 else if (outer_code == COMPARE
2499 && ((unsigned HOST_WIDE_INT) INTVAL (x)
2500 == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
2501 || INTVAL (x) == 0x7fffffff
2502 || INTVAL (x) == 0x80 || INTVAL (x) == -0x81))
2511 if (TARGET_SHMEDIA64)
2512 *total = COSTS_N_INSNS (4);
2513 else if (TARGET_SHMEDIA32)
2514 *total = COSTS_N_INSNS (2);
2521 *total = COSTS_N_INSNS (4);
2522 /* prepare_cmp_insn will force costly constants into registers before
2523 the cbranchdi4 pattern can see them, so preserve potentially
2524 interesting ones. */
2525 else if (outer_code == COMPARE && GET_MODE (x) == DImode)
2531 if (x == CONST0_RTX (GET_MODE (x)))
2533 else if (sh_1el_vec (x, VOIDmode))
2534 *total = outer_code != SET;
2535 else if (sh_rep_vec (x, VOIDmode))
2536 *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
2537 + (outer_code != SET));
2538 else *total = COSTS_N_INSNS (3) + (outer_code != SET);
2543 *total = COSTS_N_INSNS (addsubcosts (x));
2547 *total = COSTS_N_INSNS (andcosts (x));
2551 *total = COSTS_N_INSNS (multcosts (x));
2557 *total = COSTS_N_INSNS (shiftcosts (x));
2564 *total = COSTS_N_INSNS (20);
2568 if (sh_1el_vec (x, VOIDmode))
2569 *total = outer_code != SET;
2570 else if (sh_rep_vec (x, VOIDmode))
2571 *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
2572 + (outer_code != SET));
2573 else *total = COSTS_N_INSNS (3) + (outer_code != SET);
2586 /* Compute the cost of an address. For the SH, all valid addresses are
2587 the same cost. Use a slightly higher cost for reg + reg addressing,
2588 since it increases pressure on r0. */
2591 sh_address_cost (rtx X)
2593 return (GET_CODE (X) == PLUS
2594 && ! CONSTANT_P (XEXP (X, 1))
2595 && ! TARGET_SHMEDIA ? 1 : 0);
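/* E.g. the reg + reg address (plus (reg r4) (reg r0)) costs 1 here, while
   (plus (reg r4) (const_int 4)) and a plain (reg r4) cost 0.  */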
2598 /* Code to expand a shift. */
2601 gen_ashift (int type, int n, rtx reg)
2603 /* Negative values here come from the shift_amounts array. */
2616 emit_insn (gen_ashrsi3_k (reg, reg, GEN_INT (n)));
2620 emit_insn (gen_lshrsi3_m (reg, reg, GEN_INT (n)));
2622 emit_insn (gen_lshrsi3_k (reg, reg, GEN_INT (n)));
2625 emit_insn (gen_ashlsi3_std (reg, reg, GEN_INT (n)));
2630 /* Same for HImode */
2633 gen_ashift_hi (int type, int n, rtx reg)
2635 /* Negative values here come from the shift_amounts array. */
2649 /* We don't have HImode right shift operations because using the
2650 ordinary 32 bit shift instructions for that doesn't generate proper
2651 zero/sign extension.
2652 gen_ashift_hi is only called in contexts where we know that the
2653 sign extension works out correctly. */
2656 if (GET_CODE (reg) == SUBREG)
2658 offset = SUBREG_BYTE (reg);
2659 reg = SUBREG_REG (reg);
2661 gen_ashift (type, n, gen_rtx_SUBREG (SImode, reg, offset));
2665 emit_insn (gen_ashlhi3_k (reg, reg, GEN_INT (n)));
2670 /* Output RTL to split a constant shift into its component SH constant
2671 shift instructions. */
2674 gen_shifty_op (int code, rtx *operands)
2676 int value = INTVAL (operands[2]);
2679 /* Truncate the shift count in case it is out of bounds. */
2680 value = value & 0x1f;
2684 if (code == LSHIFTRT)
2686 emit_insn (gen_rotlsi3_1 (operands[0], operands[0]));
2687 emit_insn (gen_movt (operands[0]));
2690 else if (code == ASHIFT)
2692 /* There is a two instruction sequence for 31 bit left shifts,
2693 but it requires r0. */
2694 if (GET_CODE (operands[0]) == REG && REGNO (operands[0]) == 0)
2696 emit_insn (gen_andsi3 (operands[0], operands[0], const1_rtx));
2697 emit_insn (gen_rotlsi3_31 (operands[0], operands[0]));
2702 else if (value == 0)
2704 /* This can happen even when optimizing, if there were subregs before
2705 reload. Don't output a nop here, as this is never optimized away;
2706 use a no-op move instead. */
2707 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[0]));
2711 max = shift_insns[value];
2712 for (i = 0; i < max; i++)
2713 gen_ashift (code, shift_amounts[value][i], operands[0]);
2716 /* Same as above, but optimized for values where the topmost bits don't
2720 gen_shifty_hi_op (int code, rtx *operands)
2722 int value = INTVAL (operands[2]);
2724 void (*gen_fun) (int, int, rtx);
2726 /* This operation is used by and_shl for SImode values with a few
2727 high bits known to be cleared. */
2731 emit_insn (gen_nop ());
2735 gen_fun = GET_MODE (operands[0]) == HImode ? gen_ashift_hi : gen_ashift;
2738 max = ext_shift_insns[value];
2739 for (i = 0; i < max; i++)
2740 gen_fun (code, ext_shift_amounts[value][i], operands[0]);
2743 /* When shifting right, emit the shifts in reverse order, so that
2744 solitary negative values come first. */
2745 for (i = ext_shift_insns[value] - 1; i >= 0; i--)
2746 gen_fun (code, ext_shift_amounts[value][i], operands[0]);
2749 /* Output RTL for an arithmetic right shift. */
2751 /* ??? Rewrite to use super-optimizer sequences. */
2754 expand_ashiftrt (rtx *operands)
2762 if (GET_CODE (operands[2]) != CONST_INT)
2764 rtx count = copy_to_mode_reg (SImode, operands[2]);
2765 emit_insn (gen_negsi2 (count, count));
2766 emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
2769 else if (ashiftrt_insns[INTVAL (operands[2]) & 31]
2770 > 1 + SH_DYNAMIC_SHIFT_COST)
2773 = force_reg (SImode, GEN_INT (- (INTVAL (operands[2]) & 31)));
2774 emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
2778 if (GET_CODE (operands[2]) != CONST_INT)
2781 value = INTVAL (operands[2]) & 31;
2785 /* If we are called from abs expansion, arrange things so that we
2786 can use a single MT instruction that doesn't clobber the source,
2787 if LICM can hoist out the load of the constant zero. */
2788 if (currently_expanding_to_rtl)
2790 emit_insn (gen_cmpgtsi_t (force_reg (SImode, CONST0_RTX (SImode)),
2792 emit_insn (gen_mov_neg_si_t (operands[0]));
2795 emit_insn (gen_ashrsi2_31 (operands[0], operands[1]));
2798 else if (value >= 16 && value <= 19)
2800 wrk = gen_reg_rtx (SImode);
2801 emit_insn (gen_ashrsi2_16 (wrk, operands[1]));
2804 gen_ashift (ASHIFTRT, 1, wrk);
2805 emit_move_insn (operands[0], wrk);
2808 /* Expand short sequences inline; for longer ones, call a magic routine. */
2809 else if (value <= 5)
2811 wrk = gen_reg_rtx (SImode);
2812 emit_move_insn (wrk, operands[1]);
2814 gen_ashift (ASHIFTRT, 1, wrk);
2815 emit_move_insn (operands[0], wrk);
2819 wrk = gen_reg_rtx (Pmode);
2821 /* Load the value into an arg reg and call a helper. */
2822 emit_move_insn (gen_rtx_REG (SImode, 4), operands[1]);
2823 sprintf (func, "__ashiftrt_r4_%d", value);
2824 function_symbol (wrk, func, SFUNC_STATIC);
2825 emit_insn (gen_ashrsi3_n (GEN_INT (value), wrk));
2826 emit_move_insn (operands[0], gen_rtx_REG (SImode, 4));
2831 sh_dynamicalize_shift_p (rtx count)
2833 return shift_insns[INTVAL (count)] > 1 + SH_DYNAMIC_SHIFT_COST;
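/* E.g. when SH_DYNAMIC_SHIFT_COST is 1, constant shifts that would need
   three or more insns inline - amounts like 5, 13 or 21 - are done as
   dynamic shifts instead.  */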
2836 /* Try to find a good way to implement the combiner pattern
2837 [(set (match_operand:SI 0 "register_operand" "r")
2838 (and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
2839 (match_operand:SI 2 "const_int_operand" "n"))
2840 (match_operand:SI 3 "const_int_operand" "n"))) .
2841 LEFT_RTX is operand 2 in the above pattern, and MASK_RTX is operand 3.
2842 return 0 for simple right / left or left/right shift combination.
2843 return 1 for a combination of shifts with zero_extend.
2844 return 2 for a combination of shifts with an AND that needs r0.
2845 return 3 for a combination of shifts with an AND that needs an extra
2846 scratch register, when the three highmost bits of the AND mask are clear.
2847 return 4 for a combination of shifts with an AND that needs an extra
2848 scratch register, when any of the three highmost bits of the AND mask is set.
2850 If ATTRP is set, store an initial right shift width in ATTRP[0],
2851 and the instruction length in ATTRP[1] . These values are not valid when returning 0.
2853 When ATTRP is set and returning 1, ATTRP[2] gets set to the index into
2854 shift_amounts for the last shift value that is to be used before the sign extend. */
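/* For instance, LEFT == 2 with MASK == 0x3fc gives kind 1: since
   (x << 2) & 0x3fc == (x & 0xff) << 2, an extu.b plus a shll2 (2 insns)
   beat the plain right/left shift combination (3 insns).  */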
2857 shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
2859 unsigned HOST_WIDE_INT mask, lsb, mask2, lsb2;
2860 int left = INTVAL (left_rtx), right;
2862 int cost, best_cost = 10000;
2863 int best_right = 0, best_len = 0;
2867 if (left < 0 || left > 31)
2869 if (GET_CODE (mask_rtx) == CONST_INT)
2870 mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
2872 mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
2873 /* Can this be expressed as a right shift / left shift pair? */
2874 lsb = ((mask ^ (mask - 1)) >> 1) + 1;
2875 right = exact_log2 (lsb);
2876 mask2 = ~(mask + lsb - 1);
2877 lsb2 = ((mask2 ^ (mask2 - 1)) >> 1) + 1;
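/* E.g. mask == 0x3f0: lsb == 0x10 isolates the lowest set bit (so right == 4),
   mask2 == ~0x3ff, and lsb2 == 0x400 marks the first zero above the mask's
   run of ones.  */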
2878 /* mask has no zeroes except trailing zeroes <==> ! mask2 */
2880 best_cost = shift_insns[right] + shift_insns[right + left];
2881 /* mask has no trailing zeroes <==> ! right */
2882 else if (! right && mask2 == ~(lsb2 - 1))
2884 int late_right = exact_log2 (lsb2);
2885 best_cost = shift_insns[left + late_right] + shift_insns[late_right];
2887 /* Try to use zero extend. */
2888 if (mask2 == ~(lsb2 - 1))
2892 for (width = 8; width <= 16; width += 8)
2894 /* Can we zero-extend right away? */
2895 if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
2898 = 1 + ext_shift_insns[right] + ext_shift_insns[left + right];
2899 if (cost < best_cost)
2910 /* ??? Could try to put zero extend into initial right shift,
2911 or even shift a bit left before the right shift. */
2912 /* Determine value of first part of left shift, to get to the
2913 zero extend cut-off point. */
2914 first = width - exact_log2 (lsb2) + right;
2915 if (first >= 0 && right + left - first >= 0)
2917 cost = ext_shift_insns[right] + ext_shift_insns[first] + 1
2918 + ext_shift_insns[right + left - first];
2919 if (cost < best_cost)
2931 /* Try to use r0 AND pattern */
2932 for (i = 0; i <= 2; i++)
2936 if (! CONST_OK_FOR_K08 (mask >> i))
2938 cost = (i != 0) + 2 + ext_shift_insns[left + i];
2939 if (cost < best_cost)
2944 best_len = cost - 1;
2947 /* Try to use a scratch register to hold the AND operand. */
2948 can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
2949 for (i = 0; i <= 2; i++)
2953 cost = (i != 0) + (CONST_OK_FOR_I08 (mask >> i) ? 2 : 3)
2954 + (can_ext ? ext_shift_insns : shift_insns)[left + i];
2955 if (cost < best_cost)
2960 best_len = cost - 1 - ! CONST_OK_FOR_I08 (mask >> i);
2966 attrp[0] = best_right;
2967 attrp[1] = best_len;
2972 /* This is used in length attributes of the unnamed instructions
2973 corresponding to shl_and_kind return values of 1 and 2. */
2975 shl_and_length (rtx insn)
2977 rtx set_src, left_rtx, mask_rtx;
2980 set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2981 left_rtx = XEXP (XEXP (set_src, 0), 1);
2982 mask_rtx = XEXP (set_src, 1);
2983 shl_and_kind (left_rtx, mask_rtx, attributes);
2984 return attributes[1];
2987 /* This is used in length attribute of the and_shl_scratch instruction. */
2990 shl_and_scr_length (rtx insn)
2992 rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2993 int len = shift_insns[INTVAL (XEXP (set_src, 1))];
2994 rtx op = XEXP (set_src, 0);
2995 len += shift_insns[INTVAL (XEXP (op, 1))] + 1;
2996 op = XEXP (XEXP (op, 0), 0);
2997 return len + shift_insns[INTVAL (XEXP (op, 1))];
3000 /* Generate rtl for instructions for which shl_and_kind advised a particular
3001 method of generating them, i.e. returned zero. */
3004 gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
3007 unsigned HOST_WIDE_INT mask;
3008 int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
3009 int right, total_shift;
3010 void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;
3012 right = attributes[0];
3013 total_shift = INTVAL (left_rtx) + right;
3014 mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> total_shift;
3021 int first = attributes[2];
3026 emit_insn ((mask << right) <= 0xff
3027 ? gen_zero_extendqisi2 (dest,
3028 gen_lowpart (QImode, source))
3029 : gen_zero_extendhisi2 (dest,
3030 gen_lowpart (HImode, source)));
3034 emit_insn (gen_movsi (dest, source));
3038 operands[2] = GEN_INT (right);
3039 gen_shifty_hi_op (LSHIFTRT, operands);
3043 operands[2] = GEN_INT (first);
3044 gen_shifty_hi_op (ASHIFT, operands);
3045 total_shift -= first;
3049 emit_insn (mask <= 0xff
3050 ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
3051 : gen_zero_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3052 if (total_shift > 0)
3054 operands[2] = GEN_INT (total_shift);
3055 gen_shifty_hi_op (ASHIFT, operands);
3060 shift_gen_fun = gen_shifty_op;
3062 /* If the topmost bit that matters is set, set the topmost bits
3063 that don't matter. This way, we might be able to get a shorter
3065 if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
3066 mask |= (HOST_WIDE_INT) ~0 << (31 - total_shift);
3068 /* Don't expand fine-grained when combining, because that will
3069 make the pattern fail. */
3070 if (currently_expanding_to_rtl
3071 || reload_in_progress || reload_completed)
3075 /* Cases 3 and 4 should be handled by this split
3076 only while combining */
3077 gcc_assert (kind <= 2);
3080 emit_insn (gen_lshrsi3 (dest, source, GEN_INT (right)));
3083 emit_insn (gen_andsi3 (dest, source, GEN_INT (mask)));
3088 operands[2] = GEN_INT (total_shift);
3089 shift_gen_fun (ASHIFT, operands);
3096 if (kind != 4 && total_shift < 16)
3098 neg = -ext_shift_amounts[total_shift][1];
3100 neg -= ext_shift_amounts[total_shift][2];
3104 emit_insn (gen_and_shl_scratch (dest, source,
3107 GEN_INT (total_shift + neg),
3109 emit_insn (gen_movsi (dest, dest));
3116 /* Try to find a good way to implement the combiner pattern
3117 [(set (match_operand:SI 0 "register_operand" "=r")
3118 (sign_extract:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
3119 (match_operand:SI 2 "const_int_operand" "n")
3120 (match_operand:SI 3 "const_int_operand" "n")
3122 (clobber (reg:SI T_REG))]
3123 LEFT_RTX is operand 2 in the above pattern, and SIZE_RTX is operand 3.
3124 return 0 for simple left / right shift combination.
3125 return 1 for left shift / 8 bit sign extend / left shift.
3126 return 2 for left shift / 16 bit sign extend / left shift.
3127 return 3 for left shift / 8 bit sign extend / shift / sign extend.
3128 return 4 for left shift / 16 bit sign extend / shift / sign extend.
3129 return 5 for left shift / 16 bit sign extend / right shift
3130 return 6 for < 8 bit sign extend / left shift.
3131 return 7 for < 8 bit sign extend / left shift / single right shift.
3132 If COSTP is nonzero, assign the calculated cost to *COSTP. */
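/* For instance, LEFT == 1 with SIZE == 8 (INSIZE == 7) yields kind 1: a
   one-bit left shift plus an 8-bit sign extend costs 2 insns, against
   shift_insns[25] + ashiftrt_insns[24] == 11 for the default left/right
   shift pair.  */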
3135 shl_sext_kind (rtx left_rtx, rtx size_rtx, int *costp)
3137 int left, size, insize, ext;
3138 int cost = 0, best_cost;
3141 left = INTVAL (left_rtx);
3142 size = INTVAL (size_rtx);
3143 insize = size - left;
3144 gcc_assert (insize > 0);
3145 /* Default to left / right shift. */
3147 best_cost = shift_insns[32 - insize] + ashiftrt_insns[32 - size];
3150 /* 16 bit shift / sign extend / 16 bit shift */
3151 cost = shift_insns[16 - insize] + 1 + ashiftrt_insns[16 - size];
3152 /* If ashiftrt_insns[16 - size] is 8, this choice will be overridden
3153 below, by alternative 3 or something even better. */
3154 if (cost < best_cost)
3160 /* Try a plain sign extend between two shifts. */
3161 for (ext = 16; ext >= insize; ext -= 8)
3165 cost = ext_shift_insns[ext - insize] + 1 + shift_insns[size - ext];
3166 if (cost < best_cost)
3168 kind = ext / (unsigned) 8;
3172 /* Check if we can do a sloppy shift with a final signed shift
3173 restoring the sign. */
3174 if (EXT_SHIFT_SIGNED (size - ext))
3175 cost = ext_shift_insns[ext - insize] + ext_shift_insns[size - ext] + 1;
3176 /* If not, maybe it's still cheaper to do the second shift sloppy,
3177 and do a final sign extend? */
3178 else if (size <= 16)
3179 cost = ext_shift_insns[ext - insize] + 1
3180 + ext_shift_insns[size > ext ? size - ext : ext - size] + 1;
3183 if (cost < best_cost)
3185 kind = ext / (unsigned) 8 + 2;
3189 /* Check if we can sign extend in r0 */
3192 cost = 3 + shift_insns[left];
3193 if (cost < best_cost)
3198 /* Try the same with a final signed shift. */
3201 cost = 3 + ext_shift_insns[left + 1] + 1;
3202 if (cost < best_cost)
3211 /* Try to use a dynamic shift. */
3212 cost = shift_insns[32 - insize] + 1 + SH_DYNAMIC_SHIFT_COST;
3213 if (cost < best_cost)
3224 /* Function to be used in the length attribute of the instructions
3225 implementing this pattern. */
3228 shl_sext_length (rtx insn)
3230 rtx set_src, left_rtx, size_rtx;
3233 set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
3234 left_rtx = XEXP (XEXP (set_src, 0), 1);
3235 size_rtx = XEXP (set_src, 1);
3236 shl_sext_kind (left_rtx, size_rtx, &cost);
3240 /* Generate rtl for this pattern */
3243 gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
3246 int left, size, insize, cost;
3249 kind = shl_sext_kind (left_rtx, size_rtx, &cost);
3250 left = INTVAL (left_rtx);
3251 size = INTVAL (size_rtx);
3252 insize = size - left;
3260 int ext = kind & 1 ? 8 : 16;
3261 int shift2 = size - ext;
3263 /* Don't expand fine-grained when combining, because that will
3264 make the pattern fail. */
3265 if (! currently_expanding_to_rtl
3266 && ! reload_in_progress && ! reload_completed)
3268 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3269 emit_insn (gen_movsi (dest, source));
3273 emit_insn (gen_movsi (dest, source));
3277 operands[2] = GEN_INT (ext - insize);
3278 gen_shifty_hi_op (ASHIFT, operands);
3281 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
3282 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3287 operands[2] = GEN_INT (shift2);
3288 gen_shifty_op (ASHIFT, operands);
3295 if (EXT_SHIFT_SIGNED (shift2))
3297 operands[2] = GEN_INT (shift2 + 1);
3298 gen_shifty_op (ASHIFT, operands);
3299 operands[2] = const1_rtx;
3300 gen_shifty_op (ASHIFTRT, operands);
3303 operands[2] = GEN_INT (shift2);
3304 gen_shifty_hi_op (ASHIFT, operands);
3308 operands[2] = GEN_INT (-shift2);
3309 gen_shifty_hi_op (LSHIFTRT, operands);
3311 emit_insn (size <= 8
3312 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
3313 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3320 if (! currently_expanding_to_rtl
3321 && ! reload_in_progress && ! reload_completed)
3322 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3326 operands[2] = GEN_INT (16 - insize);
3327 gen_shifty_hi_op (ASHIFT, operands);
3328 emit_insn (gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3330 /* Don't use gen_ashrsi3 because it generates new pseudos. */
3332 gen_ashift (ASHIFTRT, 1, dest);
3337 /* Don't expand fine-grained when combining, because that will
3338 make the pattern fail. */
3339 if (! currently_expanding_to_rtl
3340 && ! reload_in_progress && ! reload_completed)
3342 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3343 emit_insn (gen_movsi (dest, source));
3346 emit_insn (gen_andsi3 (dest, source, GEN_INT ((1 << insize) - 1)));
3347 emit_insn (gen_xorsi3 (dest, dest, GEN_INT (1 << (insize - 1))));
3348 emit_insn (gen_addsi3 (dest, dest, GEN_INT (-1 << (insize - 1))));
3350 operands[2] = kind == 7 ? GEN_INT (left + 1) : left_rtx;
3351 gen_shifty_op (ASHIFT, operands);
3353 emit_insn (gen_ashrsi3_k (dest, dest, const1_rtx));
3361 /* Prefix a symbol_ref name with "datalabel". */
3364 gen_datalabel_ref (rtx sym)
3368 if (GET_CODE (sym) == LABEL_REF)
3369 return gen_rtx_CONST (GET_MODE (sym),
3370 gen_rtx_UNSPEC (GET_MODE (sym),
3374 gcc_assert (GET_CODE (sym) == SYMBOL_REF);
3376 str = XSTR (sym, 0);
3377 /* Share all SYMBOL_REF strings with the same value - that is important for cse. */
3379 str = IDENTIFIER_POINTER (get_identifier (str));
3380 XSTR (sym, 0) = str;
3386 static alloc_pool label_ref_list_pool;
3388 typedef struct label_ref_list_d
3391 struct label_ref_list_d *next;
3392 } *label_ref_list_t;
3394 /* The SH cannot load a large constant into a register, constants have to
3395 come from a pc relative load. The reference of a pc relative load
3396 instruction must be less than 1k in front of the instruction. This
3397 means that we often have to dump a constant inside a function, and
3398 generate code to branch around it.
3400 It is important to minimize this, since the branches will slow things
3401 down and make things bigger.
3403 Worst case code looks like:
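mov.l L1,rn
bra   L2
nop
align
L1:   .long value
L2:
..

mov.l L3,rn
bra   L4
nop
align
L3:   .long value
L4:
..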
3421 We fix this by performing a scan before scheduling, which notices which
3422 instructions need to have their operands fetched from the constant table
3423 and builds the table.
3427 The algorithm is: scan, find an instruction which needs a pcrel move. Look forward, find the
3428 last barrier which is within MAX_COUNT bytes of the requirement.
3429 If there isn't one, make one. Process all the instructions between
3430 the find and the barrier.
3432 In the above example, we can tell that L3 is within 1k of L1, so
3433 the first move can be shrunk from the 3 insn+constant sequence into
3434 just 1 insn, and the constant moved to L3 to make:
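mov.l L3,rn
..
mov.l L4,rn
bra   L5
nop
align
L3:   .long value
L4:   .long value
L5: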
3445 Then the second move becomes the target for the shortening process. */
3449 rtx value; /* Value in table. */
3450 rtx label; /* Label of value. */
3451 label_ref_list_t wend; /* End of window. */
3452 enum machine_mode mode; /* Mode of value. */
3454 /* True if this constant is accessed as part of a post-increment
3455 sequence. Note that HImode constants are never accessed in this way. */
3456 bool part_of_sequence_p;
3459 /* The maximum number of constants that can fit into one pool, since
3460 constants in the range 0..510 are at least 2 bytes long, and in the
3461 range from there to 1018 at least 4 bytes. */
3463 #define MAX_POOL_SIZE 372
3464 static pool_node pool_vector[MAX_POOL_SIZE];
3465 static int pool_size;
3466 static rtx pool_window_label;
3467 static int pool_window_last;
3469 static int max_labelno_before_reorg;
3471 /* ??? If we need a constant in HImode which is the truncated value of a
3472 constant we need in SImode, we could combine the two entries thus saving
3473 two bytes. Is this common enough to be worth the effort of implementing it? */
3476 /* ??? This stuff should be done at the same time that we shorten branches.
3477 As it is now, we must assume that all branches are the maximum size, and
3478 this causes us to almost always output constant pools sooner than necessary. */
3481 /* Add a constant to the pool and return its label. */
3484 add_constant (rtx x, enum machine_mode mode, rtx last_value)
3488 label_ref_list_t ref, newref;
3490 /* First see if we've already got it. */
3491 for (i = 0; i < pool_size; i++)
3493 if (x->code == pool_vector[i].value->code
3494 && mode == pool_vector[i].mode)
3496 if (x->code == CODE_LABEL)
3498 if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
3501 if (rtx_equal_p (x, pool_vector[i].value))
3506 || ! rtx_equal_p (last_value, pool_vector[i-1].value))
3508 new = gen_label_rtx ();
3509 LABEL_REFS (new) = pool_vector[i].label;
3510 pool_vector[i].label = lab = new;
3512 if (lab && pool_window_label)
3514 newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
3515 newref->label = pool_window_label;
3516 ref = pool_vector[pool_window_last].wend;
3518 pool_vector[pool_window_last].wend = newref;
3521 pool_window_label = new;
3522 pool_window_last = i;
3528 /* Need a new one. */
3529 pool_vector[pool_size].value = x;
3530 if (last_value && rtx_equal_p (last_value, pool_vector[pool_size - 1].value))
3533 pool_vector[pool_size - 1].part_of_sequence_p = true;
3536 lab = gen_label_rtx ();
3537 pool_vector[pool_size].mode = mode;
3538 pool_vector[pool_size].label = lab;
3539 pool_vector[pool_size].wend = NULL;
3540 pool_vector[pool_size].part_of_sequence_p = (lab == 0);
3541 if (lab && pool_window_label)
3543 newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
3544 newref->label = pool_window_label;
3545 ref = pool_vector[pool_window_last].wend;
3547 pool_vector[pool_window_last].wend = newref;
3550 pool_window_label = lab;
3551 pool_window_last = pool_size;
3556 /* Output the literal table. START, if nonzero, is the first instruction
3557 this table is needed for, and also indicates that there is at least one
3558 casesi_worker_2 instruction; we have to emit the operand3 labels from
3559 these insns at a 4-byte aligned position. BARRIER is the barrier
3560 after which we are to place the table. */
3563 dump_table (rtx start, rtx barrier)
3569 label_ref_list_t ref;
3572 /* Do two passes, first time dump out the HI sized constants. */
3574 for (i = 0; i < pool_size; i++)
3576 pool_node *p = &pool_vector[i];
3578 if (p->mode == HImode)
3582 scan = emit_insn_after (gen_align_2 (), scan);
3585 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3586 scan = emit_label_after (lab, scan);
3587 scan = emit_insn_after (gen_consttable_2 (p->value, const0_rtx),
3589 for (ref = p->wend; ref; ref = ref->next)
3592 scan = emit_insn_after (gen_consttable_window_end (lab), scan);
3595 else if (p->mode == DFmode)
3603 scan = emit_insn_after (gen_align_4 (), scan);
3605 for (; start != barrier; start = NEXT_INSN (start))
3606 if (GET_CODE (start) == INSN
3607 && recog_memoized (start) == CODE_FOR_casesi_worker_2)
3609 rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
3610 rtx lab = XEXP (XVECEXP (src, 0, 3), 0);
3612 scan = emit_label_after (lab, scan);
3615 if (TARGET_FMOVD && TARGET_ALIGN_DOUBLE && have_df)
3617 rtx align_insn = NULL_RTX;
3619 scan = emit_label_after (gen_label_rtx (), scan);
3620 scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
3623 for (i = 0; i < pool_size; i++)
3625 pool_node *p = &pool_vector[i];
3633 if (align_insn && !p->part_of_sequence_p)
3635 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3636 emit_label_before (lab, align_insn);
3637 emit_insn_before (gen_consttable_4 (p->value, const0_rtx),
3639 for (ref = p->wend; ref; ref = ref->next)
3642 emit_insn_before (gen_consttable_window_end (lab),
3645 delete_insn (align_insn);
3646 align_insn = NULL_RTX;
3651 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3652 scan = emit_label_after (lab, scan);
3653 scan = emit_insn_after (gen_consttable_4 (p->value,
3655 need_align = ! need_align;
3661 scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
3666 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3667 scan = emit_label_after (lab, scan);
3668 scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
3675 if (p->mode != HImode)
3677 for (ref = p->wend; ref; ref = ref->next)
3680 scan = emit_insn_after (gen_consttable_window_end (lab),
3689 for (i = 0; i < pool_size; i++)
3691 pool_node *p = &pool_vector[i];
3702 scan = emit_label_after (gen_label_rtx (), scan);
3703 scan = emit_insn_after (gen_align_4 (), scan);
3705 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3706 scan = emit_label_after (lab, scan);
3707 scan = emit_insn_after (gen_consttable_4 (p->value, const0_rtx),
3715 scan = emit_label_after (gen_label_rtx (), scan);
3716 scan = emit_insn_after (gen_align_4 (), scan);
3718 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3719 scan = emit_label_after (lab, scan);
3720 scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
3727 if (p->mode != HImode)
3729 for (ref = p->wend; ref; ref = ref->next)
3732 scan = emit_insn_after (gen_consttable_window_end (lab), scan);
3737 scan = emit_insn_after (gen_consttable_end (), scan);
3738 scan = emit_barrier_after (scan);
3740 pool_window_label = NULL_RTX;
3741 pool_window_last = 0;
3744 /* Return nonzero if constant would be an ok source for a
3745 mov.w instead of a mov.l. */
3750 return (GET_CODE (src) == CONST_INT
3751 && INTVAL (src) >= -32768
3752 && INTVAL (src) <= 32767);
3755 #define MOVA_LABELREF(mova) XVECEXP (SET_SRC (PATTERN (mova)), 0, 0)
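/* I.e. the label_ref (or, in a mova_const, the const) wrapped inside the
   UNSPEC_MOVA of the mova's SET_SRC.  */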
3757 /* Nonzero if the insn is a move instruction which needs to be fixed. */
3759 /* ??? For DImode/DFmode moves, we don't need to fix it if each half of the
3760 CONST_DOUBLE input value is CONST_OK_FOR_I08. For an SFmode move, we don't
3761 need to fix it if the input value is CONST_OK_FOR_I08. */
3764 broken_move (rtx insn)
3766 if (GET_CODE (insn) == INSN)
3768 rtx pat = PATTERN (insn);
3769 if (GET_CODE (pat) == PARALLEL)
3770 pat = XVECEXP (pat, 0, 0);
3771 if (GET_CODE (pat) == SET
3772 /* We can load any 8-bit value if we don't care what the high
3773 order bits end up as. */
3774 && GET_MODE (SET_DEST (pat)) != QImode
3775 && (CONSTANT_P (SET_SRC (pat))
3776 /* Match mova_const. */
3777 || (GET_CODE (SET_SRC (pat)) == UNSPEC
3778 && XINT (SET_SRC (pat), 1) == UNSPEC_MOVA
3779 && GET_CODE (XVECEXP (SET_SRC (pat), 0, 0)) == CONST))
3781 && GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
3782 && (fp_zero_operand (SET_SRC (pat))
3783 || fp_one_operand (SET_SRC (pat)))
3784 /* ??? If this is a -m4 or -m4-single compilation, in general
3785 we don't know the current setting of fpscr, so disable fldi.
3786 There is an exception if this was a register-register move
3787 before reload - and hence it was ascertained that we have
3788 single precision setting - and in a post-reload optimization
3789 we changed this to do a constant load. In that case
3790 we don't have an r0 clobber, hence we must use fldi. */
3791 && (! TARGET_SH4 || TARGET_FMOVD
3792 || (GET_CODE (XEXP (XVECEXP (PATTERN (insn), 0, 2), 0))
3794 && GET_CODE (SET_DEST (pat)) == REG
3795 && FP_REGISTER_P (REGNO (SET_DEST (pat))))
3797 && GET_MODE (SET_DEST (pat)) == SImode
3798 && (satisfies_constraint_I20 (SET_SRC (pat))
3799 || satisfies_constraint_I28 (SET_SRC (pat))))
3800 && ! satisfies_constraint_I08 (SET_SRC (pat)))
3810 return (GET_CODE (insn) == INSN
3811 && GET_CODE (PATTERN (insn)) == SET
3812 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
3813 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_MOVA
3814 /* Don't match mova_const. */
3815 && GET_CODE (MOVA_LABELREF (insn)) == LABEL_REF);
3818 /* Fix up a mova from a switch that went out of range. */
3820 fixup_mova (rtx mova)
3822 PUT_MODE (XEXP (MOVA_LABELREF (mova), 0), QImode);
3825 SET_SRC (PATTERN (mova)) = MOVA_LABELREF (mova);
3826 INSN_CODE (mova) = -1;
3831 rtx lab = gen_label_rtx ();
3832 rtx wpat, wpat0, wpat1, wsrc, diff;
3836 worker = NEXT_INSN (worker);
3838 && GET_CODE (worker) != CODE_LABEL
3839 && GET_CODE (worker) != JUMP_INSN);
3840 } while (GET_CODE (worker) == NOTE
3841 || recog_memoized (worker) != CODE_FOR_casesi_worker_1);
3842 wpat = PATTERN (worker);
3843 wpat0 = XVECEXP (wpat, 0, 0);
3844 wpat1 = XVECEXP (wpat, 0, 1);
3845 wsrc = SET_SRC (wpat0);
3846 PATTERN (worker) = (gen_casesi_worker_2
3847 (SET_DEST (wpat0), XVECEXP (wsrc, 0, 1),
3848 XEXP (XVECEXP (wsrc, 0, 2), 0), lab,
3850 INSN_CODE (worker) = -1;
3851 diff = gen_rtx_MINUS (Pmode, XVECEXP (SET_SRC (PATTERN (mova)), 0, 0),
3852 gen_rtx_LABEL_REF (Pmode, lab));
3853 diff = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, diff), UNSPEC_PIC);
3854 SET_SRC (PATTERN (mova)) = gen_rtx_CONST (Pmode, diff);
3855 INSN_CODE (mova) = -1;
3859 /* NEW_MOVA is a mova we've just encountered while scanning forward. Update
3860 *num_mova, and check if the new mova is not nested within the first one.
3861 return 0 if *first_mova was replaced, 1 if new_mova was replaced,
3862 2 if new_mova has been assigned to *first_mova, -1 otherwise. */
3864 untangle_mova (int *num_mova, rtx *first_mova, rtx new_mova)
3866 int n_addr = 0; /* Initialization to shut up spurious warning. */
3867 int f_target, n_target = 0; /* Likewise. */
3871 n_addr = INSN_ADDRESSES (INSN_UID (new_mova));
3872 n_target = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (new_mova), 0)));
3873 if (n_addr > n_target || n_addr + 1022 < n_target)
3875 /* Change the mova into a load.
3876 broken_move will then return true for it. */
3877 fixup_mova (new_mova);
3883 *first_mova = new_mova;
3888 = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (*first_mova), 0))))
3893 if (f_target - INSN_ADDRESSES (INSN_UID (*first_mova))
3894 > n_target - n_addr)
3896 fixup_mova (*first_mova);
3901 fixup_mova (new_mova);
3906 /* Find the last barrier from insn FROM which is close enough to hold the
3907 constant pool. If we can't find one, then create one near the end of the range. */
3911 find_barrier (int num_mova, rtx mova, rtx from)
3920 int leading_mova = num_mova;
3921 rtx barrier_before_mova = 0, found_barrier = 0, good_barrier = 0;
3926 /* For HImode: range is 510, add 4 because pc counts from address of
3927 second instruction after this one, subtract 2 for the jump instruction
3928 that we may need to emit before the table, subtract 2 for the instruction
3929 that fills the jump delay slot (in very rare cases, reorg will take an
3930 instruction from after the constant pool or will leave the delay slot
3931 empty). This gives 510.
3932 For SImode: range is 1020, add 4 because pc counts from address of
3933 second instruction after this one, subtract 2 in case pc is 2 byte
3934 aligned, subtract 2 for the jump instruction that we may need to emit
3935 before the table, subtract 2 for the instruction that fills the jump
3936 delay slot. This gives 1018. */
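/* I.e. for HImode, 510 + 4 - 2 - 2 == 510; for SImode,
   1020 + 4 - 2 - 2 - 2 == 1018.  */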
3938 /* The branch will always be shortened now that the reference address for
3939 forward branches is the successor address, thus we need no longer make
3940 adjustments to the [sh]i_limit for -O0. */
3945 while (from && count_si < si_limit && count_hi < hi_limit)
3947 int inc = get_attr_length (from);
3950 /* If this is a label that existed at the time of the compute_alignments
3951 call, determine the alignment. N.B. When find_barrier recurses for
3952 an out-of-reach mova, we might see labels at the start of previously
3953 inserted constant tables. */
3954 if (GET_CODE (from) == CODE_LABEL
3955 && CODE_LABEL_NUMBER (from) <= max_labelno_before_reorg)
3958 new_align = 1 << label_to_alignment (from);
3959 else if (GET_CODE (prev_nonnote_insn (from)) == BARRIER)
3960 new_align = 1 << barrier_align (from);
3965 /* In case we are scanning a constant table because of recursion, check
3966 for explicit alignments. If the table is long, we might be forced
3967 to emit the new table in front of it; the length of the alignment
3968 might be the last straw. */
3969 else if (GET_CODE (from) == INSN
3970 && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
3971 && XINT (PATTERN (from), 1) == UNSPECV_ALIGN)
3972 new_align = INTVAL (XVECEXP (PATTERN (from), 0, 0));
3973 /* When we find the end of a constant table, paste the new constant
3974 at the end. That is better than putting it in front because
3975 this way, we don't need extra alignment for adding a 4-byte-aligned
3976 mov(a) label to a 2/4 or 8/4 byte aligned table. */
3977 else if (GET_CODE (from) == INSN
3978 && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
3979 && XINT (PATTERN (from), 1) == UNSPECV_CONST_END)
3982 if (GET_CODE (from) == BARRIER)
3986 found_barrier = from;
3988 /* If we are at the end of the function, or in front of an alignment
3989 instruction, we need not insert an extra alignment. We prefer
3990 this kind of barrier. */
3991 if (barrier_align (from) > 2)
3992 good_barrier = from;
3994 /* If we are at the end of a hot/cold block, dump the constants here. */
3996 next = NEXT_INSN (from);
3999 && NOTE_KIND (next) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
4003 if (broken_move (from))
4006 enum machine_mode mode;
4008 pat = PATTERN (from);
4009 if (GET_CODE (pat) == PARALLEL)
4010 pat = XVECEXP (pat, 0, 0);
4011 src = SET_SRC (pat);
4012 dst = SET_DEST (pat);
4013 mode = GET_MODE (dst);
4015 /* We must explicitly check the mode, because sometimes the
4016 front end will generate code to load unsigned constants into
4017 HImode targets without properly sign extending them. */
4019 || (mode == SImode && hi_const (src) && REGNO (dst) != FPUL_REG))
4022 /* We put the short constants before the long constants, so
4023 we must count the length of short constants in the range
4024 for the long constants. */
4025 /* ??? This isn't optimal, but is easy to do. */
4030 /* We dump DF/DI constants before SF/SI ones, because
4031 the limit is the same, but the alignment requirements
4032 are higher. We may waste up to 4 additional bytes
4033 for alignment, and the DF/DI constant may have
4034 another SF/SI constant placed before it. */
4035 if (TARGET_SHCOMPACT
4037 && (mode == DFmode || mode == DImode))
4042 while (si_align > 2 && found_si + si_align - 2 > count_si)
4044 if (found_si > count_si)
4045 count_si = found_si;
4046 found_si += GET_MODE_SIZE (mode);
4048 si_limit -= GET_MODE_SIZE (mode);
4054 switch (untangle_mova (&num_mova, &mova, from))
4056 case 0: return find_barrier (0, 0, mova);
4061 = good_barrier ? good_barrier : found_barrier;
4065 if (found_si > count_si)
4066 count_si = found_si;
4068 else if (GET_CODE (from) == JUMP_INSN
4069 && (GET_CODE (PATTERN (from)) == ADDR_VEC
4070 || GET_CODE (PATTERN (from)) == ADDR_DIFF_VEC))
4072 if ((num_mova > 1 && GET_MODE (prev_nonnote_insn (from)) == VOIDmode)
4074 && (prev_nonnote_insn (from)
4075 == XEXP (MOVA_LABELREF (mova), 0))))
4077 if (barrier_align (next_real_insn (from)) == align_jumps_log)
4079 /* We have just passed the barrier in front of the
4080 ADDR_DIFF_VEC, which is stored in found_barrier. Since
4081 the ADDR_DIFF_VEC is accessed as data, just like our pool
4082 constants, this is a good opportunity to accommodate what
4083 we have gathered so far.
4084 If we waited any longer, we could end up at a barrier in
4085 front of code, which gives worse cache usage for separated
4086 instruction / data caches. */
4087 good_barrier = found_barrier;
4092 rtx body = PATTERN (from);
4093 inc = XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body));
4096 /* For the SH1, we generate alignments even after jumps-around-jumps. */
4097 else if (GET_CODE (from) == JUMP_INSN
4099 && ! TARGET_SMALLCODE)
4105 if (new_align > si_align)
4107 si_limit -= (count_si - 1) & (new_align - si_align);
4108 si_align = new_align;
4110 count_si = (count_si + new_align - 1) & -new_align;
4115 if (new_align > hi_align)
4117 hi_limit -= (count_hi - 1) & (new_align - hi_align);
4118 hi_align = new_align;
4120 count_hi = (count_hi + new_align - 1) & -new_align;
4122 from = NEXT_INSN (from);
4129 /* Try as we might, the leading mova is out of range. Change
4130 it into a load (which will become a pcload) and retry. */
4132 return find_barrier (0, 0, mova);
4136 /* Insert the constant pool table before the mova instruction,
4137 to prevent the mova label reference from going out of range. */
4139 good_barrier = found_barrier = barrier_before_mova;
4145 if (good_barrier && next_real_insn (found_barrier))
4146 found_barrier = good_barrier;
4150 /* We didn't find a barrier in time to dump our stuff,
4151 so we'll make one. */
4152 rtx label = gen_label_rtx ();
4154 /* If we exceeded the range, then we must back up over the last
4155 instruction we looked at. Otherwise, we just need to undo the
4156 NEXT_INSN at the end of the loop. */
4157 if (PREV_INSN (from) != orig
4158 && (count_hi > hi_limit || count_si > si_limit))
4159 from = PREV_INSN (PREV_INSN (from));
4161 from = PREV_INSN (from);
4163 /* Walk back to be just before any jump or label.
4164 Putting it before a label reduces the number of times the branch
4165 around the constant pool table will be hit. Putting it before
4166 a jump makes it more likely that the bra delay slot will be filled. */
4168 while (GET_CODE (from) == JUMP_INSN || GET_CODE (from) == NOTE
4169 || GET_CODE (from) == CODE_LABEL)
4170 from = PREV_INSN (from);
4172 from = emit_jump_insn_after (gen_jump (label), from);
4173 JUMP_LABEL (from) = label;
4174 LABEL_NUSES (label) = 1;
4175 found_barrier = emit_barrier_after (from);
4176 emit_label_after (label, found_barrier);
4179 return found_barrier;
4182 /* If the instruction INSN is implemented by a special function, and we can
4183 positively find the register that is used to call the sfunc, and this
4184 register is not used anywhere else in this instruction - except as the
4185 destination of a set, return this register; else, return 0. */
4187 sfunc_uses_reg (rtx insn)
4190 rtx pattern, part, reg_part, reg;
4192 if (GET_CODE (insn) != INSN)
4194 pattern = PATTERN (insn);
4195 if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
4198 for (reg_part = 0, i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
4200 part = XVECEXP (pattern, 0, i);
4201 if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == SImode)
4206 reg = XEXP (reg_part, 0);
4207 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
4209 part = XVECEXP (pattern, 0, i);
4210 if (part == reg_part || GET_CODE (part) == CLOBBER)
4212 if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
4213 && GET_CODE (SET_DEST (part)) == REG)
4214 ? SET_SRC (part) : part)))
4220 /* See if the only way in which INSN uses REG is by calling it, or by
4221 setting it while calling it. Set *SET to a SET rtx if the register
4225 noncall_uses_reg (rtx reg, rtx insn, rtx *set)
4231 reg2 = sfunc_uses_reg (insn);
4232 if (reg2 && REGNO (reg2) == REGNO (reg))
4234 pattern = single_set (insn);
4236 && GET_CODE (SET_DEST (pattern)) == REG
4237 && REGNO (reg) == REGNO (SET_DEST (pattern)))
4241 if (GET_CODE (insn) != CALL_INSN)
4243 /* We don't use rtx_equal_p because we don't care if the mode is different. */
4245 pattern = single_set (insn);
4247 && GET_CODE (SET_DEST (pattern)) == REG
4248 && REGNO (reg) == REGNO (SET_DEST (pattern)))
4254 par = PATTERN (insn);
4255 if (GET_CODE (par) == PARALLEL)
4256 for (i = XVECLEN (par, 0) - 1; i >= 0; i--)
4258 part = XVECEXP (par, 0, i);
4259 if (GET_CODE (part) != SET && reg_mentioned_p (reg, part))
4262 return reg_mentioned_p (reg, SET_SRC (pattern));
4268 pattern = PATTERN (insn);
4270 if (GET_CODE (pattern) == PARALLEL)
4274 for (i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
4275 if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
4277 pattern = XVECEXP (pattern, 0, 0);
4280 if (GET_CODE (pattern) == SET)
4282 if (reg_mentioned_p (reg, SET_DEST (pattern)))
4284 /* We don't use rtx_equal_p, because we don't care if the
4285 mode is different. */
4286 if (GET_CODE (SET_DEST (pattern)) != REG
4287 || REGNO (reg) != REGNO (SET_DEST (pattern)))
4293 pattern = SET_SRC (pattern);
4296 if (GET_CODE (pattern) != CALL
4297 || GET_CODE (XEXP (pattern, 0)) != MEM
4298 || ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
4304 /* Given a X, a pattern of an insn or a part of it, return a mask of used
4305 general registers. Bits 0..15 mean that the respective registers
4306 are used as inputs in the instruction. Bits 16..31 mean that the
4307 registers 0..15, respectively, are used as outputs, or are clobbered.
4308 IS_DEST should be set to 16 if X is the destination of a SET, else to 0. */
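/* For example, (set (reg:SI 1) (plus:SI (reg:SI 1) (reg:SI 4))) yields
   1 << 17 | 1 << 4 | 1 << 1: r1 is used as an output, r1 and r4 as inputs.  */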
4310 regs_used (rtx x, int is_dest)
4318 code = GET_CODE (x);
4323 return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
4324 << (REGNO (x) + is_dest));
4328 rtx y = SUBREG_REG (x);
4330 if (GET_CODE (y) != REG)
4333 return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
4335 subreg_regno_offset (REGNO (y),
4338 GET_MODE (x)) + is_dest));
4342 return regs_used (SET_SRC (x), 0) | regs_used (SET_DEST (x), 16);
4344 /* If there was a return value, it must have been indicated with USE. */
4359 fmt = GET_RTX_FORMAT (code);
4361 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4366 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4367 used |= regs_used (XVECEXP (x, i, j), is_dest);
4369 else if (fmt[i] == 'e')
4370 used |= regs_used (XEXP (x, i), is_dest);
4375 /* Create an instruction that prevents redirection of a conditional branch
4376 to the destination of the JUMP with address ADDR.
4377 If the branch needs to be implemented as an indirect jump, try to find
4378 a scratch register for it.
4379 If NEED_BLOCK is 0, don't do anything unless we need a scratch register.
4380 If any preceding insn that doesn't fit into a delay slot is good enough,
4381 pass 1. Pass 2 if a definite blocking insn is needed.
4382 -1 is used internally to avoid deep recursion.
4383 If a blocking instruction is made or recognized, return it. */
4386 gen_block_redirect (rtx jump, int addr, int need_block)
4389 rtx prev = prev_nonnote_insn (jump);
4392 /* First, check if we already have an instruction that satisfies our need. */
4393 if (prev && GET_CODE (prev) == INSN && ! INSN_DELETED_P (prev))
4395 if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
4397 if (GET_CODE (PATTERN (prev)) == USE
4398 || GET_CODE (PATTERN (prev)) == CLOBBER
4399 || get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
4401 else if ((need_block &= ~1) < 0)
4403 else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
4406 if (GET_CODE (PATTERN (jump)) == RETURN)
4410 /* Reorg even does nasty things with return insns that cause branches
4411 to go out of range - see find_end_label and callers. */
4412 return emit_insn_before (gen_block_branch_redirect (const0_rtx) , jump);
4414 /* We can't use JUMP_LABEL here because it might be undefined
4415 when not optimizing. */
4416 dest = XEXP (SET_SRC (PATTERN (jump)), 0);
4417 /* If the branch is out of range, try to find a scratch register for it. */
4419 && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
4423 /* Don't look for the stack pointer as a scratch register,
4424 it would cause trouble if an interrupt occurred. */
4425 unsigned try = 0x7fff, used;
4426 int jump_left = flag_expensive_optimizations + 1;
4428 /* It is likely that the most recent eligible instruction is wanted for
4429 the delay slot. Therefore, find out which registers it uses, and
4430 try to avoid using them. */
4432 for (scan = jump; (scan = PREV_INSN (scan)); )
4436 if (INSN_DELETED_P (scan))
4438 code = GET_CODE (scan);
4439 if (code == CODE_LABEL || code == JUMP_INSN)
4442 && GET_CODE (PATTERN (scan)) != USE
4443 && GET_CODE (PATTERN (scan)) != CLOBBER
4444 && get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES)
4446 try &= ~regs_used (PATTERN (scan), 0);
4450 for (used = dead = 0, scan = JUMP_LABEL (jump);
4451 (scan = NEXT_INSN (scan)); )
4455 if (INSN_DELETED_P (scan))
4457 code = GET_CODE (scan);
4460 used |= regs_used (PATTERN (scan), 0);
4461 if (code == CALL_INSN)
4462 used |= regs_used (CALL_INSN_FUNCTION_USAGE (scan), 0);
4463 dead |= (used >> 16) & ~used;
4469 if (code == JUMP_INSN)
4471 if (jump_left-- && simplejump_p (scan))
4472 scan = JUMP_LABEL (scan);
4478 /* Mask out the stack pointer again, in case it was
4479 the only 'free' register we have found. */
4482 /* If the immediate destination is still in range, check for possible
4483 threading with a jump beyond the delay slot insn.
4484 Don't check if we are called recursively; the jump has been or will be
4485 checked in a different invocation in that case. */
4487 else if (optimize && need_block >= 0)
4489 rtx next = next_active_insn (next_active_insn (dest));
4490 if (next && GET_CODE (next) == JUMP_INSN
4491 && GET_CODE (PATTERN (next)) == SET
4492 && recog_memoized (next) == CODE_FOR_jump_compact)
4494 dest = JUMP_LABEL (next);
4496 && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
4498 gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), -1);
4504 rtx reg = gen_rtx_REG (SImode, exact_log2 (dead & -dead));
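/* DEAD & -DEAD isolates the lowest set bit, so exact_log2 yields the
   number of the lowest dead register; e.g. for DEAD == 0x0c,
   0x0c & -0x0c == 0x04 and exact_log2 (0x04) == 2, picking r2. */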
4506 /* It would be nice if we could convert the jump into an indirect
4507 jump / far branch right now, and thus expose all constituent
4508 instructions to further optimization. However, reorg uses
4509 simplejump_p to determine if there is an unconditional jump where
4510 it should try to schedule instructions from the target of the
4511 branch; simplejump_p fails for indirect jumps even if they have
4512 a JUMP_LABEL. */
4513 rtx insn = emit_insn_before (gen_indirect_jump_scratch
4514 (reg, GEN_INT (INSN_UID (JUMP_LABEL (jump))))
4515 , jump);
4516 /* ??? We would like this to have the scope of the jump, but that
4517 scope will change when a delay slot insn of an inner scope is added.
4518 Hence, after delay slot scheduling, we'll have to expect
4519 NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and
4520 the jump. */
4522 INSN_LOCATOR (insn) = INSN_LOCATOR (jump);
4523 INSN_CODE (insn) = CODE_FOR_indirect_jump_scratch;
4526 else if (need_block)
4527 /* We can't use JUMP_LABEL here because it might be undefined
4528 when not optimizing. */
4529 return emit_insn_before (gen_block_branch_redirect
4530 (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))))
4531 , jump);
4535 #define CONDJUMP_MIN -252
4536 #define CONDJUMP_MAX 262
4537 struct far_branch
4538 {
4539 /* A label (to be placed) in front of the jump
4540 that jumps to our ultimate destination. */
4541 rtx near_label;
4542 /* Where we are going to insert it if we cannot move the jump any farther,
4543 or the jump itself if we have picked up an existing jump. */
4544 rtx insert_place;
4545 /* The ultimate destination. */
4546 rtx far_label;
4547 struct far_branch *prev;
4548 /* If the branch has already been created, its address;
4549 else the address of its first prospective user. */
4550 int address;
4551 };
4553 static void gen_far_branch (struct far_branch *);
4554 enum mdep_reorg_phase_e mdep_reorg_phase;
4556 gen_far_branch (struct far_branch *bp)
4558 rtx insn = bp->insert_place;
4560 rtx label = gen_label_rtx ();
4563 emit_label_after (label, insn);
4566 jump = emit_jump_insn_after (gen_jump (bp->far_label), insn);
4567 LABEL_NUSES (bp->far_label)++;
4570 jump = emit_jump_insn_after (gen_return (), insn);
4571 /* Emit a barrier so that reorg knows that any following instructions
4572 are not reachable via a fall-through path.
4573 But don't do this when not optimizing, since we wouldn't suppress the
4574 alignment for the barrier then, and could end up with out-of-range
4575 pc-relative loads. */
4577 emit_barrier_after (jump);
4578 emit_label_after (bp->near_label, insn);
4579 JUMP_LABEL (jump) = bp->far_label;
4580 ok = invert_jump (insn, label, 1);
4583 /* If we are branching around a jump (rather than a return), prevent
4584 reorg from using an insn from the jump target as the delay slot insn -
4585 when reorg did this, it pessimized code (we'd rather hide the delay slot)
4586 and it could cause branches to go out of range. */
4589 (gen_stuff_delay_slot
4590 (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))),
4591 GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
4593 /* Prevent reorg from undoing our splits. */
4594 gen_block_redirect (jump, bp->address += 2, 2);
4597 /* Fix up ADDR_DIFF_VECs. */
4599 fixup_addr_diff_vecs (rtx first)
4603 for (insn = first; insn; insn = NEXT_INSN (insn))
4605 rtx vec_lab, pat, prev, prevpat, x, braf_label;
4607 if (GET_CODE (insn) != JUMP_INSN
4608 || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
4610 pat = PATTERN (insn);
4611 vec_lab = XEXP (XEXP (pat, 0), 0);
4613 /* Search for the matching casesi_jump_2. */
4614 for (prev = vec_lab; ; prev = PREV_INSN (prev))
4616 if (GET_CODE (prev) != JUMP_INSN)
4618 prevpat = PATTERN (prev);
4619 if (GET_CODE (prevpat) != PARALLEL || XVECLEN (prevpat, 0) != 2)
4621 x = XVECEXP (prevpat, 0, 1);
4622 if (GET_CODE (x) != USE)
4625 if (GET_CODE (x) == LABEL_REF && XEXP (x, 0) == vec_lab)
4628 /* FIXME: This is a bug in the optimizer, but it seems harmless
4629 to just avoid panicking. */
4633 /* Emit the reference label of the braf where it belongs, right after
4634 the casesi_jump_2 (i.e. braf). */
4635 braf_label = XEXP (XEXP (SET_SRC (XVECEXP (prevpat, 0, 0)), 1), 0);
4636 emit_label_after (braf_label, prev);
4638 /* Fix up the ADDR_DIFF_VEC to be relative
4639 to the reference address of the braf. */
4640 XEXP (XEXP (pat, 0), 0) = braf_label;
4644 /* BARRIER_OR_LABEL is either a BARRIER or a CODE_LABEL immediately following
4645 a barrier. Return the base 2 logarithm of the desired alignment. */
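/* (A return value of N requests 1 << N byte alignment; e.g. 2 stands
   for a 4-byte boundary.) */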
4647 barrier_align (rtx barrier_or_label)
4649 rtx next = next_real_insn (barrier_or_label), pat, prev;
4650 int slot, credit, jump_to_next = 0;
4655 pat = PATTERN (next);
4657 if (GET_CODE (pat) == ADDR_DIFF_VEC)
4660 if (GET_CODE (pat) == UNSPEC_VOLATILE && XINT (pat, 1) == UNSPECV_ALIGN)
4661 /* This is a barrier in front of a constant table. */
4664 prev = prev_real_insn (barrier_or_label);
4665 if (GET_CODE (PATTERN (prev)) == ADDR_DIFF_VEC)
4667 pat = PATTERN (prev);
4668 /* If this is a very small table, we want to keep the alignment after
4669 the table to the minimum for proper code alignment. */
4670 return ((TARGET_SMALLCODE
4671 || ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
4672 <= (unsigned) 1 << (CACHE_LOG - 2)))
4673 ? 1 << TARGET_SHMEDIA : align_jumps_log);
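/* Worked example for the test above, assuming CACHE_LOG == 5 (32-byte
   cache lines; the actual value is target-dependent): a table of at
   most 1 << (5 - 2) == 8 bytes, e.g. four HImode entries, gets only the
   minimal 1 << TARGET_SHMEDIA alignment (2-byte, or 4-byte on SHmedia)
   instead of align_jumps_log. */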
4676 if (TARGET_SMALLCODE)
4679 if (! TARGET_SH2 || ! optimize)
4680 return align_jumps_log;
4682 /* When fixing up pcloads, a constant table might be inserted just before
4683 the basic block that ends with the barrier. Thus, we can't trust the
4684 instruction lengths before that. */
4685 if (mdep_reorg_phase > SH_FIXUP_PCLOAD)
4687 /* Check if there is an immediately preceding branch to the insn beyond
4688 the barrier. We must weigh the cost of discarding useful information
4689 from the current cache line when executing this branch and there is
4690 an alignment, against that of fetching unneeded insns in front of the
4691 branch target when there is no alignment. */
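/* Concretely, the loop below grants a budget ("credit") of
   (1 << (CACHE_LOG - 2)) + 2 bytes of delay-slot-eligible insns; if a
   preceding branch reaches the insn past the barrier within that
   budget, the alignment can be omitted. */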
4693 /* There are two delay_slot cases to consider. One is the simple case
4694 where the preceding branch is to the insn beyond the barrier (simple
4695 delay slot filling), and the other is where the preceding branch has
4696 a delay slot that is a duplicate of the insn after the barrier
4697 (fill_eager_delay_slots) and the branch is to the insn after the insn
4698 after the barrier. */
4700 /* PREV is presumed to be the JUMP_INSN for the barrier under
4701 investigation. Skip to the insn before it. */
4702 prev = prev_real_insn (prev);
4704 for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
4705 credit >= 0 && prev && GET_CODE (prev) == INSN;
4706 prev = prev_real_insn (prev))
4709 if (GET_CODE (PATTERN (prev)) == USE
4710 || GET_CODE (PATTERN (prev)) == CLOBBER)
4712 if (GET_CODE (PATTERN (prev)) == SEQUENCE)
4714 prev = XVECEXP (PATTERN (prev), 0, 1);
4715 if (INSN_UID (prev) == INSN_UID (next))
4717 /* Delay slot was filled with insn at jump target. */
4724 get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
4726 credit -= get_attr_length (prev);
4729 && GET_CODE (prev) == JUMP_INSN
4730 && JUMP_LABEL (prev))
4734 || next_real_insn (JUMP_LABEL (prev)) == next
4735 /* If relax_delay_slots() decides NEXT was redundant
4736 with some previous instruction, it will have
4737 redirected PREV's jump to the following insn. */
4738 || JUMP_LABEL (prev) == next_nonnote_insn (next)
4739 /* There is no upper bound on redundant instructions
4740 that might have been skipped, but we must not put an
4741 alignment where none had been before. */
4742 || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
4744 && (INSN_CODE (x) == CODE_FOR_block_branch_redirect
4745 || INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
4746 || INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
4748 rtx pat = PATTERN (prev);
4749 if (GET_CODE (pat) == PARALLEL)
4750 pat = XVECEXP (pat, 0, 0);
4751 if (credit - slot >= (GET_CODE (SET_SRC (pat)) == PC ? 2 : 0))
4757 return align_jumps_log;
4760 /* If we are inside a phony loop, almost any kind of label can turn up as the
4761 first one in the loop. Aligning a braf label causes incorrect switch
4762 destination addresses; we can detect braf labels because they are
4763 followed by a BARRIER.
4764 Applying loop alignment to small constant or switch tables is a waste
4765 of space, so we suppress this too. */
4767 sh_loop_align (rtx label)
4772 next = next_nonnote_insn (next);
4773 while (next && GET_CODE (next) == CODE_LABEL);
4777 || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC
4778 || recog_memoized (next) == CODE_FOR_consttable_2)
4781 return align_loops_log;
4784 /* Do a final pass over the function, just before delayed branch
4785 scheduling. */
4787 static void
4788 sh_reorg (void)
4789 {
4790 rtx first, insn, mova = NULL_RTX;
4792 rtx r0_rtx = gen_rtx_REG (Pmode, 0);
4793 rtx r0_inc_rtx = gen_rtx_POST_INC (Pmode, r0_rtx);
4795 first = get_insns ();
4796 max_labelno_before_reorg = max_label_num ();
4798 /* We must split call insns before introducing `mova's. If we're
4799 optimizing, they'll have already been split. Otherwise, make
4800 sure we don't split them too late. */
4802 split_all_insns_noflow ();
4807 /* If relaxing, generate pseudo-ops to associate function calls with
4808 the symbols they call. It does no harm to not generate these
4809 pseudo-ops. However, when we can generate them, it enables the
4810 linker to potentially relax the jsr to a bsr, and eliminate the
4811 register load and, possibly, the constant pool entry. */
4813 mdep_reorg_phase = SH_INSERT_USES_LABELS;
4816 /* Remove all REG_LABEL_OPERAND notes. We want to use them for our
4817 own purposes. This works because none of the remaining passes
4818 need to look at them.
4820 ??? But it may break in the future. We should use a machine
4821 dependent REG_NOTE, or some other approach entirely. */
4822 for (insn = first; insn; insn = NEXT_INSN (insn))
4828 while ((note = find_reg_note (insn, REG_LABEL_OPERAND,
4830 remove_note (insn, note);
4834 for (insn = first; insn; insn = NEXT_INSN (insn))
4836 rtx pattern, reg, link, set, scan, dies, label;
4837 int rescan = 0, foundinsn = 0;
4839 if (GET_CODE (insn) == CALL_INSN)
4841 pattern = PATTERN (insn);
4843 if (GET_CODE (pattern) == PARALLEL)
4844 pattern = XVECEXP (pattern, 0, 0);
4845 if (GET_CODE (pattern) == SET)
4846 pattern = SET_SRC (pattern);
4848 if (GET_CODE (pattern) != CALL
4849 || GET_CODE (XEXP (pattern, 0)) != MEM)
4852 reg = XEXP (XEXP (pattern, 0), 0);
4856 reg = sfunc_uses_reg (insn);
4861 if (GET_CODE (reg) != REG)
4864 /* Try scanning backward to find where the register is set. */
4866 for (scan = PREV_INSN (insn);
4867 scan && GET_CODE (scan) != CODE_LABEL;
4868 scan = PREV_INSN (scan))
4870 if (! INSN_P (scan))
4873 if (! reg_mentioned_p (reg, scan))
4876 if (noncall_uses_reg (reg, scan, &set))
4889 /* The register is set at LINK. */
4891 /* We can only optimize the function call if the register is
4892 being set to a symbol. In theory, we could sometimes
4893 optimize calls to a constant location, but the assembler
4894 and linker do not support that at present. */
4895 if (GET_CODE (SET_SRC (set)) != SYMBOL_REF
4896 && GET_CODE (SET_SRC (set)) != LABEL_REF)
4899 /* Scan forward from LINK to the place where REG dies, and
4900 make sure that the only insns which use REG are
4901 themselves function calls. */
4903 /* ??? This doesn't work for call targets that were allocated
4904 by reload, since there may not be a REG_DEAD note for the
4905 register. */
4908 for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
4912 /* Don't try to trace forward past a CODE_LABEL if we haven't
4913 seen INSN yet. Ordinarily, we will only find the setting insn
4914 if it is in the same basic block. However,
4915 cross-jumping can insert code labels in between the load and
4916 the call, and can result in situations where a single call
4917 insn may have two targets depending on where we came from. */
4919 if (GET_CODE (scan) == CODE_LABEL && ! foundinsn)
4922 if (! INSN_P (scan))
4925 /* Don't try to trace forward past a JUMP. To optimize
4926 safely, we would have to check that all the
4927 instructions at the jump destination did not use REG. */
4929 if (GET_CODE (scan) == JUMP_INSN)
4932 if (! reg_mentioned_p (reg, scan))
4935 if (noncall_uses_reg (reg, scan, &scanset))
4942 && (GET_CODE (scan) == CALL_INSN || sfunc_uses_reg (scan)))
4944 /* There is a function call to this register other
4945 than the one we are checking. If we optimize
4946 this call, we need to rescan again below. */
4950 /* ??? We shouldn't have to worry about SCANSET here.
4951 We should just be able to check for a REG_DEAD note
4952 on a function call. However, the REG_DEAD notes are
4953 apparently not dependable around libcalls; c-torture
4954 execute/920501-2 is a test case. If SCANSET is set,
4955 then this insn sets the register, so it must have
4956 died earlier. Unfortunately, this will only handle
4957 the cases in which the register is, in fact, set in a
4958 later insn. */
4960 /* ??? We shouldn't have to use FOUNDINSN here.
4961 This dates back to when we used LOG_LINKS to find
4962 the most recent insn which sets the register. */
4966 || find_reg_note (scan, REG_DEAD, reg)))
4975 /* Either there was a branch, or some insn used REG
4976 other than as a function call address. */
4980 /* Create a code label, and put it in a REG_LABEL_OPERAND note
4981 on the insn which sets the register, and on each call insn
4982 which uses the register. In final_prescan_insn we look for
4983 the REG_LABEL_OPERAND notes, and output the appropriate label
4984 or pseudo-op. */
4986 label = gen_label_rtx ();
4987 REG_NOTES (link) = gen_rtx_INSN_LIST (REG_LABEL_OPERAND, label,
4989 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL_OPERAND, label,
4998 scan = NEXT_INSN (scan);
5000 && ((GET_CODE (scan) == CALL_INSN
5001 && reg_mentioned_p (reg, scan))
5002 || ((reg2 = sfunc_uses_reg (scan))
5003 && REGNO (reg2) == REGNO (reg))))
5005 = gen_rtx_INSN_LIST (REG_LABEL_OPERAND, label,
5008 while (scan != dies);
5014 fixup_addr_diff_vecs (first);
5018 mdep_reorg_phase = SH_SHORTEN_BRANCHES0;
5019 shorten_branches (first);
5022 /* Scan the function looking for move instructions which have to be
5023 changed to pc-relative loads and insert the literal tables. */
5024 label_ref_list_pool = create_alloc_pool ("label references list",
5025 sizeof (struct label_ref_list_d),
5027 mdep_reorg_phase = SH_FIXUP_PCLOAD;
5028 for (insn = first, num_mova = 0; insn; insn = NEXT_INSN (insn))
5032 /* ??? basic block reordering can move a switch table dispatch
5033 below the switch table. Check if that has happened.
5034 We only have the addresses available when optimizing; but then,
5035 this check shouldn't be needed when not optimizing. */
5036 if (!untangle_mova (&num_mova, &mova, insn))
5042 else if (GET_CODE (insn) == JUMP_INSN
5043 && GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
5045 /* ??? loop invariant motion can also move a mova out of a
5046 loop. Since loop does this code motion anyway, maybe we
5047 should wrap UNSPEC_MOVA into a CONST, so that reload can
5048 fix it up. */
5049 && ((num_mova > 1
5050 && GET_MODE (prev_nonnote_insn (insn)) == VOIDmode)
5051 || (prev_nonnote_insn (insn)
5052 == XEXP (MOVA_LABELREF (mova), 0))))
5059 /* Some code might have been inserted between the mova and
5060 its ADDR_DIFF_VEC. Check if the mova is still in range. */
5061 for (scan = mova, total = 0; scan != insn; scan = NEXT_INSN (scan))
5062 total += get_attr_length (scan);
5064 /* The range of the mova is 1020; add 4 because the pc counts from the
5065 address of the second instruction after this one, and subtract 2 in
5066 case the pc is 2-byte aligned. Possible alignment needed for the
5067 ADDR_DIFF_VEC cancels out with alignment effects of the mova itself. */
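/* I.e. the mova is considered out of range once TOTAL exceeds
   1020 + 4 - 2 == 1022 bytes. */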
5070 /* Change the mova into a load, and restart scanning
5071 there. broken_move will then return true for mova. */
5076 if (broken_move (insn)
5077 || (GET_CODE (insn) == INSN
5078 && recog_memoized (insn) == CODE_FOR_casesi_worker_2))
5081 /* Scan ahead looking for a barrier to stick the constant table
5082 behind. */
5083 rtx barrier = find_barrier (num_mova, mova, insn);
5084 rtx last_float_move = NULL_RTX, last_float = 0, *last_float_addr = NULL;
5085 int need_aligned_label = 0;
5087 if (num_mova && ! mova_p (mova))
5089 /* find_barrier had to change the first mova into a
5090 pcload; thus, we have to start with this new pcload. */
5094 /* Now find all the moves between the points and modify them. */
5095 for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
5097 if (GET_CODE (scan) == CODE_LABEL)
5099 if (GET_CODE (scan) == INSN
5100 && recog_memoized (scan) == CODE_FOR_casesi_worker_2)
5101 need_aligned_label = 1;
5102 if (broken_move (scan))
5104 rtx *patp = &PATTERN (scan), pat = *patp;
5108 enum machine_mode mode;
5110 if (GET_CODE (pat) == PARALLEL)
5111 patp = &XVECEXP (pat, 0, 0), pat = *patp;
5112 src = SET_SRC (pat);
5113 dst = SET_DEST (pat);
5114 mode = GET_MODE (dst);
5116 if (mode == SImode && hi_const (src)
5117 && REGNO (dst) != FPUL_REG)
5122 while (GET_CODE (dst) == SUBREG)
5124 offset += subreg_regno_offset (REGNO (SUBREG_REG (dst)),
5125 GET_MODE (SUBREG_REG (dst)),
5128 dst = SUBREG_REG (dst);
5130 dst = gen_rtx_REG (HImode, REGNO (dst) + offset);
5132 if (GET_CODE (dst) == REG && FP_ANY_REGISTER_P (REGNO (dst)))
5134 /* This must be an insn that clobbers r0. */
5135 rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
5136 XVECLEN (PATTERN (scan), 0)
5138 rtx clobber = *clobberp;
5140 gcc_assert (GET_CODE (clobber) == CLOBBER
5141 && rtx_equal_p (XEXP (clobber, 0), r0_rtx));
5144 && reg_set_between_p (r0_rtx, last_float_move, scan))
5148 && GET_MODE_SIZE (mode) != 4
5149 && GET_MODE_SIZE (GET_MODE (last_float)) == 4)
5151 lab = add_constant (src, mode, last_float);
5153 emit_insn_before (gen_mova (lab), scan);
5156 /* There will be a REG_UNUSED note for r0 on
5157 LAST_FLOAT_MOVE; we have to change it to REG_INC,
5158 otherwise reorg:mark_target_live_regs will not
5159 consider r0 to be used, and we would end up with a delay
5160 slot insn in front of SCAN that clobbers r0. */
5162 = find_regno_note (last_float_move, REG_UNUSED, 0);
5164 /* If we are not optimizing, then there may not be
5165 a note. */
5166 if (note)
5167 PUT_MODE (note, REG_INC);
5169 *last_float_addr = r0_inc_rtx;
5171 last_float_move = scan;
5173 newsrc = gen_const_mem (mode,
5174 (((TARGET_SH4 && ! TARGET_FMOVD)
5175 || REGNO (dst) == FPUL_REG)
5178 last_float_addr = &XEXP (newsrc, 0);
5180 /* Remove the clobber of r0. */
5181 *clobberp = gen_rtx_CLOBBER (GET_MODE (clobber),
5182 gen_rtx_SCRATCH (Pmode));
5184 /* This is a mova needing a label. Create it. */
5185 else if (GET_CODE (src) == UNSPEC
5186 && XINT (src, 1) == UNSPEC_MOVA
5187 && GET_CODE (XVECEXP (src, 0, 0)) == CONST)
5189 lab = add_constant (XVECEXP (src, 0, 0), mode, 0);
5190 newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
5191 newsrc = gen_rtx_UNSPEC (SImode,
5192 gen_rtvec (1, newsrc),
5197 lab = add_constant (src, mode, 0);
5198 newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
5199 newsrc = gen_const_mem (mode, newsrc);
5201 *patp = gen_rtx_SET (VOIDmode, dst, newsrc);
5202 INSN_CODE (scan) = -1;
5205 dump_table (need_aligned_label ? insn : 0, barrier);
5209 free_alloc_pool (label_ref_list_pool);
5210 for (insn = first; insn; insn = NEXT_INSN (insn))
5211 PUT_MODE (insn, VOIDmode);
5213 mdep_reorg_phase = SH_SHORTEN_BRANCHES1;
5214 INSN_ADDRESSES_FREE ();
5215 split_branches (first);
5217 /* The INSN_REFERENCES_ARE_DELAYED in sh.h is problematic because it
5218 also has an effect on the register that holds the address of the sfunc.
5219 Insert an extra dummy insn in front of each sfunc that pretends to
5220 use this register. */
5221 if (flag_delayed_branch)
5223 for (insn = first; insn; insn = NEXT_INSN (insn))
5225 rtx reg = sfunc_uses_reg (insn);
5229 emit_insn_before (gen_use_sfunc_addr (reg), insn);
5233 /* fpscr is not actually a user variable, but we pretend it is for the
5234 sake of the previous optimization passes, since we want it handled like
5235 one. However, we don't have any debugging information for it, so turn
5236 it into a non-user variable now. */
5238 REG_USERVAR_P (get_fpscr_rtx ()) = 0;
5240 mdep_reorg_phase = SH_AFTER_MDEP_REORG;
5244 get_dest_uid (rtx label, int max_uid)
5246 rtx dest = next_real_insn (label);
5247 int dest_uid;
5248 if (! dest)
5249 /* This can happen for an undefined label. */
5250 return 0;
5251 dest_uid = INSN_UID (dest);
5252 /* If this is a newly created branch redirection blocking instruction,
5253 we cannot index the branch_uid or insn_addresses arrays with its
5254 uid. But then, we won't need to, because the actual destination is
5255 the following branch. */
5256 while (dest_uid >= max_uid)
5258 dest = NEXT_INSN (dest);
5259 dest_uid = INSN_UID (dest);
5261 if (GET_CODE (dest) == JUMP_INSN && GET_CODE (PATTERN (dest)) == RETURN)
5266 /* Split condbranches that are out of range. Also add clobbers for
5267 scratch registers that are needed in far jumps.
5268 We do this before delay slot scheduling, so that it can take our
5269 newly created instructions into account. It also allows us to
5270 find branches with common targets more easily. */
5273 split_branches (rtx first)
5276 struct far_branch **uid_branch, *far_branch_list = 0;
5277 int max_uid = get_max_uid ();
5280 /* Find out which branches are out of range. */
5281 shorten_branches (first);
5283 uid_branch = (struct far_branch **) alloca (max_uid * sizeof *uid_branch);
5284 memset ((char *) uid_branch, 0, max_uid * sizeof *uid_branch);
5286 for (insn = first; insn; insn = NEXT_INSN (insn))
5287 if (! INSN_P (insn))
5289 else if (INSN_DELETED_P (insn))
5291 /* Shorten_branches would split this instruction again,
5292 so transform it into a note. */
5293 SET_INSN_DELETED (insn);
5295 else if (GET_CODE (insn) == JUMP_INSN
5296 /* Don't mess with ADDR_DIFF_VEC */
5297 && (GET_CODE (PATTERN (insn)) == SET
5298 || GET_CODE (PATTERN (insn)) == RETURN))
5300 enum attr_type type = get_attr_type (insn);
5301 if (type == TYPE_CBRANCH)
5305 if (get_attr_length (insn) > 4)
5307 rtx src = SET_SRC (PATTERN (insn));
5308 rtx olabel = XEXP (XEXP (src, 1), 0);
5309 int addr = INSN_ADDRESSES (INSN_UID (insn));
5311 int dest_uid = get_dest_uid (olabel, max_uid);
5312 struct far_branch *bp = uid_branch[dest_uid];
5314 /* redirect_jump needs a valid JUMP_LABEL, and it might delete
5315 the label if the LABEL_NUSES count drops to zero. There is
5316 always a jump_optimize pass that sets these values, but it
5317 proceeds to delete unreferenced code, and then if not
5318 optimizing, to un-delete the deleted instructions, thus
5319 leaving labels with too-low use counts. */
5322 JUMP_LABEL (insn) = olabel;
5323 LABEL_NUSES (olabel)++;
5327 bp = (struct far_branch *) alloca (sizeof *bp);
5328 uid_branch[dest_uid] = bp;
5329 bp->prev = far_branch_list;
5330 far_branch_list = bp;
5332 = XEXP (XEXP (SET_SRC (PATTERN (insn)), 1), 0);
5333 LABEL_NUSES (bp->far_label)++;
5337 label = bp->near_label;
5338 if (! label && bp->address - addr >= CONDJUMP_MIN)
5340 rtx block = bp->insert_place;
5342 if (GET_CODE (PATTERN (block)) == RETURN)
5343 block = PREV_INSN (block);
5345 block = gen_block_redirect (block,
5347 label = emit_label_after (gen_label_rtx (),
5349 bp->near_label = label;
5351 else if (label && ! NEXT_INSN (label))
5353 if (addr + 2 - bp->address <= CONDJUMP_MAX)
5354 bp->insert_place = insn;
5356 gen_far_branch (bp);
5360 || (NEXT_INSN (label) && bp->address - addr < CONDJUMP_MIN))
5362 bp->near_label = label = gen_label_rtx ();
5363 bp->insert_place = insn;
5366 ok = redirect_jump (insn, label, 0);
5371 /* get_attr_length (insn) == 2 */
5372 /* Check if we have a pattern where reorg wants to redirect
5373 the branch to a label from an unconditional branch that
5374 is too far away. */
5375 /* We can't use JUMP_LABEL here because it might be undefined
5376 when not optimizing. */
5377 /* A syntax error might cause beyond to be NULL_RTX. */
5379 = next_active_insn (XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
5383 && (GET_CODE (beyond) == JUMP_INSN
5384 || ((beyond = next_active_insn (beyond))
5385 && GET_CODE (beyond) == JUMP_INSN))
5386 && GET_CODE (PATTERN (beyond)) == SET
5387 && recog_memoized (beyond) == CODE_FOR_jump_compact
5389 (INSN_UID (XEXP (SET_SRC (PATTERN (beyond)), 0)))
5390 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
5392 gen_block_redirect (beyond,
5393 INSN_ADDRESSES (INSN_UID (beyond)), 1);
5396 next = next_active_insn (insn);
5398 if ((GET_CODE (next) == JUMP_INSN
5399 || ((next = next_active_insn (next))
5400 && GET_CODE (next) == JUMP_INSN))
5401 && GET_CODE (PATTERN (next)) == SET
5402 && recog_memoized (next) == CODE_FOR_jump_compact
5404 (INSN_UID (XEXP (SET_SRC (PATTERN (next)), 0)))
5405 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
5407 gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), 1);
5409 else if (type == TYPE_JUMP || type == TYPE_RETURN)
5411 int addr = INSN_ADDRESSES (INSN_UID (insn));
5414 struct far_branch *bp;
5416 if (type == TYPE_JUMP)
5418 far_label = XEXP (SET_SRC (PATTERN (insn)), 0);
5419 dest_uid = get_dest_uid (far_label, max_uid);
5422 /* Parse errors can lead to labels outside
5423 the insn stream. */
5424 if (! NEXT_INSN (far_label))
5429 JUMP_LABEL (insn) = far_label;
5430 LABEL_NUSES (far_label)++;
5432 redirect_jump (insn, NULL_RTX, 1);
5436 bp = uid_branch[dest_uid];
5439 bp = (struct far_branch *) alloca (sizeof *bp);
5440 uid_branch[dest_uid] = bp;
5441 bp->prev = far_branch_list;
5442 far_branch_list = bp;
5444 bp->far_label = far_label;
5446 LABEL_NUSES (far_label)++;
5448 else if (bp->near_label && ! NEXT_INSN (bp->near_label))
5449 if (addr - bp->address <= CONDJUMP_MAX)
5450 emit_label_after (bp->near_label, PREV_INSN (insn));
5453 gen_far_branch (bp);
5459 bp->insert_place = insn;
5461 emit_insn_before (gen_block_branch_redirect (const0_rtx), insn);
5463 gen_block_redirect (insn, addr, bp->near_label ? 2 : 0);
5466 /* Generate all pending far branches,
5467 and free our references to the far labels. */
5468 while (far_branch_list)
5470 if (far_branch_list->near_label
5471 && ! NEXT_INSN (far_branch_list->near_label))
5472 gen_far_branch (far_branch_list);
5474 && far_branch_list->far_label
5475 && ! --LABEL_NUSES (far_branch_list->far_label))
5476 delete_insn (far_branch_list->far_label);
5477 far_branch_list = far_branch_list->prev;
5480 /* Instruction length information is no longer valid due to the new
5481 instructions that have been generated. */
5482 init_insn_lengths ();
5485 /* Dump out instruction addresses, which is useful for debugging the
5486 constant pool table stuff.
5488 If relaxing, output the label and pseudo-ops used to link together
5489 calls and the instruction which set the registers. */
5491 /* ??? The addresses printed by this routine for insns are nonsense for
5492 insns which are inside of a sequence where none of the inner insns have
5493 variable length. This is because the second pass of shorten_branches
5494 does not bother to update them. */
5497 final_prescan_insn (rtx insn, rtx *opvec ATTRIBUTE_UNUSED,
5498 int noperands ATTRIBUTE_UNUSED)
5500 if (TARGET_DUMPISIZE)
5501 fprintf (asm_out_file, "\n! at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));
5507 note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
5512 pattern = PATTERN (insn);
5513 if (GET_CODE (pattern) == PARALLEL)
5514 pattern = XVECEXP (pattern, 0, 0);
5515 switch (GET_CODE (pattern))
5518 if (GET_CODE (SET_SRC (pattern)) != CALL
5519 && get_attr_type (insn) != TYPE_SFUNC)
5521 targetm.asm_out.internal_label
5522 (asm_out_file, "L", CODE_LABEL_NUMBER (XEXP (note, 0)));
5525 /* else FALLTHROUGH */
5527 asm_fprintf (asm_out_file, "\t.uses %LL%d\n",
5528 CODE_LABEL_NUMBER (XEXP (note, 0)));
5538 /* Dump out any constants accumulated in the final pass. These will
5539 only be labels. */
5542 output_jump_label_table (void)
5548 fprintf (asm_out_file, "\t.align 2\n");
5549 for (i = 0; i < pool_size; i++)
5551 pool_node *p = &pool_vector[i];
5553 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5554 CODE_LABEL_NUMBER (p->label));
5555 output_asm_insn (".long %O0", &p->value);
5563 /* A full frame looks like:
5567 [ if current_function_anonymous_args: the anonymous arguments ]
5568 (the saved registers and the locals)
5580 local-0 <- fp points here. */
5582 /* Number of bytes pushed for anonymous args, used to pass information
5583 between expand_prologue and expand_epilogue. */
5585 /* Adjust the stack by SIZE bytes. REG holds the rtl of the register to be
5586 adjusted. If epilogue_p is zero, this is for a prologue; otherwise, it's
5587 for an epilogue and a negative value means that it's for a sibcall
5588 epilogue. If LIVE_REGS_MASK is nonzero, it points to a HARD_REG_SET of
5589 all the registers that are about to be restored, and hence dead. */
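/* For instance, sh_expand_prologue below calls
   output_stack_adjust (-rounded_frame_size (d) + d_rounding,
   stack_pointer_rtx, 0, NULL) to allocate the local frame, and
   sh_expand_epilogue undoes it with a positive SIZE and an EPILOGUE_P
   of 1, or -1 when expanding a sibcall epilogue. */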
5592 output_stack_adjust (int size, rtx reg, int epilogue_p,
5593 HARD_REG_SET *live_regs_mask)
5595 rtx (*emit_fn) (rtx) = epilogue_p ? &emit_insn : &frame_insn;
5598 HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
5600 /* This test is bogus, as output_stack_adjust is used to re-align the
5601 stack. */
5603 gcc_assert (!(size % align));
5606 if (CONST_OK_FOR_ADD (size))
5607 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size)));
5608 /* Try to do it with two partial adjustments; however, we must make
5609 sure that the stack is properly aligned at all times, in case
5610 an interrupt occurs between the two partial adjustments. */
5611 else if (CONST_OK_FOR_ADD (size / 2 & -align)
5612 && CONST_OK_FOR_ADD (size - (size / 2 & -align)))
5614 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size / 2 & -align)));
5615 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size - (size / 2 & -align))));
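/* A hedged example, assuming non-SHmedia (CONST_OK_FOR_I08 accepts
   signed 8-bit constants, -128 .. 127) and an 8-byte stack boundary:
   for SIZE == 240, a single add of 240 is not encodable, so the code
   above emits add #120 twice; 120 == (240 / 2 & -8), each step is
   encodable, and the stack stays 8-byte aligned between the two adds. */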
5621 int temp = epilogue_p ? 7 : (TARGET_SH5 ? 0 : 1);
5624 /* If TEMP is invalid, we could temporarily save a general
5625 register to MACL. However, there is currently no need
5626 to handle this case, so just die when we see it. */
5628 || current_function_interrupt
5629 || ! call_really_used_regs[temp] || fixed_regs[temp])
5631 if (temp < 0 && ! current_function_interrupt
5632 && (TARGET_SHMEDIA || epilogue_p >= 0))
5635 COPY_HARD_REG_SET (temps, call_used_reg_set);
5636 AND_COMPL_HARD_REG_SET (temps, call_fixed_reg_set);
5640 if (crtl->return_rtx)
5642 enum machine_mode mode;
5643 mode = GET_MODE (crtl->return_rtx);
5644 if (BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG)
5645 nreg = HARD_REGNO_NREGS (FIRST_RET_REG, mode);
5647 for (i = 0; i < nreg; i++)
5648 CLEAR_HARD_REG_BIT (temps, FIRST_RET_REG + i);
5649 if (crtl->calls_eh_return)
5651 CLEAR_HARD_REG_BIT (temps, EH_RETURN_STACKADJ_REGNO);
5652 for (i = 0; i <= 3; i++)
5653 CLEAR_HARD_REG_BIT (temps, EH_RETURN_DATA_REGNO (i));
5656 if (TARGET_SHMEDIA && epilogue_p < 0)
5657 for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
5658 CLEAR_HARD_REG_BIT (temps, i);
5659 if (epilogue_p <= 0)
5661 for (i = FIRST_PARM_REG;
5662 i < FIRST_PARM_REG + NPARM_REGS (SImode); i++)
5663 CLEAR_HARD_REG_BIT (temps, i);
5664 if (cfun->static_chain_decl != NULL)
5665 CLEAR_HARD_REG_BIT (temps, STATIC_CHAIN_REGNUM);
5667 temp = scavenge_reg (&temps);
5669 if (temp < 0 && live_regs_mask)
5673 COPY_HARD_REG_SET (temps, *live_regs_mask);
5674 CLEAR_HARD_REG_BIT (temps, REGNO (reg));
5675 temp = scavenge_reg (&temps);
5679 rtx adj_reg, tmp_reg, mem;
5681 /* If we reached here, the most likely case is the (sibcall)
5682 epilogue for non-SHmedia. Put a special push/pop sequence
5683 for such cases as the last resort. This looks lengthy but
5684 would not be a problem because it seems to be a very
5685 rare case. */
5687 gcc_assert (!TARGET_SHMEDIA && epilogue_p);
5690 /* ??? There is still the slight possibility that r4 or
5691 r5 have been reserved as fixed registers or assigned
5692 as global registers, and they change during an
5693 interrupt. There are possible ways to handle this:
5695 - If we are adjusting the frame pointer (r14), we can make do
5696 with a single temp register and an ordinary push / pop
5697 on the stack.
5698 - Grab any call-used or call-saved registers (i.e. not
5699 fixed or globals) for the temps we need. We might
5700 also grab r14 if we are adjusting the stack pointer.
5701 If we can't find enough available registers, issue
5702 a diagnostic and die - the user must have reserved
5703 way too many registers.
5704 But since all this is rather unlikely to happen and
5705 would require extra testing, we just die if r4 / r5
5706 are not available. */
5707 gcc_assert (!fixed_regs[4] && !fixed_regs[5]
5708 && !global_regs[4] && !global_regs[5]);
5710 adj_reg = gen_rtx_REG (GET_MODE (reg), 4);
5711 tmp_reg = gen_rtx_REG (GET_MODE (reg), 5);
5712 emit_move_insn (gen_tmp_stack_mem (Pmode, reg), adj_reg);
5713 emit_insn (GEN_MOV (adj_reg, GEN_INT (size)));
5714 emit_insn (GEN_ADD3 (adj_reg, adj_reg, reg));
5715 mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
5716 emit_move_insn (mem, tmp_reg);
5717 emit_move_insn (tmp_reg, gen_tmp_stack_mem (Pmode, reg));
5718 mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
5719 emit_move_insn (mem, tmp_reg);
5720 emit_move_insn (reg, adj_reg);
5721 mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
5722 emit_move_insn (adj_reg, mem);
5723 mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
5724 emit_move_insn (tmp_reg, mem);
5725 /* Tell flow the insns that pop r4/r5 aren't dead. */
5726 emit_insn (gen_rtx_USE (VOIDmode, tmp_reg));
5727 emit_insn (gen_rtx_USE (VOIDmode, adj_reg));
5730 const_reg = gen_rtx_REG (GET_MODE (reg), temp);
5732 /* If SIZE is negative, subtract the positive value.
5733 This sometimes allows a constant pool entry to be shared
5734 between prologue and epilogue code. */
5737 emit_insn (GEN_MOV (const_reg, GEN_INT (-size)));
5738 insn = emit_fn (GEN_SUB3 (reg, reg, const_reg));
5742 emit_insn (GEN_MOV (const_reg, GEN_INT (size)));
5743 insn = emit_fn (GEN_ADD3 (reg, reg, const_reg));
5747 = (gen_rtx_EXPR_LIST
5748 (REG_FRAME_RELATED_EXPR,
5749 gen_rtx_SET (VOIDmode, reg,
5750 gen_rtx_PLUS (SImode, reg, GEN_INT (size))),
5760 RTX_FRAME_RELATED_P (x) = 1;
5764 /* Output RTL to push register RN onto the stack. */
5771 x = gen_push_fpul ();
5772 else if (rn == FPSCR_REG)
5773 x = gen_push_fpscr ();
5774 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
5775 && FP_OR_XD_REGISTER_P (rn))
5777 if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
5779 x = gen_push_4 (gen_rtx_REG (DFmode, rn));
5781 else if (TARGET_SH2E && FP_REGISTER_P (rn))
5782 x = gen_push_e (gen_rtx_REG (SFmode, rn));
5784 x = gen_push (gen_rtx_REG (SImode, rn));
5788 = gen_rtx_EXPR_LIST (REG_INC,
5789 gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
5793 /* Output RTL to pop register RN from the stack. */
5800 x = gen_pop_fpul ();
5801 else if (rn == FPSCR_REG)
5802 x = gen_pop_fpscr ();
5803 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
5804 && FP_OR_XD_REGISTER_P (rn))
5806 if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
5808 x = gen_pop_4 (gen_rtx_REG (DFmode, rn));
5810 else if (TARGET_SH2E && FP_REGISTER_P (rn))
5811 x = gen_pop_e (gen_rtx_REG (SFmode, rn));
5813 x = gen_pop (gen_rtx_REG (SImode, rn));
5817 = gen_rtx_EXPR_LIST (REG_INC,
5818 gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
5821 /* Generate code to push the regs specified in the mask. */
5824 push_regs (HARD_REG_SET *mask, int interrupt_handler)
5826 int i = interrupt_handler ? LAST_BANKED_REG + 1 : 0;
5829 /* Push PR last; this gives better latencies after the prologue, and
5830 candidates for the return delay slot when there are no general
5831 registers pushed. */
5832 for (; i < FIRST_PSEUDO_REGISTER; i++)
5834 /* If this is an interrupt handler, and the SZ bit varies,
5835 and we have to push any floating point register, we need
5836 to switch to the correct precision first. */
5837 if (i == FIRST_FP_REG && interrupt_handler && TARGET_FMOVD
5838 && hard_reg_set_intersect_p (*mask, reg_class_contents[DF_REGS]))
5840 HARD_REG_SET unsaved;
5843 COMPL_HARD_REG_SET (unsaved, *mask);
5844 fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
5848 && (i != FPSCR_REG || ! skip_fpscr)
5849 && TEST_HARD_REG_BIT (*mask, i))
5851 /* If the ISR has RESBANK attribute assigned, don't push any of
5852 the following registers - R0-R14, MACH, MACL and GBR. */
5853 if (! (sh_cfun_resbank_handler_p ()
5854 && ((i >= FIRST_GENERAL_REG && i < LAST_GENERAL_REG)
5862 /* Push banked registers last to improve delay slot opportunities. */
5863 if (interrupt_handler)
5864 for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
5865 if (TEST_HARD_REG_BIT (*mask, i))
5868 /* Don't push PR register for an ISR with RESBANK attribute assigned. */
5869 if (TEST_HARD_REG_BIT (*mask, PR_REG) && !sh_cfun_resbank_handler_p ())
5873 /* Calculate how much extra space is needed to save all callee-saved
5874 target registers.
5875 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
5878 shmedia_target_regs_stack_space (HARD_REG_SET *live_regs_mask)
5881 int stack_space = 0;
5882 int interrupt_handler = sh_cfun_interrupt_handler_p ();
5884 for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
5885 if ((! call_really_used_regs[reg] || interrupt_handler)
5886 && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
5887 /* Leave space to save this target register on the stack,
5888 in case target register allocation wants to use it. */
5889 stack_space += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
5893 /* Decide whether we should reserve space for callee-save target registers,
5894 in case target register allocation wants to use them. REGS_SAVED is
5895 the space, in bytes, that is already required for register saves.
5896 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
5899 shmedia_reserve_space_for_target_registers_p (int regs_saved,
5900 HARD_REG_SET *live_regs_mask)
5904 return shmedia_target_regs_stack_space (live_regs_mask) <= regs_saved;
5907 /* Decide how much space to reserve for callee-save target registers
5908 in case target register allocation wants to use them.
5909 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
5912 shmedia_target_regs_stack_adjust (HARD_REG_SET *live_regs_mask)
5914 if (shmedia_space_reserved_for_target_registers)
5915 return shmedia_target_regs_stack_space (live_regs_mask);
5920 /* Work out the registers which need to be saved, both as a mask and a
5921 count of saved words. Return the count.
5923 If doing a pragma interrupt function, then push all regs used by the
5924 function, and if we call another function (we can tell by looking at PR),
5925 make sure that all the regs it clobbers are safe too. */
5928 calc_live_regs (HARD_REG_SET *live_regs_mask)
5933 bool interrupt_or_trapa_handler, trapa_handler, interrupt_handler;
5934 bool nosave_low_regs;
5935 int pr_live, has_call;
5937 attrs = DECL_ATTRIBUTES (current_function_decl);
5938 interrupt_or_trapa_handler = sh_cfun_interrupt_handler_p ();
5939 trapa_handler = lookup_attribute ("trapa_handler", attrs) != NULL_TREE;
5940 interrupt_handler = interrupt_or_trapa_handler && ! trapa_handler;
5941 nosave_low_regs = lookup_attribute ("nosave_low_regs", attrs) != NULL_TREE;
5943 CLEAR_HARD_REG_SET (*live_regs_mask);
5944 if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && interrupt_handler
5945 && df_regs_ever_live_p (FPSCR_REG))
5946 target_flags &= ~MASK_FPU_SINGLE;
5947 /* If we can avoid a lot of saves by switching to double mode, do that. */
5948 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && TARGET_FPU_SINGLE)
5949 for (count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
5950 if (df_regs_ever_live_p (reg) && df_regs_ever_live_p (reg+1)
5951 && (! call_really_used_regs[reg]
5952 || interrupt_handler)
5955 target_flags &= ~MASK_FPU_SINGLE;
5958 /* PR_MEDIA_REG is a general purpose register, thus global_alloc already
5959 knows how to use it. That means the pseudo originally allocated for
5960 the initial value can become the PR_MEDIA_REG hard register, as seen for
5961 execute/20010122-1.c:test9. */
5963 /* ??? this function is called from initial_elimination_offset, hence we
5964 can't use the result of sh_media_register_for_return here. */
5965 pr_live = sh_pr_n_sets ();
5968 rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
5969 pr_live = (pr_initial
5970 ? (GET_CODE (pr_initial) != REG
5971 || REGNO (pr_initial) != (PR_REG))
5972 : df_regs_ever_live_p (PR_REG));
5973 /* For SHcompact, if not optimizing, we end up with a memory reference
5974 using the return address pointer for __builtin_return_address even
5975 though there is no actual need to put the PR register on the stack. */
5976 pr_live |= df_regs_ever_live_p (RETURN_ADDRESS_POINTER_REGNUM);
5978 /* Force PR to be live if the prologue has to call the SHmedia
5979 argument decoder or register saver. */
5980 if (TARGET_SHCOMPACT
5981 && ((crtl->args.info.call_cookie
5982 & ~ CALL_COOKIE_RET_TRAMP (1))
5983 || crtl->saves_all_registers))
5985 has_call = TARGET_SHMEDIA ? ! leaf_function_p () : pr_live;
5986 for (count = 0, reg = FIRST_PSEUDO_REGISTER; reg-- != 0; )
5988 if (reg == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG)
5991 ? (/* Need to save all the regs ever live. */
5992 (df_regs_ever_live_p (reg)
5993 || (call_really_used_regs[reg]
5994 && (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG
5995 || reg == PIC_OFFSET_TABLE_REGNUM)
5997 || (TARGET_SHMEDIA && has_call
5998 && REGISTER_NATURAL_MODE (reg) == SImode
5999 && (GENERAL_REGISTER_P (reg) || TARGET_REGISTER_P (reg))))
6000 && reg != STACK_POINTER_REGNUM && reg != ARG_POINTER_REGNUM
6001 && reg != RETURN_ADDRESS_POINTER_REGNUM
6002 && reg != T_REG && reg != GBR_REG
6003 /* Push fpscr only on targets which have an FPU. */
6004 && (reg != FPSCR_REG || TARGET_FPU_ANY))
6005 : (/* Only push those regs which are used and need to be saved. */
6008 && crtl->args.info.call_cookie
6009 && reg == PIC_OFFSET_TABLE_REGNUM)
6010 || (df_regs_ever_live_p (reg)
6011 && (!call_really_used_regs[reg]
6012 || (trapa_handler && reg == FPSCR_REG && TARGET_FPU_ANY)))
6013 || (crtl->calls_eh_return
6014 && (reg == EH_RETURN_DATA_REGNO (0)
6015 || reg == EH_RETURN_DATA_REGNO (1)
6016 || reg == EH_RETURN_DATA_REGNO (2)
6017 || reg == EH_RETURN_DATA_REGNO (3)))
6018 || ((reg == MACL_REG || reg == MACH_REG)
6019 && df_regs_ever_live_p (reg)
6020 && sh_cfun_attr_renesas_p ())
6023 SET_HARD_REG_BIT (*live_regs_mask, reg);
6024 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
6026 if ((TARGET_SH4 || TARGET_SH2A_DOUBLE || TARGET_SH5) && TARGET_FMOVD
6027 && GET_MODE_CLASS (REGISTER_NATURAL_MODE (reg)) == MODE_FLOAT)
6029 if (FP_REGISTER_P (reg))
6031 if (! TARGET_FPU_SINGLE && ! df_regs_ever_live_p (reg ^ 1))
6033 SET_HARD_REG_BIT (*live_regs_mask, (reg ^ 1));
6034 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg ^ 1));
6037 else if (XD_REGISTER_P (reg))
6039 /* Must switch to double mode to access these registers. */
6040 target_flags &= ~MASK_FPU_SINGLE;
6044 if (nosave_low_regs && reg == R8_REG)
6047 /* If we have a target register optimization pass after prologue / epilogue
6048 threading, we need to assume all target registers will be live even if
6049 they aren't now. */
6050 if (flag_branch_target_load_optimize2
6051 && TARGET_SAVE_ALL_TARGET_REGS
6052 && shmedia_space_reserved_for_target_registers)
6053 for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
6054 if ((! call_really_used_regs[reg] || interrupt_handler)
6055 && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
6057 SET_HARD_REG_BIT (*live_regs_mask, reg);
6058 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
6060 /* If this is an interrupt handler, we don't have any call-clobbered
6061 registers we can conveniently use for target register save/restore.
6062 Make sure we save at least one general purpose register when we need
6063 to save target registers. */
6064 if (interrupt_handler
6065 && hard_reg_set_intersect_p (*live_regs_mask,
6066 reg_class_contents[TARGET_REGS])
6067 && ! hard_reg_set_intersect_p (*live_regs_mask,
6068 reg_class_contents[GENERAL_REGS]))
6070 SET_HARD_REG_BIT (*live_regs_mask, R0_REG);
6071 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (R0_REG));
6077 /* Code to generate prologue and epilogue sequences */
6079 /* PUSHED is the number of bytes that are being pushed on the
6080 stack for register saves. Return the frame size, padded
6081 appropriately so that the stack stays properly aligned. */
6082 static HOST_WIDE_INT
6083 rounded_frame_size (int pushed)
6085 HOST_WIDE_INT size = get_frame_size ();
6086 HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
6088 return ((size + pushed + align - 1) & -align) - pushed;
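/* Worked example with illustrative numbers: for get_frame_size () == 20,
   PUSHED == 12 and an 8-byte STACK_BOUNDARY, this yields
   ((20 + 12 + 7) & -8) - 12 == 32 - 12 == 20, keeping the sum of the
   frame size and the pushed bytes a multiple of the alignment. */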
6091 /* Choose a call-clobbered target-branch register that remains
6092 unchanged along the whole function. We set it up as the return
6093 value in the prologue. */
6095 sh_media_register_for_return (void)
6100 if (! current_function_is_leaf)
6102 if (lookup_attribute ("interrupt_handler",
6103 DECL_ATTRIBUTES (current_function_decl)))
6105 if (sh_cfun_interrupt_handler_p ())
6108 tr0_used = flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
6110 for (regno = FIRST_TARGET_REG + tr0_used; regno <= LAST_TARGET_REG; regno++)
6111 if (call_really_used_regs[regno] && ! df_regs_ever_live_p (regno))
6117 /* At most, the registers we may need to save are:
6118 - 62 general purpose registers (r15 is stack pointer, r63 is zero)
6119 - 32 floating point registers (for each pair, we save none,
6120 one single precision value, or a double precision value).
6121 - 8 target registers
6122 - add 1 entry for a delimiter. */
6123 #define MAX_SAVED_REGS (62+32+8)
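/* Note 62 + 32 + 8 == 102; the entries[] array below reserves
   MAX_SAVED_REGS + 2 slots so that the VOIDmode delimiter fits at both
   the start and the end of a schedule. */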
6125 typedef struct save_entry_s
6134 /* There will be a delimiter entry with VOIDmode both at the start and the
6135 end of a filled in schedule. The end delimiter has the offset of the
6136 save with the smallest (i.e. most negative) offset. */
6137 typedef struct save_schedule_s
6139 save_entry entries[MAX_SAVED_REGS + 2];
6140 int temps[MAX_TEMPS+1];
6143 /* Fill in SCHEDULE according to LIVE_REGS_MASK. If RESTORE is nonzero,
6144 use reverse order. Returns the last entry written to (not counting
6145 the delimiter). OFFSET_BASE is a number to be added to all offset
6146 entries. */
6149 sh5_schedule_saves (HARD_REG_SET *live_regs_mask, save_schedule *schedule,
6153 save_entry *entry = schedule->entries;
6157 if (! current_function_interrupt)
6158 for (i = FIRST_GENERAL_REG; tmpx < MAX_TEMPS && i <= LAST_GENERAL_REG; i++)
6159 if (call_really_used_regs[i] && ! fixed_regs[i] && i != PR_MEDIA_REG
6160 && ! FUNCTION_ARG_REGNO_P (i)
6161 && i != FIRST_RET_REG
6162 && ! (cfun->static_chain_decl != NULL && i == STATIC_CHAIN_REGNUM)
6163 && ! (crtl->calls_eh_return
6164 && (i == EH_RETURN_STACKADJ_REGNO
6165 || ((unsigned) i >= EH_RETURN_DATA_REGNO (0)
6166 && (unsigned) i <= EH_RETURN_DATA_REGNO (3)))))
6167 schedule->temps[tmpx++] = i;
6169 entry->mode = VOIDmode;
6170 entry->offset = offset_base;
6172 /* We loop twice: first, we save 8-byte aligned registers in the
6173 higher addresses, which are known to be aligned. Then, we
6174 proceed to saving 32-bit registers that don't need 8-byte
6175 alignment.
6176 If this is an interrupt function, all registers that need saving
6177 need to be saved in full. Moreover, we need to postpone saving
6178 target registers till we have saved some general purpose registers
6179 we can then use as scratch registers. */
6180 offset = offset_base;
6181 for (align = 1; align >= 0; align--)
6183 for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
6184 if (TEST_HARD_REG_BIT (*live_regs_mask, i))
6186 enum machine_mode mode = REGISTER_NATURAL_MODE (i);
6189 if (current_function_interrupt)
6191 if (TARGET_REGISTER_P (i))
6193 if (GENERAL_REGISTER_P (i))
6196 if (mode == SFmode && (i % 2) == 1
6197 && ! TARGET_FPU_SINGLE && FP_REGISTER_P (i)
6198 && (TEST_HARD_REG_BIT (*live_regs_mask, (i ^ 1))))
6205 /* If we're doing the aligned pass and this is not aligned,
6206 or we're doing the unaligned pass and this is aligned,
6207 skip it. */
6208 if ((GET_MODE_SIZE (mode) % (STACK_BOUNDARY / BITS_PER_UNIT) == 0)
6212 if (current_function_interrupt
6213 && GENERAL_REGISTER_P (i)
6214 && tmpx < MAX_TEMPS)
6215 schedule->temps[tmpx++] = i;
6217 offset -= GET_MODE_SIZE (mode);
6220 entry->offset = offset;
6223 if (align && current_function_interrupt)
6224 for (i = LAST_TARGET_REG; i >= FIRST_TARGET_REG; i--)
6225 if (TEST_HARD_REG_BIT (*live_regs_mask, i))
6227 offset -= GET_MODE_SIZE (DImode);
6229 entry->mode = DImode;
6230 entry->offset = offset;
6235 entry->mode = VOIDmode;
6236 entry->offset = offset;
6237 schedule->temps[tmpx] = -1;
6242 sh_expand_prologue (void)
6244 HARD_REG_SET live_regs_mask;
6247 int save_flags = target_flags;
6250 = lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl));
6252 current_function_interrupt = sh_cfun_interrupt_handler_p ();
6254 /* We have pretend args if we had an object sent partially in registers
6255 and partially on the stack, e.g. a large structure. */
6256 pretend_args = crtl->args.pretend_args_size;
6257 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl)
6258 && (NPARM_REGS(SImode)
6259 > crtl->args.info.arg_count[(int) SH_ARG_INT]))
6261 output_stack_adjust (-pretend_args
6262 - crtl->args.info.stack_regs * 8,
6263 stack_pointer_rtx, 0, NULL);
6265 if (TARGET_SHCOMPACT && flag_pic && crtl->args.info.call_cookie)
6266 /* We're going to use the PIC register to load the address of the
6267 incoming-argument decoder and/or of the return trampoline from
6268 the GOT, so make sure the PIC register is preserved and
6269 initialized. */
6270 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
6272 if (TARGET_SHCOMPACT
6273 && (crtl->args.info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
6277 /* First, make all registers with incoming arguments that will
6278 be pushed onto the stack live, so that register renaming
6279 doesn't overwrite them. */
6280 for (reg = 0; reg < NPARM_REGS (SImode); reg++)
6281 if (CALL_COOKIE_STACKSEQ_GET (crtl->args.info.call_cookie)
6282 >= NPARM_REGS (SImode) - reg)
6283 for (; reg < NPARM_REGS (SImode); reg++)
6284 emit_insn (gen_shcompact_preserve_incoming_args
6285 (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
6286 else if (CALL_COOKIE_INT_REG_GET
6287 (crtl->args.info.call_cookie, reg) == 1)
6288 emit_insn (gen_shcompact_preserve_incoming_args
6289 (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
6291 emit_move_insn (gen_rtx_REG (Pmode, MACL_REG),
6293 emit_move_insn (gen_rtx_REG (SImode, R0_REG),
6294 GEN_INT (crtl->args.info.call_cookie));
6295 emit_move_insn (gen_rtx_REG (SImode, MACH_REG),
6296 gen_rtx_REG (SImode, R0_REG));
6298 else if (TARGET_SHMEDIA)
6300 int tr = sh_media_register_for_return ();
6303 emit_move_insn (gen_rtx_REG (DImode, tr),
6304 gen_rtx_REG (DImode, PR_MEDIA_REG));
6307 /* Emit the code for SETUP_VARARGS. */
6310 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
6312 /* Push arg regs as if they'd been provided by the caller on the stack. */
6313 for (i = 0; i < NPARM_REGS(SImode); i++)
6315 int rn = NPARM_REGS(SImode) + FIRST_PARM_REG - i - 1;
6318 if (i >= (NPARM_REGS(SImode)
6319 - crtl->args.info.arg_count[(int) SH_ARG_INT]
6323 RTX_FRAME_RELATED_P (insn) = 0;
6328 /* If we're supposed to switch stacks at function entry, do so now. */
6331 /* The argument specifies a variable holding the address of the
6332 stack the interrupt function should switch to/from at entry/exit. */
6334 = ggc_strdup (TREE_STRING_POINTER (TREE_VALUE (sp_switch_attr)));
6335 rtx sp_switch = gen_rtx_SYMBOL_REF (Pmode, s);
6337 emit_insn (gen_sp_switch_1 (sp_switch));
6340 d = calc_live_regs (&live_regs_mask);
6341 /* ??? Maybe we could save some switching if we can move a mode switch
6342 that already happens to be at the function start into the prologue. */
6343 if (target_flags != save_flags && ! current_function_interrupt)
6344 emit_insn (gen_toggle_sz ());
6348 int offset_base, offset;
6350 int offset_in_r0 = -1;
6352 int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
6353 int total_size, save_size;
6354 save_schedule schedule;
6358 if (call_really_used_regs[R0_REG] && ! fixed_regs[R0_REG]
6359 && ! current_function_interrupt)
6360 r0 = gen_rtx_REG (Pmode, R0_REG);
6362 /* D is the actual number of bytes that we need for saving registers;
6363 however, in initial_elimination_offset we have committed to using
6364 an additional TREGS_SPACE amount of bytes - in order to keep both
6365 addresses to arguments supplied by the caller and local variables
6366 valid, we must keep this gap. Place it between the incoming
6367 arguments and the actually saved registers in a bid to optimize
6368 locality of reference. */
6369 total_size = d + tregs_space;
6370 total_size += rounded_frame_size (total_size);
6371 save_size = total_size - rounded_frame_size (d);
6372 if (save_size % (STACK_BOUNDARY / BITS_PER_UNIT))
6373 d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
6374 - save_size % (STACK_BOUNDARY / BITS_PER_UNIT));
6376 /* If adjusting the stack in a single step costs nothing extra, do so.
6377 I.e. either if a single addi is enough, or we need a movi anyway,
6378 and we don't exceed the maximum offset range (the test for the
6379 latter is conservative for simplicity). */
6381 && (CONST_OK_FOR_I10 (-total_size)
6382 || (! CONST_OK_FOR_I10 (-(save_size + d_rounding))
6383 && total_size <= 2044)))
6384 d_rounding = total_size - save_size;
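/* E.g., assuming SHmedia's 10-bit signed addi range (-512 .. 511): for
   TOTAL_SIZE == 500 the whole adjustment fits one addi, so D_ROUNDING
   absorbs the difference and the stack is dropped in a single step
   rather than two. */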
6386 offset_base = d + d_rounding;
6388 output_stack_adjust (-(save_size + d_rounding), stack_pointer_rtx,
6391 sh5_schedule_saves (&live_regs_mask, &schedule, offset_base);
6392 tmp_pnt = schedule.temps;
6393 for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
6395 enum machine_mode mode = entry->mode;
6396 unsigned int reg = entry->reg;
6397 rtx reg_rtx, mem_rtx, pre_dec = NULL_RTX;
6400 offset = entry->offset;
6402 reg_rtx = gen_rtx_REG (mode, reg);
6404 mem_rtx = gen_frame_mem (mode,
6405 gen_rtx_PLUS (Pmode,
6409 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_pre_dec);
6416 if (HAVE_PRE_DECREMENT
6417 && (offset_in_r0 - offset == GET_MODE_SIZE (mode)
6418 || mem_rtx == NULL_RTX
6419 || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
6421 pre_dec = gen_frame_mem (mode, gen_rtx_PRE_DEC (Pmode, r0));
6423 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (pre_dec, 0),
6432 offset += GET_MODE_SIZE (mode);
6436 if (mem_rtx != NULL_RTX)
6439 if (offset_in_r0 == -1)
6441 emit_move_insn (r0, GEN_INT (offset));
6442 offset_in_r0 = offset;
6444 else if (offset != offset_in_r0)
6449 GEN_INT (offset - offset_in_r0)));
6450 offset_in_r0 += offset - offset_in_r0;
6453 if (pre_dec != NULL_RTX)
6459 (Pmode, r0, stack_pointer_rtx));
6463 offset -= GET_MODE_SIZE (mode);
6464 offset_in_r0 -= GET_MODE_SIZE (mode);
6469 mem_rtx = gen_frame_mem (mode, r0);
6471 mem_rtx = gen_frame_mem (mode,
6472 gen_rtx_PLUS (Pmode,
6476 /* We must not use an r0-based address for target-branch
6477 registers or for special registers without pre-dec
6478 memory addresses, since we store their values in r0
6479 first. */
6480 gcc_assert (!TARGET_REGISTER_P (reg)
6481 && ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
6482 || mem_rtx == pre_dec));
6485 orig_reg_rtx = reg_rtx;
6486 if (TARGET_REGISTER_P (reg)
6487 || ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
6488 && mem_rtx != pre_dec))
6490 rtx tmp_reg = gen_rtx_REG (GET_MODE (reg_rtx), *tmp_pnt);
6492 emit_move_insn (tmp_reg, reg_rtx);
6494 if (REGNO (tmp_reg) == R0_REG)
6498 gcc_assert (!refers_to_regno_p
6499 (R0_REG, R0_REG+1, mem_rtx, (rtx *) 0));
6502 if (*++tmp_pnt <= 0)
6503 tmp_pnt = schedule.temps;
6510 /* Mark as interesting for dwarf cfi generator */
6511 insn = emit_move_insn (mem_rtx, reg_rtx);
6512 RTX_FRAME_RELATED_P (insn) = 1;
6513 /* If we use an intermediate register for the save, we can't
6514 describe this exactly in cfi as a copy of the to-be-saved
6515 register into the temporary register and then the temporary
6516 register on the stack, because the temporary register can
6517 have a different natural size than the to-be-saved register.
6518 Thus, we gloss over the intermediate copy and pretend we do
6519 a direct save from the to-be-saved register. */
6520 if (REGNO (reg_rtx) != reg)
6524 set = gen_rtx_SET (VOIDmode, mem_rtx, orig_reg_rtx);
6525 note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
6527 REG_NOTES (insn) = note_rtx;
6530 if (TARGET_SHCOMPACT && (offset_in_r0 != -1))
6532 rtx reg_rtx = gen_rtx_REG (mode, reg);
6534 rtx mem_rtx = gen_frame_mem (mode,
6535 gen_rtx_PLUS (Pmode,
6539 set = gen_rtx_SET (VOIDmode, mem_rtx, reg_rtx);
6540 note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
6542 REG_NOTES (insn) = note_rtx;
6547 gcc_assert (entry->offset == d_rounding);
6550 push_regs (&live_regs_mask, current_function_interrupt);
6552 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
6553 emit_insn (gen_GOTaddr2picreg ());
6555 if (SHMEDIA_REGS_STACK_ADJUST ())
6557 /* This must NOT go through the PLT, otherwise mach and macl
6558 may be clobbered. */
6559 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6561 ? "__GCC_push_shmedia_regs"
6562 : "__GCC_push_shmedia_regs_nofpu"), SFUNC_GOT);
6563 emit_insn (gen_shmedia_save_restore_regs_compact
6564 (GEN_INT (-SHMEDIA_REGS_STACK_ADJUST ())));
6567 if (target_flags != save_flags && ! current_function_interrupt)
6568 emit_insn (gen_toggle_sz ());
6570 target_flags = save_flags;
6572 output_stack_adjust (-rounded_frame_size (d) + d_rounding,
6573 stack_pointer_rtx, 0, NULL);
6575 if (frame_pointer_needed)
6576 frame_insn (GEN_MOV (hard_frame_pointer_rtx, stack_pointer_rtx));
6578 if (TARGET_SHCOMPACT
6579 && (crtl->args.info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
6581 /* This must NOT go through the PLT, otherwise mach and macl
6582 may be clobbered. */
6583 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6584 "__GCC_shcompact_incoming_args", SFUNC_GOT);
6585 emit_insn (gen_shcompact_incoming_args ());
6590 sh_expand_epilogue (bool sibcall_p)
6592 HARD_REG_SET live_regs_mask;
6596 int save_flags = target_flags;
6597 int frame_size, save_size;
6598 int fpscr_deferred = 0;
6599 int e = sibcall_p ? -1 : 1;
6601 d = calc_live_regs (&live_regs_mask);
6604 frame_size = rounded_frame_size (d);
6608 int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
6610 if (d % (STACK_BOUNDARY / BITS_PER_UNIT))
6611 d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
6612 - d % (STACK_BOUNDARY / BITS_PER_UNIT));
6614 total_size = d + tregs_space;
6615 total_size += rounded_frame_size (total_size);
6616 save_size = total_size - frame_size;
6618 /* If adjusting the stack in a single step costs nothing extra, do so.
6619 I.e. either if a single addi is enough, or we need a movi anyway,
6620 and we don't exceed the maximum offset range (the test for the
6621 latter is conservative for simplicity). */
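/* Worked example (illustrative, assuming CONST_OK_FOR_I10 accepts
   signed 10-bit immediates, -512..511): if total_size is 400, a
   single addi covers it; if save_size + d_rounding is 600, a movi is
   needed anyway, so as long as total_size <= 2044 the whole frame can
   be deallocated in that one adjustment.  */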
6623 && ! frame_pointer_needed
6624 && (CONST_OK_FOR_I10 (total_size)
6625 || (! CONST_OK_FOR_I10 (save_size + d_rounding)
6626 && total_size <= 2044)))
6627 d_rounding = frame_size;
6629 frame_size -= d_rounding;
6632 if (frame_pointer_needed)
6634 /* We must avoid scheduling the epilogue with previous basic blocks
6635 when exception handling is enabled. See PR/18032. */
6636 if (flag_exceptions)
6637 emit_insn (gen_blockage ());
6638 output_stack_adjust (frame_size, hard_frame_pointer_rtx, e,
6641 /* We must avoid moving the stack pointer adjustment past code
6642 which reads from the local frame, else an interrupt could
6643 occur after the SP adjustment and clobber data in the local
6645 emit_insn (gen_blockage ());
6646 emit_insn (GEN_MOV (stack_pointer_rtx, hard_frame_pointer_rtx));
6648 else if (frame_size)
6650 /* We must avoid moving the stack pointer adjustment past code
6651 which reads from the local frame, else an interrupt could
6652 occur after the SP adjustment and clobber data in the local
6654 emit_insn (gen_blockage ());
6655 output_stack_adjust (frame_size, stack_pointer_rtx, e, &live_regs_mask);
6658 if (SHMEDIA_REGS_STACK_ADJUST ())
6660 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6662 ? "__GCC_pop_shmedia_regs"
6663 : "__GCC_pop_shmedia_regs_nofpu"), SFUNC_GOT);
6664 /* This must NOT go through the PLT, otherwise mach and macl
6665 may be clobbered. */
6666 emit_insn (gen_shmedia_save_restore_regs_compact
6667 (GEN_INT (SHMEDIA_REGS_STACK_ADJUST ())));
6670 /* Pop all the registers. */
6672 if (target_flags != save_flags && ! current_function_interrupt)
6673 emit_insn (gen_toggle_sz ());
6676 int offset_base, offset;
6677 int offset_in_r0 = -1;
6679 rtx r0 = gen_rtx_REG (Pmode, R0_REG);
6680 save_schedule schedule;
6684 entry = sh5_schedule_saves (&live_regs_mask, &schedule, d_rounding);
6685 offset_base = -entry[1].offset + d_rounding;
6686 tmp_pnt = schedule.temps;
6687 for (; entry->mode != VOIDmode; entry--)
6689 enum machine_mode mode = entry->mode;
6690 int reg = entry->reg;
6691 rtx reg_rtx, mem_rtx, post_inc = NULL_RTX, insn;
6693 offset = offset_base + entry->offset;
6694 reg_rtx = gen_rtx_REG (mode, reg);
6696 mem_rtx = gen_frame_mem (mode,
6697 gen_rtx_PLUS (Pmode,
6701 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_post_inc);
6707 if (HAVE_POST_INCREMENT
6708 && (offset == offset_in_r0
6709 || (offset + GET_MODE_SIZE (mode) != d + d_rounding
6710 && mem_rtx == NULL_RTX)
6711 || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
6713 post_inc = gen_frame_mem (mode, gen_rtx_POST_INC (Pmode, r0));
6715 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (post_inc, 0),
6718 post_inc = NULL_RTX;
6727 if (mem_rtx != NULL_RTX)
6730 if (offset_in_r0 == -1)
6732 emit_move_insn (r0, GEN_INT (offset));
6733 offset_in_r0 = offset;
6735 else if (offset != offset_in_r0)
6740 GEN_INT (offset - offset_in_r0)));
6741 offset_in_r0 += offset - offset_in_r0;
6744 if (post_inc != NULL_RTX)
6750 (Pmode, r0, stack_pointer_rtx));
6756 offset_in_r0 += GET_MODE_SIZE (mode);
6759 mem_rtx = gen_frame_mem (mode, r0);
6761 mem_rtx = gen_frame_mem (mode,
6762 gen_rtx_PLUS (Pmode,
6766 gcc_assert ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
6767 || mem_rtx == post_inc);
6770 if ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
6771 && mem_rtx != post_inc)
6773 insn = emit_move_insn (r0, mem_rtx);
6776 else if (TARGET_REGISTER_P (reg))
6778 rtx tmp_reg = gen_rtx_REG (mode, *tmp_pnt);
6780 /* Give the scheduler a bit of freedom by using up to
6781 MAX_TEMPS registers in a round-robin fashion. */
6782 insn = emit_move_insn (tmp_reg, mem_rtx);
6785 tmp_pnt = schedule.temps;
6788 insn = emit_move_insn (reg_rtx, mem_rtx);
6791 gcc_assert (entry->offset + offset_base == d + d_rounding);
6793 else /* ! TARGET_SH5 */
6798 /* For an ISR with RESBANK attribute assigned, don't pop PR
6799 register. */
6800 if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG)
6801 && !sh_cfun_resbank_handler_p ())
6803 if (!frame_pointer_needed)
6804 emit_insn (gen_blockage ());
6808 /* Banked registers are popped first to avoid being scheduled in the
6809 delay slot. RTE switches banks before the ds instruction. */
6810 if (current_function_interrupt)
6812 for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
6813 if (TEST_HARD_REG_BIT (live_regs_mask, i))
6814 pop (LAST_BANKED_REG - i);
6816 last_reg = FIRST_PSEUDO_REGISTER - LAST_BANKED_REG - 1;
6819 last_reg = FIRST_PSEUDO_REGISTER;
6821 for (i = 0; i < last_reg; i++)
6823 int j = (FIRST_PSEUDO_REGISTER - 1) - i;
6825 if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
6826 && hard_reg_set_intersect_p (live_regs_mask,
6827 reg_class_contents[DF_REGS]))
6829 /* For an ISR with RESBANK attribute assigned, don't pop the
6830 following registers: R0-R14, MACH, MACL and GBR. */
6831 else if (j != PR_REG && TEST_HARD_REG_BIT (live_regs_mask, j)
6832 && ! (sh_cfun_resbank_handler_p ()
6833 && ((j >= FIRST_GENERAL_REG
6834 && j < LAST_GENERAL_REG)
6840 if (j == FIRST_FP_REG && fpscr_deferred)
6844 if (target_flags != save_flags && ! current_function_interrupt)
6845 emit_insn (gen_toggle_sz ());
6846 target_flags = save_flags;
6848 output_stack_adjust (crtl->args.pretend_args_size
6849 + save_size + d_rounding
6850 + crtl->args.info.stack_regs * 8,
6851 stack_pointer_rtx, e, NULL);
6853 if (crtl->calls_eh_return)
6854 emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
6855 EH_RETURN_STACKADJ_RTX));
6857 /* Switch back to the normal stack if necessary. */
6858 if (lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl)))
6859 emit_insn (gen_sp_switch_2 ());
6861 /* Tell flow the insn that pops PR isn't dead. */
6862 /* PR_REG will never be live in SHmedia mode, and we don't need to
6863 USE PR_MEDIA_REG, since it will be explicitly copied to TR0_REG
6864 by the return pattern. */
6865 if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
6866 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, PR_REG)));
6869 static int sh_need_epilogue_known = 0;
6872 sh_need_epilogue (void)
6874 if (! sh_need_epilogue_known)
6879 sh_expand_epilogue (0);
6880 epilogue = get_insns ();
6882 sh_need_epilogue_known = (epilogue == NULL ? -1 : 1);
6884 return sh_need_epilogue_known > 0;
6887 /* Emit code to change the current function's return address to RA.
6888 TEMP is available as a scratch register, if needed. */
6891 sh_set_return_address (rtx ra, rtx tmp)
6893 HARD_REG_SET live_regs_mask;
6895 int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
6898 d = calc_live_regs (&live_regs_mask);
6900 /* If pr_reg isn't live, we can set it (or the register given in
6901 sh_media_register_for_return) directly. */
6902 if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
6908 int rr_regno = sh_media_register_for_return ();
6913 rr = gen_rtx_REG (DImode, rr_regno);
6916 rr = gen_rtx_REG (SImode, pr_reg);
6918 emit_insn (GEN_MOV (rr, ra));
6919 /* Tell flow the register for return isn't dead. */
6920 emit_insn (gen_rtx_USE (VOIDmode, rr));
6927 save_schedule schedule;
6930 entry = sh5_schedule_saves (&live_regs_mask, &schedule, 0);
6931 offset = entry[1].offset;
6932 for (; entry->mode != VOIDmode; entry--)
6933 if (entry->reg == pr_reg)
6936 /* We can't find the PR register. */
6940 offset = entry->offset - offset;
6941 pr_offset = (rounded_frame_size (d) + offset
6942 + SHMEDIA_REGS_STACK_ADJUST ());
6945 pr_offset = rounded_frame_size (d);
6947 emit_insn (GEN_MOV (tmp, GEN_INT (pr_offset)));
6948 emit_insn (GEN_ADD3 (tmp, tmp, hard_frame_pointer_rtx));
6950 tmp = gen_frame_mem (Pmode, tmp);
6951 emit_insn (GEN_MOV (tmp, ra));
6954 /* Clear variables at function end. */
6957 sh_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6958 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6960 sh_need_epilogue_known = 0;
6964 sh_builtin_saveregs (void)
6966 /* First unnamed integer register. */
6967 int first_intreg = crtl->args.info.arg_count[(int) SH_ARG_INT];
6968 /* Number of integer registers we need to save. */
6969 int n_intregs = MAX (0, NPARM_REGS (SImode) - first_intreg);
6970 /* First unnamed SFmode float reg */
6971 int first_floatreg = crtl->args.info.arg_count[(int) SH_ARG_FLOAT];
6972 /* Number of SFmode float regs to save. */
6973 int n_floatregs = MAX (0, NPARM_REGS (SFmode) - first_floatreg);
6976 alias_set_type alias_set;
6982 int pushregs = n_intregs;
6984 while (pushregs < NPARM_REGS (SImode) - 1
6985 && (CALL_COOKIE_INT_REG_GET
6986 (crtl->args.info.call_cookie,
6987 NPARM_REGS (SImode) - pushregs)
6990 crtl->args.info.call_cookie
6991 &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
6996 if (pushregs == NPARM_REGS (SImode))
6997 crtl->args.info.call_cookie
6998 |= (CALL_COOKIE_INT_REG (0, 1)
6999 | CALL_COOKIE_STACKSEQ (pushregs - 1));
7001 crtl->args.info.call_cookie
7002 |= CALL_COOKIE_STACKSEQ (pushregs);
7004 crtl->args.pretend_args_size += 8 * n_intregs;
7006 if (TARGET_SHCOMPACT)
7010 if (! TARGET_SH2E && ! TARGET_SH4 && ! TARGET_SH5)
7012 error ("__builtin_saveregs not supported by this subtarget");
7019 /* Allocate block of memory for the regs. */
7020 /* ??? If n_intregs + n_floatregs == 0, should we allocate at least 1 byte?
7021 Or can assign_stack_local accept a 0 SIZE argument? */
7022 bufsize = (n_intregs * UNITS_PER_WORD) + (n_floatregs * UNITS_PER_WORD);
7025 regbuf = gen_frame_mem (BLKmode, gen_rtx_REG (Pmode, ARG_POINTER_REGNUM));
7026 else if (n_floatregs & 1)
7030 regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
7031 addr = copy_to_mode_reg (Pmode, XEXP (regbuf, 0));
7032 emit_insn (gen_iorsi3 (addr, addr, GEN_INT (UNITS_PER_WORD)));
7033 regbuf = change_address (regbuf, BLKmode, addr);
7035 else if (STACK_BOUNDARY < 64 && TARGET_FPU_DOUBLE && n_floatregs)
7039 regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
7040 addr = copy_to_mode_reg (Pmode, plus_constant (XEXP (regbuf, 0), 4));
7041 mask = copy_to_mode_reg (Pmode, GEN_INT (-8));
7042 emit_insn (gen_andsi3 (addr, addr, mask));
7043 regbuf = change_address (regbuf, BLKmode, addr);
7046 regbuf = assign_stack_local (BLKmode, bufsize, TARGET_FPU_DOUBLE ? 64 : 0);
7047 alias_set = get_varargs_alias_set ();
7048 set_mem_alias_set (regbuf, alias_set);
7050 /* Save int args.
7051 This is optimized to only save the regs that are necessary. Explicitly
7052 named args need not be saved. */
7054 move_block_from_reg (BASE_ARG_REG (SImode) + first_intreg,
7055 adjust_address (regbuf, BLKmode,
7056 n_floatregs * UNITS_PER_WORD),
7060 /* Return the address of the regbuf. */
7061 return XEXP (regbuf, 0);
7063 /* Save float args.
7064 This is optimized to only save the regs that are necessary. Explicitly
7065 named args need not be saved.
7066 We explicitly build a pointer to the buffer because it halves the insn
7067 count when not optimizing (otherwise the pointer is built for each reg
7068 saved).
7069 We emit the moves in reverse order so that we can use predecrement. */
7071 fpregs = copy_to_mode_reg (Pmode,
7072 plus_constant (XEXP (regbuf, 0),
7073 n_floatregs * UNITS_PER_WORD));
7074 if (TARGET_SH4 || TARGET_SH2A_DOUBLE)
7077 for (regno = NPARM_REGS (DFmode) - 2; regno >= first_floatreg; regno -= 2)
7079 emit_insn (gen_addsi3 (fpregs, fpregs,
7080 GEN_INT (-2 * UNITS_PER_WORD)));
7081 mem = change_address (regbuf, DFmode, fpregs);
7082 emit_move_insn (mem,
7083 gen_rtx_REG (DFmode, BASE_ARG_REG (DFmode) + regno));
7085 regno = first_floatreg;
7088 emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
7089 mem = change_address (regbuf, SFmode, fpregs);
7090 emit_move_insn (mem,
7091 gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno
7092 - (TARGET_LITTLE_ENDIAN != 0)));
7096 for (regno = NPARM_REGS (SFmode) - 1; regno >= first_floatreg; regno--)
7100 emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
7101 mem = change_address (regbuf, SFmode, fpregs);
7102 emit_move_insn (mem,
7103 gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno));
7106 /* Return the address of the regbuf. */
7107 return XEXP (regbuf, 0);
7110 /* Define the `__builtin_va_list' type for the ABI. */
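/* A minimal sketch of the record built below, written as the
   equivalent C declaration (illustrative; the field meanings are
   inferred from sh_va_start further down):

     typedef struct
     {
       void *__va_next_o;         next integer arg register slot
       void *__va_next_o_limit;   end of the integer register save area
       void *__va_next_fp;        next FP arg register slot
       void *__va_next_fp_limit;  end of the FP register save area
       void *__va_next_stack;     next argument passed on the stack
     } __builtin_va_list;  */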
7113 sh_build_builtin_va_list (void)
7115 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7118 if (TARGET_SH5 || (! TARGET_SH2E && ! TARGET_SH4)
7119 || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
7120 return ptr_type_node;
7122 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
7124 f_next_o = build_decl (FIELD_DECL, get_identifier ("__va_next_o"),
7126 f_next_o_limit = build_decl (FIELD_DECL,
7127 get_identifier ("__va_next_o_limit"),
7129 f_next_fp = build_decl (FIELD_DECL, get_identifier ("__va_next_fp"),
7131 f_next_fp_limit = build_decl (FIELD_DECL,
7132 get_identifier ("__va_next_fp_limit"),
7134 f_next_stack = build_decl (FIELD_DECL, get_identifier ("__va_next_stack"),
7137 DECL_FIELD_CONTEXT (f_next_o) = record;
7138 DECL_FIELD_CONTEXT (f_next_o_limit) = record;
7139 DECL_FIELD_CONTEXT (f_next_fp) = record;
7140 DECL_FIELD_CONTEXT (f_next_fp_limit) = record;
7141 DECL_FIELD_CONTEXT (f_next_stack) = record;
7143 TYPE_FIELDS (record) = f_next_o;
7144 TREE_CHAIN (f_next_o) = f_next_o_limit;
7145 TREE_CHAIN (f_next_o_limit) = f_next_fp;
7146 TREE_CHAIN (f_next_fp) = f_next_fp_limit;
7147 TREE_CHAIN (f_next_fp_limit) = f_next_stack;
7149 layout_type (record);
7154 /* Implement `va_start' for varargs and stdarg. */
7157 sh_va_start (tree valist, rtx nextarg)
7159 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7160 tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
7166 expand_builtin_saveregs ();
7167 std_expand_builtin_va_start (valist, nextarg);
7171 if ((! TARGET_SH2E && ! TARGET_SH4)
7172 || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
7174 std_expand_builtin_va_start (valist, nextarg);
7178 f_next_o = TYPE_FIELDS (va_list_type_node);
7179 f_next_o_limit = TREE_CHAIN (f_next_o);
7180 f_next_fp = TREE_CHAIN (f_next_o_limit);
7181 f_next_fp_limit = TREE_CHAIN (f_next_fp);
7182 f_next_stack = TREE_CHAIN (f_next_fp_limit);
7184 next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
7186 next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
7187 valist, f_next_o_limit, NULL_TREE);
7188 next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp,
7190 next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
7191 valist, f_next_fp_limit, NULL_TREE);
7192 next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
7193 valist, f_next_stack, NULL_TREE);
7195 /* Call __builtin_saveregs. */
7196 u = make_tree (sizetype, expand_builtin_saveregs ());
7197 u = fold_convert (ptr_type_node, u);
7198 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp, u);
7199 TREE_SIDE_EFFECTS (t) = 1;
7200 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7202 nfp = crtl->args.info.arg_count[SH_ARG_FLOAT];
7207 u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
7208 size_int (UNITS_PER_WORD * nfp));
7209 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp_limit, u);
7210 TREE_SIDE_EFFECTS (t) = 1;
7211 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7213 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_o, u);
7214 TREE_SIDE_EFFECTS (t) = 1;
7215 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7217 nint = crtl->args.info.arg_count[SH_ARG_INT];
7222 u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
7223 size_int (UNITS_PER_WORD * nint));
7224 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_o_limit, u);
7225 TREE_SIDE_EFFECTS (t) = 1;
7226 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7228 u = make_tree (ptr_type_node, nextarg);
7229 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_stack, u);
7230 TREE_SIDE_EFFECTS (t) = 1;
7231 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7234 /* TYPE is a RECORD_TYPE. If there is only a single nonzero-sized
7235 member, return it. */
7237 find_sole_member (tree type)
7239 tree field, member = NULL_TREE;
7241 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
7243 if (TREE_CODE (field) != FIELD_DECL)
7245 if (!DECL_SIZE (field))
7247 if (integer_zerop (DECL_SIZE (field)))
7255 /* Implement `va_arg'. */
7258 sh_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
7259 tree *post_p ATTRIBUTE_UNUSED)
7261 HOST_WIDE_INT size, rsize;
7262 tree tmp, pptr_type_node;
7263 tree addr, lab_over = NULL, result = NULL;
7264 int pass_by_ref = targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
7268 type = build_pointer_type (type);
7270 size = int_size_in_bytes (type);
7271 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
7272 pptr_type_node = build_pointer_type (ptr_type_node);
7274 if (! TARGET_SH5 && (TARGET_SH2E || TARGET_SH4)
7275 && ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
7277 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7278 tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
7283 f_next_o = TYPE_FIELDS (va_list_type_node);
7284 f_next_o_limit = TREE_CHAIN (f_next_o);
7285 f_next_fp = TREE_CHAIN (f_next_o_limit);
7286 f_next_fp_limit = TREE_CHAIN (f_next_fp);
7287 f_next_stack = TREE_CHAIN (f_next_fp_limit);
7289 next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
7291 next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
7292 valist, f_next_o_limit, NULL_TREE);
7293 next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp),
7294 valist, f_next_fp, NULL_TREE);
7295 next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
7296 valist, f_next_fp_limit, NULL_TREE);
7297 next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
7298 valist, f_next_stack, NULL_TREE);
7300 /* Structures with a single member with a distinct mode are passed
7301 like their member. This is relevant if the latter has a REAL_TYPE
7302 or COMPLEX_TYPE type. */
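/* E.g. (illustrative):
     struct s { double d; };
   has a sole nonzero-sized member whose DFmode matches the struct's
   own mode, so eff_type is narrowed to the member type and the value
   is fetched like a plain double below.  */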
7304 while (TREE_CODE (eff_type) == RECORD_TYPE
7305 && (member = find_sole_member (eff_type))
7306 && (TREE_CODE (TREE_TYPE (member)) == REAL_TYPE
7307 || TREE_CODE (TREE_TYPE (member)) == COMPLEX_TYPE
7308 || TREE_CODE (TREE_TYPE (member)) == RECORD_TYPE))
7310 tree field_type = TREE_TYPE (member);
7312 if (TYPE_MODE (eff_type) == TYPE_MODE (field_type))
7313 eff_type = field_type;
7316 gcc_assert ((TYPE_ALIGN (eff_type)
7317 < GET_MODE_ALIGNMENT (TYPE_MODE (field_type)))
7318 || (TYPE_ALIGN (eff_type)
7319 > GET_MODE_BITSIZE (TYPE_MODE (field_type))));
7324 if (TARGET_SH4 || TARGET_SH2A_DOUBLE)
7326 pass_as_float = ((TREE_CODE (eff_type) == REAL_TYPE && size <= 8)
7327 || (TREE_CODE (eff_type) == COMPLEX_TYPE
7328 && TREE_CODE (TREE_TYPE (eff_type)) == REAL_TYPE
7333 pass_as_float = (TREE_CODE (eff_type) == REAL_TYPE && size == 4);
7336 addr = create_tmp_var (pptr_type_node, NULL);
7337 lab_false = create_artificial_label ();
7338 lab_over = create_artificial_label ();
7340 valist = build1 (INDIRECT_REF, ptr_type_node, addr);
7344 tree next_fp_tmp = create_tmp_var (TREE_TYPE (f_next_fp), NULL);
7346 bool is_double = size == 8 && TREE_CODE (eff_type) == REAL_TYPE;
7348 tmp = build1 (ADDR_EXPR, pptr_type_node, next_fp);
7349 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
7350 gimplify_and_add (tmp, pre_p);
7352 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp_tmp, valist);
7353 gimplify_and_add (tmp, pre_p);
7354 tmp = next_fp_limit;
7355 if (size > 4 && !is_double)
7356 tmp = build2 (POINTER_PLUS_EXPR, TREE_TYPE (tmp), tmp,
7357 size_int (4 - size));
7358 tmp = build2 (GE_EXPR, boolean_type_node, next_fp_tmp, tmp);
7359 cmp = build3 (COND_EXPR, void_type_node, tmp,
7360 build1 (GOTO_EXPR, void_type_node, lab_false),
7363 gimplify_and_add (cmp, pre_p);
7365 if (TYPE_ALIGN (eff_type) > BITS_PER_WORD
7366 || (is_double || size == 16))
7368 tmp = fold_convert (sizetype, next_fp_tmp);
7369 tmp = build2 (BIT_AND_EXPR, sizetype, tmp,
7370 size_int (UNITS_PER_WORD));
7371 tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node,
7373 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node,
7375 gimplify_and_add (tmp, pre_p);
7378 gimplify_and_add (cmp, pre_p);
7380 #ifdef FUNCTION_ARG_SCmode_WART
7381 if (TYPE_MODE (eff_type) == SCmode
7382 && TARGET_SH4 && TARGET_LITTLE_ENDIAN)
7384 tree subtype = TREE_TYPE (eff_type);
7388 = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
7389 imag = get_initialized_tmp_var (imag, pre_p, NULL);
7392 = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
7393 real = get_initialized_tmp_var (real, pre_p, NULL);
7395 result = build2 (COMPLEX_EXPR, type, real, imag);
7396 result = get_initialized_tmp_var (result, pre_p, NULL);
7398 #endif /* FUNCTION_ARG_SCmode_WART */
7400 tmp = build1 (GOTO_EXPR, void_type_node, lab_over);
7401 gimplify_and_add (tmp, pre_p);
7403 tmp = build1 (LABEL_EXPR, void_type_node, lab_false);
7404 gimplify_and_add (tmp, pre_p);
7406 tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
7407 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
7408 gimplify_and_add (tmp, pre_p);
7409 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp_tmp, valist);
7410 gimplify_and_add (tmp, pre_p);
7412 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, valist, next_fp_tmp);
7413 gimplify_and_add (tmp, post_p);
7414 valist = next_fp_tmp;
7418 tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node, next_o,
7420 tmp = build2 (GT_EXPR, boolean_type_node, tmp, next_o_limit);
7421 tmp = build3 (COND_EXPR, void_type_node, tmp,
7422 build1 (GOTO_EXPR, void_type_node, lab_false),
7424 gimplify_and_add (tmp, pre_p);
7426 tmp = build1 (ADDR_EXPR, pptr_type_node, next_o);
7427 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
7428 gimplify_and_add (tmp, pre_p);
7430 tmp = build1 (GOTO_EXPR, void_type_node, lab_over);
7431 gimplify_and_add (tmp, pre_p);
7433 tmp = build1 (LABEL_EXPR, void_type_node, lab_false);
7434 gimplify_and_add (tmp, pre_p);
7436 if (size > 4 && ! (TARGET_SH4 || TARGET_SH2A))
7438 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node,
7439 next_o, next_o_limit);
7440 gimplify_and_add (tmp, pre_p);
7443 tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
7444 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
7445 gimplify_and_add (tmp, pre_p);
7450 tmp = build1 (LABEL_EXPR, void_type_node, lab_over);
7451 gimplify_and_add (tmp, pre_p);
7455 /* ??? In va-sh.h, there had been code to make values larger than
7456 size 8 indirect. This does not match the FUNCTION_ARG macros. */
7458 tmp = std_gimplify_va_arg_expr (valist, type, pre_p, NULL);
7461 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, result, tmp);
7462 gimplify_and_add (tmp, pre_p);
7464 tmp = build1 (LABEL_EXPR, void_type_node, lab_over);
7465 gimplify_and_add (tmp, pre_p);
7471 result = build_va_arg_indirect_ref (result);
7477 sh_promote_prototypes (const_tree type)
7483 return ! sh_attr_renesas_p (type);
7486 /* Whether an argument must be passed by reference. On SHcompact, we
7487 pretend arguments wider than 32 bits that would have been passed in
7488 registers are passed by reference, so that an SHmedia trampoline
7489 loads them into the full 64-bit registers. */
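/* Illustrative: a 64-bit long long argument that would have started
   in an integer argument register on SHcompact is passed by reference
   instead, so an SHmedia callee reached through the trampoline can
   load it into a single 64-bit register.  */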
7492 shcompact_byref (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
7493 const_tree type, bool named)
7495 unsigned HOST_WIDE_INT size;
7498 size = int_size_in_bytes (type);
7500 size = GET_MODE_SIZE (mode);
7502 if (cum->arg_count[SH_ARG_INT] < NPARM_REGS (SImode)
7504 || GET_SH_ARG_CLASS (mode) == SH_ARG_INT
7505 || (GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT
7506 && cum->arg_count[SH_ARG_FLOAT] >= NPARM_REGS (SFmode)))
7508 && !SHCOMPACT_FORCE_ON_STACK (mode, type)
7509 && !SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
7516 sh_pass_by_reference (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7517 const_tree type, bool named)
7519 if (targetm.calls.must_pass_in_stack (mode, type))
7522 /* ??? std_gimplify_va_arg_expr passes NULL for cum. That function
7523 wants to know about pass-by-reference semantics for incoming
7524 arguments. */
7528 if (TARGET_SHCOMPACT)
7530 cum->byref = shcompact_byref (cum, mode, type, named);
7531 return cum->byref != 0;
7538 sh_callee_copies (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7539 const_tree type, bool named ATTRIBUTE_UNUSED)
7541 /* ??? How can it possibly be correct to return true only on the
7542 caller side of the equation? Is there someplace else in the
7543 sh backend that's magically producing the copies? */
7544 return (cum->outgoing
7545 && ((mode == BLKmode ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode))
7546 % SH_MIN_ALIGN_FOR_CALLEE_COPY == 0));
7550 sh_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7551 tree type, bool named ATTRIBUTE_UNUSED)
7556 && PASS_IN_REG_P (*cum, mode, type)
7557 && !(TARGET_SH4 || TARGET_SH2A_DOUBLE)
7558 && (ROUND_REG (*cum, mode)
7560 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
7561 : ROUND_ADVANCE (int_size_in_bytes (type)))
7562 > NPARM_REGS (mode)))
7563 words = NPARM_REGS (mode) - ROUND_REG (*cum, mode);
7565 else if (!TARGET_SHCOMPACT
7566 && SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
7567 words = NPARM_REGS (SImode) - cum->arg_count[SH_ARG_INT];
7569 return words * UNITS_PER_WORD;
7573 /* Define where to put the arguments to a function.
7574 Value is zero to push the argument on the stack,
7575 or a hard register in which to store the argument.
7577 MODE is the argument's machine mode.
7578 TYPE is the data type of the argument (as a tree).
7579 This is null for libcalls where that information may
7580 not be available.
7581 CUM is a variable of type CUMULATIVE_ARGS which gives info about
7582 the preceding args and about the function being called.
7583 NAMED is nonzero if this argument is a named parameter
7584 (otherwise it is an extra parameter matching an ellipsis).
7586 On SH the first args are normally in registers
7587 and the rest are pushed. Any arg that starts within the first
7588 NPARM_REGS words is at least partially passed in a register unless
7589 its data type forbids. */
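/* Illustrative example (assuming the usual SH numbering, r4-r7 for
   integer arguments and fr4 upwards for SFmode arguments on
   SH2E/SH4):
     int f (int a, int b, float c);
   passes a in r4, b in r5 and c in an FP argument register, while
   arguments that no longer fit are pushed on the stack.  */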
7593 sh_function_arg (CUMULATIVE_ARGS *ca, enum machine_mode mode,
7594 tree type, int named)
7596 if (! TARGET_SH5 && mode == VOIDmode)
7597 return GEN_INT (ca->renesas_abi ? 1 : 0);
7600 && PASS_IN_REG_P (*ca, mode, type)
7601 && (named || ! (TARGET_HITACHI || ca->renesas_abi)))
7605 if (mode == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN
7606 && (! FUNCTION_ARG_SCmode_WART || (ROUND_REG (*ca, mode) & 1)))
7608 rtx r1 = gen_rtx_EXPR_LIST (VOIDmode,
7609 gen_rtx_REG (SFmode,
7611 + (ROUND_REG (*ca, mode) ^ 1)),
7613 rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
7614 gen_rtx_REG (SFmode,
7616 + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
7618 return gen_rtx_PARALLEL(SCmode, gen_rtvec(2, r1, r2));
7621 /* If the alignment of a DF value causes an SF register to be
7622 skipped, we will use that skipped register for the next SF
7624 if ((TARGET_HITACHI || ca->renesas_abi)
7625 && ca->free_single_fp_reg
7627 return gen_rtx_REG (mode, ca->free_single_fp_reg);
7629 regno = (BASE_ARG_REG (mode) + ROUND_REG (*ca, mode))
7630 ^ (mode == SFmode && TARGET_SH4
7631 && TARGET_LITTLE_ENDIAN != 0
7632 && ! TARGET_HITACHI && ! ca->renesas_abi);
7633 return gen_rtx_REG (mode, regno);
7639 if (mode == VOIDmode && TARGET_SHCOMPACT)
7640 return GEN_INT (ca->call_cookie);
7642 /* The following test assumes unnamed arguments are promoted to
7643 DFmode. */
7644 if (mode == SFmode && ca->free_single_fp_reg)
7645 return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode, ca->free_single_fp_reg);
7647 if ((GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT)
7648 && (named || ! ca->prototype_p)
7649 && ca->arg_count[(int) SH_ARG_FLOAT] < NPARM_REGS (SFmode))
7651 if (! ca->prototype_p && TARGET_SHMEDIA)
7652 return SH5_PROTOTYPELESS_FLOAT_ARG (*ca, mode);
7654 return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode,
7656 + ca->arg_count[(int) SH_ARG_FLOAT]);
7659 if (ca->arg_count[(int) SH_ARG_INT] < NPARM_REGS (SImode)
7660 && (! TARGET_SHCOMPACT
7661 || (! SHCOMPACT_FORCE_ON_STACK (mode, type)
7662 && ! SH5_WOULD_BE_PARTIAL_NREGS (*ca, mode,
7665 return gen_rtx_REG (mode, (FIRST_PARM_REG
7666 + ca->arg_count[(int) SH_ARG_INT]));
7675 /* Update the data in CUM to advance over an argument
7676 of mode MODE and data type TYPE.
7677 (TYPE is null for libcalls where that information may not be
7678 available.) */
7681 sh_function_arg_advance (CUMULATIVE_ARGS *ca, enum machine_mode mode,
7682 tree type, int named)
7686 else if (TARGET_SH5)
7688 tree type2 = (ca->byref && type
7691 enum machine_mode mode2 = (ca->byref && type
7694 int dwords = ((ca->byref
7697 ? int_size_in_bytes (type2)
7698 : GET_MODE_SIZE (mode2)) + 7) / 8;
7699 int numregs = MIN (dwords, NPARM_REGS (SImode)
7700 - ca->arg_count[(int) SH_ARG_INT]);
7704 ca->arg_count[(int) SH_ARG_INT] += numregs;
7705 if (TARGET_SHCOMPACT
7706 && SHCOMPACT_FORCE_ON_STACK (mode2, type2))
7709 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7711 /* N.B. We want this also for outgoing. */
7712 ca->stack_regs += numregs;
7717 ca->stack_regs += numregs;
7718 ca->byref_regs += numregs;
7722 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7726 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7729 else if (dwords > numregs)
7731 int pushregs = numregs;
7733 if (TARGET_SHCOMPACT)
7734 ca->stack_regs += numregs;
7735 while (pushregs < NPARM_REGS (SImode) - 1
7736 && (CALL_COOKIE_INT_REG_GET
7738 NPARM_REGS (SImode) - pushregs)
7742 &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
7746 if (numregs == NPARM_REGS (SImode))
7748 |= CALL_COOKIE_INT_REG (0, 1)
7749 | CALL_COOKIE_STACKSEQ (numregs - 1);
7752 |= CALL_COOKIE_STACKSEQ (numregs);
7755 if (GET_SH_ARG_CLASS (mode2) == SH_ARG_FLOAT
7756 && (named || ! ca->prototype_p))
7758 if (mode2 == SFmode && ca->free_single_fp_reg)
7759 ca->free_single_fp_reg = 0;
7760 else if (ca->arg_count[(int) SH_ARG_FLOAT]
7761 < NPARM_REGS (SFmode))
7764 = MIN ((GET_MODE_SIZE (mode2) + 7) / 8 * 2,
7766 - ca->arg_count[(int) SH_ARG_FLOAT]);
7768 ca->arg_count[(int) SH_ARG_FLOAT] += numfpregs;
7770 if (TARGET_SHCOMPACT && ! ca->prototype_p)
7772 if (ca->outgoing && numregs > 0)
7776 |= (CALL_COOKIE_INT_REG
7777 (ca->arg_count[(int) SH_ARG_INT]
7778 - numregs + ((numfpregs - 2) / 2),
7779 4 + (ca->arg_count[(int) SH_ARG_FLOAT]
7782 while (numfpregs -= 2);
7784 else if (mode2 == SFmode && (named)
7785 && (ca->arg_count[(int) SH_ARG_FLOAT]
7786 < NPARM_REGS (SFmode)))
7787 ca->free_single_fp_reg
7788 = FIRST_FP_PARM_REG - numfpregs
7789 + ca->arg_count[(int) SH_ARG_FLOAT] + 1;
7795 if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
7797 /* Note that we've used the skipped register. */
7798 if (mode == SFmode && ca->free_single_fp_reg)
7800 ca->free_single_fp_reg = 0;
7803 /* When we have a DF after an SF, there's an SF register that gets
7804 skipped in order to align the DF value. We note this skipped
7805 register, because the next SF value will use it, and not the
7806 SF that follows the DF. */
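/* Illustrative (SH4 double-precision, Renesas-style alignment): in
     void g (float a, double b, float c);
   a takes fr4, b needs an even-aligned pair and lands in fr6/fr7,
   skipping fr5; the bookkeeping below lets c reuse fr5 instead of
   moving on to fr8.  */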
7808 && ROUND_REG (*ca, DFmode) != ROUND_REG (*ca, SFmode))
7810 ca->free_single_fp_reg = (ROUND_REG (*ca, SFmode)
7811 + BASE_ARG_REG (mode));
7815 if (! ((TARGET_SH4 || TARGET_SH2A) || ca->renesas_abi)
7816 || PASS_IN_REG_P (*ca, mode, type))
7817 (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
7818 = (ROUND_REG (*ca, mode)
7820 ? ROUND_ADVANCE (int_size_in_bytes (type))
7821 : ROUND_ADVANCE (GET_MODE_SIZE (mode)))));
7824 /* The Renesas calling convention doesn't quite fit into this scheme since
7825 the address is passed like an invisible argument, but one that is always
7826 passed in memory. */
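/* Illustrative: under -mrenesas (or -mhitachi), for a call such as
     struct big r = f ();
   the address of r travels in r2, the register returned below, rather
   than as a hidden first argument.  */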
7828 sh_struct_value_rtx (tree fndecl, int incoming ATTRIBUTE_UNUSED)
7830 if (TARGET_HITACHI || sh_attr_renesas_p (fndecl))
7832 return gen_rtx_REG (Pmode, 2);
7835 /* Worker function for TARGET_RETURN_IN_MEMORY. */
7838 sh_return_in_memory (const_tree type, const_tree fndecl)
7842 if (TYPE_MODE (type) == BLKmode)
7843 return ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)) > 8;
7845 return GET_MODE_SIZE (TYPE_MODE (type)) > 8;
7849 return (TYPE_MODE (type) == BLKmode
7850 || ((TARGET_HITACHI || sh_attr_renesas_p (fndecl))
7851 && TREE_CODE (type) == RECORD_TYPE));
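/* E.g. (illustrative, non-SH5): a 12-byte struct has BLKmode and is
   therefore returned in memory; under the Renesas ABI every
   RECORD_TYPE return goes to memory regardless of size.  */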
7855 /* We actually emit the code in sh_expand_prologue. We used to use
7856 a static variable to flag that we need to emit this code, but that
7857 doesn't work when inlining, when functions are deferred and then emitted
7858 later. Fortunately, we already have two flags that are part of struct
7859 function that tell if a function uses varargs or stdarg. */
7861 sh_setup_incoming_varargs (CUMULATIVE_ARGS *ca,
7862 enum machine_mode mode,
7864 int *pretend_arg_size,
7865 int second_time ATTRIBUTE_UNUSED)
7867 gcc_assert (cfun->stdarg);
7868 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
7870 int named_parm_regs, anon_parm_regs;
7872 named_parm_regs = (ROUND_REG (*ca, mode)
7874 ? ROUND_ADVANCE (int_size_in_bytes (type))
7875 : ROUND_ADVANCE (GET_MODE_SIZE (mode))));
7876 anon_parm_regs = NPARM_REGS (SImode) - named_parm_regs;
7877 if (anon_parm_regs > 0)
7878 *pretend_arg_size = anon_parm_regs * 4;
7883 sh_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
7889 sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *ca)
7891 return ! (TARGET_HITACHI || ca->renesas_abi) && ! TARGET_SH5;
7895 /* Define the offset between two registers, one to be eliminated, and
7896 the other its replacement, at the start of a routine. */
7899 initial_elimination_offset (int from, int to)
7902 int regs_saved_rounding = 0;
7903 int total_saved_regs_space;
7904 int total_auto_space;
7905 int save_flags = target_flags;
7907 HARD_REG_SET live_regs_mask;
7909 shmedia_space_reserved_for_target_registers = false;
7910 regs_saved = calc_live_regs (&live_regs_mask);
7911 regs_saved += SHMEDIA_REGS_STACK_ADJUST ();
7913 if (shmedia_reserve_space_for_target_registers_p (regs_saved, &live_regs_mask))
7915 shmedia_space_reserved_for_target_registers = true;
7916 regs_saved += shmedia_target_regs_stack_adjust (&live_regs_mask);
7919 if (TARGET_SH5 && regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT))
7920 regs_saved_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
7921 - regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT));
7923 total_auto_space = rounded_frame_size (regs_saved) - regs_saved_rounding;
7924 copy_flags = target_flags;
7925 target_flags = save_flags;
7927 total_saved_regs_space = regs_saved + regs_saved_rounding;
7929 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7930 return total_saved_regs_space + total_auto_space
7931 + crtl->args.info.byref_regs * 8;
7933 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7934 return total_saved_regs_space + total_auto_space
7935 + crtl->args.info.byref_regs * 8;
7937 /* Initial gap between fp and sp is 0. */
7938 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7941 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7942 return rounded_frame_size (0);
7944 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7945 return rounded_frame_size (0);
7947 gcc_assert (from == RETURN_ADDRESS_POINTER_REGNUM
7948 && (to == HARD_FRAME_POINTER_REGNUM
7949 || to == STACK_POINTER_REGNUM));
7952 int n = total_saved_regs_space;
7953 int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
7954 save_schedule schedule;
7957 n += total_auto_space;
7959 /* If it wasn't saved, there's not much we can do. */
7960 if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
7963 target_flags = copy_flags;
7965 sh5_schedule_saves (&live_regs_mask, &schedule, n);
7966 for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
7967 if (entry->reg == pr_reg)
7969 target_flags = save_flags;
7970 return entry->offset;
7975 return total_auto_space;
7978 /* Insert any deferred function attributes from earlier pragmas. */
7980 sh_insert_attributes (tree node, tree *attributes)
7984 if (TREE_CODE (node) != FUNCTION_DECL)
7987 /* We are only interested in fields. */
7991 /* Append the attributes to the deferred attributes. */
7992 *sh_deferred_function_attributes_tail = *attributes;
7993 attrs = sh_deferred_function_attributes;
7997 /* Some attributes imply or require the interrupt attribute. */
7998 if (!lookup_attribute ("interrupt_handler", attrs)
7999 && !lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (node)))
8001 /* If we have a trapa_handler, but no interrupt_handler attribute,
8002 insert an interrupt_handler attribute. */
8003 if (lookup_attribute ("trapa_handler", attrs) != NULL_TREE)
8004 /* We can't use sh_pr_interrupt here because that's not in the
8005 java frontend. */
8006 attrs
8007 = tree_cons (get_identifier("interrupt_handler"), NULL_TREE, attrs);
8008 /* However, for sp_switch, trap_exit, nosave_low_regs and resbank,
8009 if the interrupt attribute is missing, we ignore the attribute
8010 and warn. */
8011 else if (lookup_attribute ("sp_switch", attrs)
8012 || lookup_attribute ("trap_exit", attrs)
8013 || lookup_attribute ("nosave_low_regs", attrs)
8014 || lookup_attribute ("resbank", attrs))
8018 for (tail = attributes; attrs; attrs = TREE_CHAIN (attrs))
8020 if (is_attribute_p ("sp_switch", TREE_PURPOSE (attrs))
8021 || is_attribute_p ("trap_exit", TREE_PURPOSE (attrs))
8022 || is_attribute_p ("nosave_low_regs", TREE_PURPOSE (attrs))
8023 || is_attribute_p ("resbank", TREE_PURPOSE (attrs)))
8024 warning (OPT_Wattributes,
8025 "%qs attribute only applies to interrupt functions",
8026 IDENTIFIER_POINTER (TREE_PURPOSE (attrs)));
8029 *tail = tree_cons (TREE_PURPOSE (attrs), NULL_TREE,
8031 tail = &TREE_CHAIN (*tail);
8034 attrs = *attributes;
8038 /* Install the processed list. */
8039 *attributes = attrs;
8041 /* Clear deferred attributes. */
8042 sh_deferred_function_attributes = NULL_TREE;
8043 sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
8048 /* Supported attributes:
8050 interrupt_handler -- specifies this function is an interrupt handler.
8052 trapa_handler - like above, but don't save all registers.
8054 sp_switch -- specifies an alternate stack for an interrupt handler
8057 trap_exit -- use a trapa to exit an interrupt function instead of
8060 nosave_low_regs - don't save r0..r7 in an interrupt handler.
8061 This is useful on the SH3 and upwards,
8062 which has a separate set of low regs for User and Supervisor modes.
8063 This should only be used for the lowest level of interrupts. Higher levels
8064 of interrupts must save the registers in case they themselves are
8065 interrupted.
8067 renesas -- use Renesas calling/layout conventions (functions and
8070 resbank -- In case of an ISR, use a register bank to save registers
8071 R0-R14, MACH, MACL, GBR and PR. This is useful only on SH2A targets.
8072 */
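/* Usage sketch (illustrative declarations, not part of this file):

     void isr1 (void) __attribute__ ((interrupt_handler));
     void isr2 (void) __attribute__ ((interrupt_handler,
                                      sp_switch ("alt_stack")));
     void isr3 (void) __attribute__ ((interrupt_handler, trap_exit (11)));
     void isr4 (void) __attribute__ ((interrupt_handler, resbank));

   sp_switch takes a string naming the alternate stack and trap_exit
   an integer trap number, matching the argument checks in the
   handlers below.  */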
8074 const struct attribute_spec sh_attribute_table[] =
8076 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
8077 { "interrupt_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
8078 { "sp_switch", 1, 1, true, false, false, sh_handle_sp_switch_attribute },
8079 { "trap_exit", 1, 1, true, false, false, sh_handle_trap_exit_attribute },
8080 { "renesas", 0, 0, false, true, false, sh_handle_renesas_attribute },
8081 { "trapa_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
8082 { "nosave_low_regs", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
8083 { "resbank", 0, 0, true, false, false, sh_handle_resbank_handler_attribute },
8084 { "function_vector", 1, 1, true, false, false, sh2a_handle_function_vector_handler_attribute },
8086 /* Symbian support adds three new attributes:
8087 dllexport - for exporting a function/variable that will live in a dll
8088 dllimport - for importing a function/variable from a dll
8090 Microsoft allows multiple declspecs in one __declspec, separating
8091 them with spaces. We do NOT support this. Instead, use __declspec
8092 multiple times. */
8093 { "dllimport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
8094 { "dllexport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
8096 { NULL, 0, 0, false, false, false, NULL }
8099 /* Handle a 'resbank' attribute. */
8101 sh_handle_resbank_handler_attribute (tree * node, tree name,
8102 tree args ATTRIBUTE_UNUSED,
8103 int flags ATTRIBUTE_UNUSED,
8104 bool * no_add_attrs)
8108 warning (OPT_Wattributes, "%qs attribute is supported only for SH2A",
8109 IDENTIFIER_POINTER (name));
8110 *no_add_attrs = true;
8112 if (TREE_CODE (*node) != FUNCTION_DECL)
8114 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8115 IDENTIFIER_POINTER (name));
8116 *no_add_attrs = true;
8122 /* Handle an "interrupt_handler" attribute; arguments as in
8123 struct attribute_spec.handler. */
8125 sh_handle_interrupt_handler_attribute (tree *node, tree name,
8126 tree args ATTRIBUTE_UNUSED,
8127 int flags ATTRIBUTE_UNUSED,
8130 if (TREE_CODE (*node) != FUNCTION_DECL)
8132 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8133 IDENTIFIER_POINTER (name));
8134 *no_add_attrs = true;
8136 else if (TARGET_SHCOMPACT)
8138 error ("attribute interrupt_handler is not compatible with -m5-compact");
8139 *no_add_attrs = true;
8145 /* Handle a 'function_vector' attribute; arguments as in
8146 struct attribute_spec.handler. */
8148 sh2a_handle_function_vector_handler_attribute (tree * node, tree name,
8149 tree args ATTRIBUTE_UNUSED,
8150 int flags ATTRIBUTE_UNUSED,
8151 bool * no_add_attrs)
8155 warning (OPT_Wattributes, "%qs attribute only applies to SH2A",
8156 IDENTIFIER_POINTER (name));
8157 *no_add_attrs = true;
8159 else if (TREE_CODE (*node) != FUNCTION_DECL)
8161 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8162 IDENTIFIER_POINTER (name));
8163 *no_add_attrs = true;
8165 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
8167 /* The argument must be a constant integer. */
8168 warning (OPT_Wattributes,
8169 "`%s' attribute argument not an integer constant",
8170 IDENTIFIER_POINTER (name));
8171 *no_add_attrs = true;
8173 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
8175 /* The argument value must be between 0 and 255. */
8176 warning (OPT_Wattributes,
8177 "`%s' attribute argument should be between 0 and 255",
8178 IDENTIFIER_POINTER (name));
8179 *no_add_attrs = true;
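/* Usage sketch (illustrative, SH2A):
     void f (void) __attribute__ ((function_vector (42)));
   declares f callable through entry 42 of the TBR-relative function
   vector table; as checked above, the argument must be an integer
   constant between 0 and 255.  */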
8184 /* Returns 1 if current function has been assigned the attribute
8185 'function_vector'. */
8187 sh2a_is_function_vector_call (rtx x)
8189 if (GET_CODE (x) == SYMBOL_REF
8190 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
8192 tree tr = SYMBOL_REF_DECL (x);
8194 if (sh2a_function_vector_p (tr))
8201 /* Returns the function vector number, if the attribute
8202 'function_vector' is assigned, otherwise returns zero. */
8204 sh2a_get_function_vector_number (rtx x)
8209 if ((GET_CODE (x) == SYMBOL_REF)
8210 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
8212 t = SYMBOL_REF_DECL (x);
8214 if (TREE_CODE (t) != FUNCTION_DECL)
8217 list = SH_ATTRIBUTES (t);
8220 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
8222 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
8226 list = TREE_CHAIN (list);
8235 /* Handle an "sp_switch" attribute; arguments as in
8236 struct attribute_spec.handler. */
8238 sh_handle_sp_switch_attribute (tree *node, tree name, tree args,
8239 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
8241 if (TREE_CODE (*node) != FUNCTION_DECL)
8243 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8244 IDENTIFIER_POINTER (name));
8245 *no_add_attrs = true;
8247 else if (TREE_CODE (TREE_VALUE (args)) != STRING_CST)
8249 /* The argument must be a constant string. */
8250 warning (OPT_Wattributes, "%qs attribute argument not a string constant",
8251 IDENTIFIER_POINTER (name));
8252 *no_add_attrs = true;
8258 /* Handle a "trap_exit" attribute; arguments as in
8259 struct attribute_spec.handler. */
8261 sh_handle_trap_exit_attribute (tree *node, tree name, tree args,
8262 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
8264 if (TREE_CODE (*node) != FUNCTION_DECL)
8266 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8267 IDENTIFIER_POINTER (name));
8268 *no_add_attrs = true;
8270 /* The argument specifies a trap number to be used in a trapa instruction
8271 at function exit (instead of an rte instruction). */
8272 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
8274 /* The argument must be a constant integer. */
8275 warning (OPT_Wattributes, "%qs attribute argument not an "
8276 "integer constant", IDENTIFIER_POINTER (name));
8277 *no_add_attrs = true;
8284 sh_handle_renesas_attribute (tree *node ATTRIBUTE_UNUSED,
8285 tree name ATTRIBUTE_UNUSED,
8286 tree args ATTRIBUTE_UNUSED,
8287 int flags ATTRIBUTE_UNUSED,
8288 bool *no_add_attrs ATTRIBUTE_UNUSED)
8293 /* True if __attribute__((renesas)) or -mrenesas. */
8295 sh_attr_renesas_p (const_tree td)
8302 td = TREE_TYPE (td);
8303 if (td == error_mark_node)
8305 return (lookup_attribute ("renesas", TYPE_ATTRIBUTES (td))
8309 /* True if __attribute__((renesas)) or -mrenesas, for the current
8312 sh_cfun_attr_renesas_p (void)
8314 return sh_attr_renesas_p (current_function_decl);
8318 sh_cfun_interrupt_handler_p (void)
8320 return (lookup_attribute ("interrupt_handler",
8321 DECL_ATTRIBUTES (current_function_decl))
8325 /* Returns 1 if FUNC has been assigned the attribute
8326 "function_vector". */
8328 sh2a_function_vector_p (tree func)
8331 if (TREE_CODE (func) != FUNCTION_DECL)
8334 list = SH_ATTRIBUTES (func);
8337 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
8340 list = TREE_CHAIN (list);
8345 /* Returns TRUE if given tree has the "resbank" attribute. */
8348 sh_cfun_resbank_handler_p (void)
8350 return ((lookup_attribute ("resbank",
8351 DECL_ATTRIBUTES (current_function_decl))
8353 && (lookup_attribute ("interrupt_handler",
8354 DECL_ATTRIBUTES (current_function_decl))
8355 != NULL_TREE) && TARGET_SH2A);
8358 /* Implement TARGET_CHECK_PCH_TARGET_FLAGS. */
8361 sh_check_pch_target_flags (int old_flags)
8363 if ((old_flags ^ target_flags) & (MASK_SH1 | MASK_SH2 | MASK_SH3
8364 | MASK_SH_E | MASK_HARD_SH4
8365 | MASK_FPU_SINGLE | MASK_SH4))
8366 return _("created and used with different architectures / ABIs");
8367 if ((old_flags ^ target_flags) & MASK_HITACHI)
8368 return _("created and used with different ABIs");
8369 if ((old_flags ^ target_flags) & MASK_LITTLE_ENDIAN)
8370 return _("created and used with different endianness");
8374 /* Predicates used by the templates. */
8376 /* Returns 1 if OP is MACL, MACH or PR. The input must be a REG rtx.
8377 Used only in general_movsrc_operand. */
8380 system_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8392 /* Nonzero if OP is a floating point value with value 0.0. */
8395 fp_zero_operand (rtx op)
8399 if (GET_MODE (op) != SFmode)
8402 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
8403 return REAL_VALUES_EQUAL (r, dconst0) && ! REAL_VALUE_MINUS_ZERO (r);
8406 /* Nonzero if OP is a floating point value with value 1.0. */
8409 fp_one_operand (rtx op)
8413 if (GET_MODE (op) != SFmode)
8416 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
8417 return REAL_VALUES_EQUAL (r, dconst1);
8420 /* For -m4 and -m4-single-only, mode switching is used. If we are
8421 compiling without -mfmovd, movsf_ie isn't taken into account for
8422 mode switching. We could check in machine_dependent_reorg for
8423 cases where we know we are in single precision mode, but there is
8424 no interface to find that out during reload, so we must avoid
8425 choosing an fldi alternative during reload and thus failing to
8426 allocate a scratch register for the constant loading. */
8430 return ! TARGET_SH4 || TARGET_FMOVD || reload_completed;
8434 tertiary_reload_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8436 enum rtx_code code = GET_CODE (op);
8437 return code == MEM || (TARGET_SH4 && code == CONST_DOUBLE);
8440 /* Return the TLS type for TLS symbols, 0 otherwise. */
8442 tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8444 if (GET_CODE (op) != SYMBOL_REF)
8446 return SYMBOL_REF_TLS_MODEL (op);
8449 /* Return the destination address of a branch. */
8452 branch_dest (rtx branch)
8454 rtx dest = SET_SRC (PATTERN (branch));
8457 if (GET_CODE (dest) == IF_THEN_ELSE)
8458 dest = XEXP (dest, 1);
8459 dest = XEXP (dest, 0);
8460 dest_uid = INSN_UID (dest);
8461 return INSN_ADDRESSES (dest_uid);
8464 /* Return nonzero if REG is not used after INSN.
8465 We assume REG is a reload reg, and therefore does
8466 not live past labels. It may live past calls or jumps though. */
8468 reg_unused_after (rtx reg, rtx insn)
8473 /* If the reg is set by this instruction, then it is safe for our
8474 case. Disregard the case where this is a store to memory, since
8475 we are checking a register used in the store address. */
8476 set = single_set (insn);
8477 if (set && GET_CODE (SET_DEST (set)) != MEM
8478 && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8481 while ((insn = NEXT_INSN (insn)))
8487 code = GET_CODE (insn);
8490 /* If this is a label that existed before reload, then the register
8491 is dead here. However, if this is a label added by reorg, then
8492 the register may still be live here. We can't tell the difference,
8493 so we just ignore labels completely. */
8494 if (code == CODE_LABEL)
8499 if (code == JUMP_INSN)
8502 /* If this is a sequence, we must handle them all at once.
8503 We could have for instance a call that sets the target register,
8504 and an insn in a delay slot that uses the register. In this case,
8505 we must return 0. */
8506 else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
8511 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8513 rtx this_insn = XVECEXP (PATTERN (insn), 0, i);
8514 rtx set = single_set (this_insn);
8516 if (GET_CODE (this_insn) == CALL_INSN)
8518 else if (GET_CODE (this_insn) == JUMP_INSN)
8520 if (INSN_ANNULLED_BRANCH_P (this_insn))
8525 if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
8527 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8529 if (GET_CODE (SET_DEST (set)) != MEM)
8535 && reg_overlap_mentioned_p (reg, PATTERN (this_insn)))
8540 else if (code == JUMP_INSN)
8544 set = single_set (insn);
8545 if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
8547 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8548 return GET_CODE (SET_DEST (set)) != MEM;
8549 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
8552 if (code == CALL_INSN && call_really_used_regs[REGNO (reg)])
8560 static GTY(()) rtx fpscr_rtx;
8562 get_fpscr_rtx (void)
8566 fpscr_rtx = gen_rtx_REG (PSImode, FPSCR_REG);
8567 REG_USERVAR_P (fpscr_rtx) = 1;
8568 mark_user_reg (fpscr_rtx);
8570 if (! reload_completed || mdep_reorg_phase != SH_AFTER_MDEP_REORG)
8571 mark_user_reg (fpscr_rtx);
8575 static GTY(()) tree fpscr_values;
8578 emit_fpu_switch (rtx scratch, int index)
8582 if (fpscr_values == NULL)
8586 t = build_index_type (integer_one_node);
8587 t = build_array_type (integer_type_node, t);
8588 t = build_decl (VAR_DECL, get_identifier ("__fpscr_values"), t);
8589 DECL_ARTIFICIAL (t) = 1;
8590 DECL_IGNORED_P (t) = 1;
8591 DECL_EXTERNAL (t) = 1;
8592 TREE_STATIC (t) = 1;
8593 TREE_PUBLIC (t) = 1;
8599 src = DECL_RTL (fpscr_values);
8600 if (!can_create_pseudo_p ())
8602 emit_move_insn (scratch, XEXP (src, 0));
8604 emit_insn (gen_addsi3 (scratch, scratch, GEN_INT (index * 4)));
8605 src = adjust_automodify_address (src, PSImode, scratch, index * 4);
8608 src = adjust_address (src, PSImode, index * 4);
8610 dst = get_fpscr_rtx ();
8611 emit_move_insn (dst, src);
8615 emit_sf_insn (rtx pat)
8621 emit_df_insn (rtx pat)
8627 expand_sf_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
8629 emit_sf_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
8633 expand_sf_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
8635 emit_sf_insn ((*fun) (operands[0], operands[1], operands[2],
8640 expand_df_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
8642 emit_df_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
8646 expand_df_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
8648 emit_df_insn ((*fun) (operands[0], operands[1], operands[2],
8652 static rtx get_free_reg (HARD_REG_SET);
8654 /* This function returns a register to use to load the address from which
8655 the fpscr is loaded. Currently it always returns r1 or r7, but when we are
8656 able to use pseudo registers after combine, or have a better mechanism
8657 for choosing a register, it should be done here. */
8658 /* REGS_LIVE is the liveness information for the point for which we
8659 need this allocation. In some bare-bones exit blocks, r1 is live at the
8660 start. We can even have all of r0..r3 being live:
8661 __complex__ long long f (double d) { if (d == 0) return 2; else return 3; }
8662 The INSN before which new insns are placed will clobber the register
8663 we return. If a basic block consists only of setting the return value
8664 register to a pseudo and using that register, the return value is not
8665 live before or after this block, yet we'll insert our insns right in
8666 the middle. */
8669 get_free_reg (HARD_REG_SET regs_live)
8671 if (! TEST_HARD_REG_BIT (regs_live, 1))
8672 return gen_rtx_REG (Pmode, 1);
8674 /* Hard reg 1 is live; since this is a SMALL_REGISTER_CLASSES target,
8675 there shouldn't be anything but a jump before the function end. */
8676 gcc_assert (!TEST_HARD_REG_BIT (regs_live, 7));
8677 return gen_rtx_REG (Pmode, 7);
8680 /* This function will set the fpscr from memory.
8681 MODE is the mode we are setting it to. */
8683 fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
8685 enum attr_fp_mode fp_mode = mode;
8686 enum attr_fp_mode norm_mode = ACTUAL_NORMAL_MODE (FP_MODE);
8689 addr_reg = !can_create_pseudo_p () ? get_free_reg (regs_live) : NULL_RTX;
8690 emit_fpu_switch (addr_reg, fp_mode == norm_mode);
8693 /* Is the given character a logical line separator for the assembler? */
8694 #ifndef IS_ASM_LOGICAL_LINE_SEPARATOR
8695 #define IS_ASM_LOGICAL_LINE_SEPARATOR(C, STR) ((C) == ';')
8699 sh_insn_length_adjustment (rtx insn)
8701 /* Instructions with unfilled delay slots take up an extra two bytes for
8702 the nop in the delay slot. */
8703 if (((GET_CODE (insn) == INSN
8704 && GET_CODE (PATTERN (insn)) != USE
8705 && GET_CODE (PATTERN (insn)) != CLOBBER)
8706 || GET_CODE (insn) == CALL_INSN
8707 || (GET_CODE (insn) == JUMP_INSN
8708 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
8709 && GET_CODE (PATTERN (insn)) != ADDR_VEC))
8710 && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE
8711 && get_attr_needs_delay_slot (insn) == NEEDS_DELAY_SLOT_YES)
8714 /* SH2e has a bug that prevents the use of annulled branches, so if
8715 the delay slot is not filled, we'll have to put a NOP in it. */
8716 if (sh_cpu == CPU_SH2E
8717 && GET_CODE (insn) == JUMP_INSN
8718 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
8719 && GET_CODE (PATTERN (insn)) != ADDR_VEC
8720 && get_attr_type (insn) == TYPE_CBRANCH
8721 && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE)
8724 /* sh-dsp parallel processing insns take four bytes instead of two. */
8726 if (GET_CODE (insn) == INSN)
8729 rtx body = PATTERN (insn);
8730 const char *template;
8732 int maybe_label = 1;
8734 if (GET_CODE (body) == ASM_INPUT)
8735 template = XSTR (body, 0);
8736 else if (asm_noperands (body) >= 0)
8738 = decode_asm_operands (body, NULL, NULL, NULL, NULL, NULL);
8747 while (c == ' ' || c == '\t');
8748 /* All sh-dsp parallel-processing insns start with p.
8749 The only non-ppi sh insn starting with p is pref.
8750 The only ppi starting with pr is prnd. */
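/* E.g. "padd x0,y0,a0" is a 32-bit parallel-processing insn, while
"pref @r1" is an ordinary 16-bit insn; the "re" comparison below is
what exempts pref. (Illustrative examples.) */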
8751 if ((c == 'p' || c == 'P') && strncasecmp ("re", template, 2))
8753 /* The repeat pseudo-insn expands to three insns, a total of
8754 six bytes in size. */
8755 else if ((c == 'r' || c == 'R')
8756 && ! strncasecmp ("epeat", template, 5))
8758 while (c && c != '\n'
8759 && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c, template))
8761 /* If this is a label, it is obviously not a ppi insn. */
8762 if (c == ':' && maybe_label)
8767 else if (c == '\'' || c == '"')
8772 maybe_label = c != ':';
8780 /* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
8781 isn't protected by a PIC unspec. */
8783 nonpic_symbol_mentioned_p (rtx x)
8785 register const char *fmt;
8788 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF
8789 || GET_CODE (x) == PC)
8792 /* We don't want to look into the possible MEM location of a
8793 CONST_DOUBLE, since we're not going to use it, in general. */
8794 if (GET_CODE (x) == CONST_DOUBLE)
8797 if (GET_CODE (x) == UNSPEC
8798 && (XINT (x, 1) == UNSPEC_PIC
8799 || XINT (x, 1) == UNSPEC_GOT
8800 || XINT (x, 1) == UNSPEC_GOTOFF
8801 || XINT (x, 1) == UNSPEC_GOTPLT
8802 || XINT (x, 1) == UNSPEC_GOTTPOFF
8803 || XINT (x, 1) == UNSPEC_DTPOFF
8804 || XINT (x, 1) == UNSPEC_PLT))
8807 fmt = GET_RTX_FORMAT (GET_CODE (x));
8808 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8814 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8815 if (nonpic_symbol_mentioned_p (XVECEXP (x, i, j)))
8818 else if (fmt[i] == 'e' && nonpic_symbol_mentioned_p (XEXP (x, i)))
8825 /* Convert a non-PIC address in `orig' to a PIC address using @GOT or
8826 @GOTOFF in `reg'. */
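/* A rough sketch of the cases handled below: a label or a local symbol
is reached relative to the PIC register via gen_symGOTOFF2reg, while
any other SYMBOL_REF is loaded from its GOT slot via gen_symGOT2reg. */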
8828 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
8831 if (tls_symbolic_operand (orig, Pmode))
8834 if (GET_CODE (orig) == LABEL_REF
8835 || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (orig)))
8838 reg = gen_reg_rtx (Pmode);
8840 emit_insn (gen_symGOTOFF2reg (reg, orig));
8843 else if (GET_CODE (orig) == SYMBOL_REF)
8846 reg = gen_reg_rtx (Pmode);
8848 emit_insn (gen_symGOT2reg (reg, orig));
8854 /* Mark the use of a constant in the literal table. If the constant
8855 has multiple labels, make it unique. */
8857 mark_constant_pool_use (rtx x)
8859 rtx insn, lab, pattern;
8864 switch (GET_CODE (x))
8874 /* Get the first label in the list of labels for the same constant
8875 and delete the other labels in the list. */
8877 for (insn = PREV_INSN (x); insn; insn = PREV_INSN (insn))
8879 if (GET_CODE (insn) != CODE_LABEL
8880 || LABEL_REFS (insn) != NEXT_INSN (insn))
8885 for (insn = LABEL_REFS (lab); insn; insn = LABEL_REFS (insn))
8886 INSN_DELETED_P (insn) = 1;
8888 /* Mark constants in a window. */
8889 for (insn = NEXT_INSN (x); insn; insn = NEXT_INSN (insn))
8891 if (GET_CODE (insn) != INSN)
8894 pattern = PATTERN (insn);
8895 if (GET_CODE (pattern) != UNSPEC_VOLATILE)
8898 switch (XINT (pattern, 1))
8900 case UNSPECV_CONST2:
8901 case UNSPECV_CONST4:
8902 case UNSPECV_CONST8:
8903 XVECEXP (pattern, 0, 1) = const1_rtx;
8905 case UNSPECV_WINDOW_END:
8906 if (XVECEXP (pattern, 0, 0) == x)
8909 case UNSPECV_CONST_END:
8919 /* Return true if it's possible to redirect BRANCH1 to the destination
8920 of an unconditional jump BRANCH2. We only want to do this if the
8921 resulting branch will have a short displacement. */
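/* The 256-byte window used below reflects the reach of the SH
conditional branch insns, whose 8-bit displacement is scaled by 2;
the scan is done in both directions from BRANCH1. */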
8923 sh_can_redirect_branch (rtx branch1, rtx branch2)
8925 if (flag_expensive_optimizations && simplejump_p (branch2))
8927 rtx dest = XEXP (SET_SRC (single_set (branch2)), 0);
8931 for (distance = 0, insn = NEXT_INSN (branch1);
8932 insn && distance < 256;
8933 insn = PREV_INSN (insn))
8938 distance += get_attr_length (insn);
8940 for (distance = 0, insn = NEXT_INSN (branch1);
8941 insn && distance < 256;
8942 insn = NEXT_INSN (insn))
8947 distance += get_attr_length (insn);
8953 /* Return nonzero if register old_reg can be renamed to register new_reg. */
8955 sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
8956 unsigned int new_reg)
8958 /* Interrupt functions can only use registers that have already been
8959 saved by the prologue, even if they would normally be call-clobbered. */
8962 if (sh_cfun_interrupt_handler_p () && !df_regs_ever_live_p (new_reg))
8968 /* Function to update the integer COST
8969 based on the relationship between INSN that is dependent on
8970 DEP_INSN through the dependence LINK. The default is to make no
8971 adjustment to COST. This can be used for example to specify to
8972 the scheduler that an output- or anti-dependence does not incur
8973 the same cost as a data-dependence. The return value should be
8974 the new value for COST. */
8976 sh_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx dep_insn, int cost)
8982 /* On SHmedia, if the dependence is an anti-dependence or
8983 output-dependence, there is no cost. */
8984 if (REG_NOTE_KIND (link) != 0)
8986 /* However, dependencies between target register loads and
8987 uses of the register in a subsequent block that are separated
8988 by a conditional branch are not modelled - we have to make do with
8989 the anti-dependency between the target register load and the
8990 conditional branch that ends the current block. */
8991 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
8992 && GET_CODE (PATTERN (dep_insn)) == SET
8993 && (get_attr_type (dep_insn) == TYPE_PT_MEDIA
8994 || get_attr_type (dep_insn) == TYPE_PTABS_MEDIA)
8995 && get_attr_type (insn) == TYPE_CBRANCH_MEDIA)
8997 int orig_cost = cost;
8998 rtx note = find_reg_note (insn, REG_BR_PROB, 0);
8999 rtx target = ((! note
9000 || INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
9001 ? insn : JUMP_LABEL (insn));
9002 /* On the likely path, the branch costs 1, on the unlikely path, it costs 3. */
9006 target = next_active_insn (target);
9007 while (target && ! flow_dependent_p (target, dep_insn)
9009 /* If two branches are executed in immediate succession, with the
9010 first branch properly predicted, this causes a stall at the
9011 second branch, hence we won't need the target for the
9012 second branch for two cycles after the launch of the first branch. */
9014 if (cost > orig_cost - 2)
9015 cost = orig_cost - 2;
9021 else if (get_attr_is_mac_media (insn)
9022 && get_attr_is_mac_media (dep_insn))
9025 else if (! reload_completed
9026 && GET_CODE (PATTERN (insn)) == SET
9027 && GET_CODE (SET_SRC (PATTERN (insn))) == FLOAT
9028 && GET_CODE (PATTERN (dep_insn)) == SET
9029 && fp_arith_reg_operand (SET_SRC (PATTERN (dep_insn)), VOIDmode)
9032 /* Schedule the ptabs for a casesi_jump_media in preference to stuff
9033 that is needed at the target. */
9034 else if (get_attr_type (insn) == TYPE_JUMP_MEDIA
9035 && ! flow_dependent_p (insn, dep_insn))
9038 else if (REG_NOTE_KIND (link) == 0)
9040 enum attr_type type;
9043 if (recog_memoized (insn) < 0
9044 || recog_memoized (dep_insn) < 0)
9047 dep_set = single_set (dep_insn);
9049 /* The latency that we specify in the scheduling description refers
9050 to the actual output, not to an auto-increment register; for that,
9051 the latency is one. */
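/* Illustrative example: after "mov.l @r1+,r2", a consumer that only
needs the incremented r1 waits a single cycle, whereas a consumer of
r2 must wait for the full load latency. */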
9052 if (dep_set && MEM_P (SET_SRC (dep_set)) && cost > 1)
9054 rtx set = single_set (insn);
9057 && !reg_mentioned_p (SET_DEST (dep_set), SET_SRC (set))
9058 && (!MEM_P (SET_DEST (set))
9059 || !reg_mentioned_p (SET_DEST (dep_set),
9060 XEXP (SET_DEST (set), 0))))
9063 /* The only input for a call that is timing-critical is the
9064 function's address. */
9065 if (GET_CODE (insn) == CALL_INSN)
9067 rtx call = PATTERN (insn);
9069 if (GET_CODE (call) == PARALLEL)
9070 call = XVECEXP (call, 0, 0);
9071 if (GET_CODE (call) == SET)
9072 call = SET_SRC (call);
9073 if (GET_CODE (call) == CALL && GET_CODE (XEXP (call, 0)) == MEM
9074 /* sibcalli_thunk uses a symbol_ref in an unspec. */
9075 && (GET_CODE (XEXP (XEXP (call, 0), 0)) == UNSPEC
9076 || ! reg_set_p (XEXP (XEXP (call, 0), 0), dep_insn)))
9077 cost -= TARGET_SH4_300 ? 3 : 6;
9079 /* Likewise, the most timing critical input for an sfunc call
9080 is the function address. However, sfuncs typically start
9081 using their arguments pretty quickly.
9082 Assume a four cycle delay for SH4 before they are needed.
9083 Cached ST40-300 calls are quicker, so assume only a one cycle delay there.
9085 ??? Maybe we should encode the delays till input registers
9086 are needed by sfuncs into the sfunc call insn. */
9087 /* All sfunc calls are parallels with at least four components.
9088 Exploit this to avoid unnecessary calls to sfunc_uses_reg. */
9089 else if (GET_CODE (PATTERN (insn)) == PARALLEL
9090 && XVECLEN (PATTERN (insn), 0) >= 4
9091 && (reg = sfunc_uses_reg (insn)))
9093 if (! reg_set_p (reg, dep_insn))
9094 cost -= TARGET_SH4_300 ? 1 : 4;
9096 if (TARGET_HARD_SH4 && !TARGET_SH4_300)
9098 enum attr_type dep_type = get_attr_type (dep_insn);
9100 if (dep_type == TYPE_FLOAD || dep_type == TYPE_PCFLOAD)
9102 else if ((dep_type == TYPE_LOAD_SI || dep_type == TYPE_PCLOAD_SI)
9103 && (type = get_attr_type (insn)) != TYPE_CALL
9104 && type != TYPE_SFUNC)
9106 /* When the preceding instruction loads the shift amount of
9107 the following SHAD/SHLD, the latency of the load is increased by 1 cycle. */
9109 if (get_attr_type (insn) == TYPE_DYN_SHIFT
9110 && get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
9111 && reg_overlap_mentioned_p (SET_DEST (dep_set),
9112 XEXP (SET_SRC (single_set (insn)),
9115 /* When an LS group instruction with a latency of less than
9116 3 cycles is followed by a double-precision floating-point
9117 instruction, FIPR, or FTRV, the latency of the first
9118 instruction is increased to 3 cycles. */
9120 && get_attr_insn_class (dep_insn) == INSN_CLASS_LS_GROUP
9121 && get_attr_dfp_comp (insn) == DFP_COMP_YES)
9123 /* The lsw register of a double-precision computation is ready one cycle earlier. */
9125 else if (reload_completed
9126 && get_attr_dfp_comp (dep_insn) == DFP_COMP_YES
9127 && (use_pat = single_set (insn))
9128 && ! regno_use_in (REGNO (SET_DEST (single_set (dep_insn))),
9132 if (get_attr_any_fp_comp (dep_insn) == ANY_FP_COMP_YES
9133 && get_attr_late_fp_use (insn) == LATE_FP_USE_YES)
9136 else if (TARGET_SH4_300)
9138 /* Stores need their input register two cycles later. */
9139 if (dep_set && cost >= 1
9140 && ((type = get_attr_type (insn)) == TYPE_STORE
9141 || type == TYPE_PSTORE
9142 || type == TYPE_FSTORE || type == TYPE_MAC_MEM))
9144 rtx set = single_set (insn);
9146 if (!reg_mentioned_p (SET_SRC (set), XEXP (SET_DEST (set), 0))
9147 && rtx_equal_p (SET_SRC (set), SET_DEST (dep_set)))
9150 /* But don't reduce the cost below 1 if the address depends
9151 on a side effect of dep_insn. */
9153 && modified_in_p (XEXP (SET_DEST (set), 0), dep_insn))
9159 /* An anti-dependence penalty of two applies if the first insn is a double
9160 precision fadd / fsub / fmul. */
9161 else if (!TARGET_SH4_300
9162 && REG_NOTE_KIND (link) == REG_DEP_ANTI
9163 && recog_memoized (dep_insn) >= 0
9164 && (get_attr_type (dep_insn) == TYPE_DFP_ARITH
9165 || get_attr_type (dep_insn) == TYPE_DFP_MUL)
9166 /* A lot of alleged anti-flow dependences are fake,
9167 so check this one is real. */
9168 && flow_dependent_p (dep_insn, insn))
9174 /* Check if INSN is flow-dependent on DEP_INSN. Can also be used to check
9175 if DEP_INSN is anti-flow dependent on INSN. */
9177 flow_dependent_p (rtx insn, rtx dep_insn)
9179 rtx tmp = PATTERN (insn);
9181 note_stores (PATTERN (dep_insn), flow_dependent_p_1, &tmp);
9182 return tmp == NULL_RTX;
9185 /* A helper function for flow_dependent_p called through note_stores. */
9187 flow_dependent_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
9189 rtx * pinsn = (rtx *) data;
9191 if (*pinsn && reg_referenced_p (x, *pinsn))
9195 /* For use by sh_allocate_initial_value. Note that sh.md contains some
9196 'special function' patterns (type sfunc) that clobber pr, but that
9197 do not look like function calls to leaf_function_p. Hence we must
9198 do this extra check. */
9202 return DF_REG_DEF_COUNT (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
9205 /* Return where to allocate a pseudo for a given hard register initial value. */
9208 sh_allocate_initial_value (rtx hard_reg)
9212 if (REGNO (hard_reg) == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG))
9214 if (current_function_is_leaf
9215 && ! sh_pr_n_sets ()
9216 && ! (TARGET_SHCOMPACT
9217 && ((crtl->args.info.call_cookie
9218 & ~ CALL_COOKIE_RET_TRAMP (1))
9219 || crtl->saves_all_registers)))
9222 x = gen_frame_mem (Pmode, return_address_pointer_rtx);
9230 /* This function returns "2" to indicate dual issue for the SH4
9231 processor. To be used by the DFA pipeline description. */
9233 sh_issue_rate (void)
9235 if (TARGET_SUPERSCALAR)
9241 /* Functions for ready queue reordering for sched1. */
9243 /* Get weight for mode for a set x. */
9245 find_set_regmode_weight (rtx x, enum machine_mode mode)
9247 if (GET_CODE (x) == CLOBBER && register_operand (SET_DEST (x), mode))
9249 if (GET_CODE (x) == SET && register_operand (SET_DEST (x), mode))
9251 if (GET_CODE (SET_DEST (x)) == REG)
9253 if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
9263 /* Get regmode weight for insn. */
9265 find_insn_regmode_weight (rtx insn, enum machine_mode mode)
9267 short reg_weight = 0;
9270 /* Increment weight for each register born here. */
9272 reg_weight += find_set_regmode_weight (x, mode);
9273 if (GET_CODE (x) == PARALLEL)
9276 for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
9278 x = XVECEXP (PATTERN (insn), 0, j);
9279 reg_weight += find_set_regmode_weight (x, mode);
9282 /* Decrement weight for each register that dies here. */
9283 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
9285 if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED)
9287 rtx note = XEXP (x, 0);
9288 if (GET_CODE (note) == REG && GET_MODE (note) == mode)
9295 /* Calculate regmode weights for all insns of a basic block. */
9297 find_regmode_weight (basic_block b, enum machine_mode mode)
9299 rtx insn, next_tail, head, tail;
9301 get_ebb_head_tail (b, b, &head, &tail);
9302 next_tail = NEXT_INSN (tail);
9304 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
9306 /* Handle register life information. */
9311 INSN_REGMODE_WEIGHT (insn, mode) =
9312 find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DFmode);
9313 else if (mode == SImode)
9314 INSN_REGMODE_WEIGHT (insn, mode) =
9315 find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DImode);
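/* The factor of two above reflects that a DFmode value occupies two
SFmode registers, and a DImode value two SImode registers, so such
insns weigh twice as heavily on the narrower mode's register pressure. */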
9319 /* Comparison function for ready queue sorting. */
9321 rank_for_reorder (const void *x, const void *y)
9323 rtx tmp = *(const rtx *) y;
9324 rtx tmp2 = *(const rtx *) x;
9326 /* The insn in a schedule group should be issued first. */
9327 if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
9328 return SCHED_GROUP_P (tmp2) ? 1 : -1;
9330 /* If insns are equally good, sort by INSN_LUID (original insn order); this
9331 minimizes instruction movement, thus minimizing sched's effect on
9332 register pressure. */
9333 return INSN_LUID (tmp) - INSN_LUID (tmp2);
9336 /* Resort the array A in which only element at index N may be out of order. */
9338 swap_reorder (rtx *a, int n)
9340 rtx insn = a[n - 1];
9343 while (i >= 0 && rank_for_reorder (a + i, &insn) >= 0)
9351 #define SCHED_REORDER(READY, N_READY) \
9354 if ((N_READY) == 2) \
9355 swap_reorder (READY, N_READY); \
9356 else if ((N_READY) > 2) \
9357 qsort (READY, N_READY, sizeof (rtx), rank_for_reorder); \
9361 /* Sort the ready list READY by ascending priority, using the SCHED_REORDER macro above. */
9364 ready_reorder (rtx *ready, int nready)
9366 SCHED_REORDER (ready, nready);
9369 /* Count life regions of r0 for a block. */
9371 find_r0_life_regions (basic_block b)
9380 if (REGNO_REG_SET_P (df_get_live_in (b), R0_REG))
9393 r0_reg = gen_rtx_REG (SImode, R0_REG);
9398 if (find_regno_note (insn, REG_DEAD, R0_REG))
9404 && (pset = single_set (insn))
9405 && reg_overlap_mentioned_p (r0_reg, SET_DEST (pset))
9406 && !find_regno_note (insn, REG_UNUSED, R0_REG))
9414 insn = NEXT_INSN (insn);
9419 /* Calculate regmode weights for all insns of all basic blocks. */
9421 sh_md_init_global (FILE *dump ATTRIBUTE_UNUSED,
9422 int verbose ATTRIBUTE_UNUSED,
9427 regmode_weight[0] = (short *) xcalloc (old_max_uid, sizeof (short));
9428 regmode_weight[1] = (short *) xcalloc (old_max_uid, sizeof (short));
9429 r0_life_regions = 0;
9431 FOR_EACH_BB_REVERSE (b)
9433 find_regmode_weight (b, SImode);
9434 find_regmode_weight (b, SFmode);
9435 if (!reload_completed)
9436 r0_life_regions += find_r0_life_regions (b);
9439 CURR_REGMODE_PRESSURE (SImode) = 0;
9440 CURR_REGMODE_PRESSURE (SFmode) = 0;
9446 sh_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
9447 int verbose ATTRIBUTE_UNUSED)
9449 if (regmode_weight[0])
9451 free (regmode_weight[0]);
9452 regmode_weight[0] = NULL;
9454 if (regmode_weight[1])
9456 free (regmode_weight[1]);
9457 regmode_weight[1] = NULL;
9461 /* The set of scalar modes supported differs from the default version in TImode
9462 for 32-bit SHMEDIA. */
9464 sh_scalar_mode_supported_p (enum machine_mode mode)
9466 if (TARGET_SHMEDIA32 && mode == TImode)
9469 return default_scalar_mode_supported_p (mode);
9472 /* Cache can_issue_more so that we can return it from sh_reorder2. Also,
9473 keep count of register pressure for SImode and SFmode. */
9475 sh_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
9476 int sched_verbose ATTRIBUTE_UNUSED,
9480 if (GET_CODE (PATTERN (insn)) != USE
9481 && GET_CODE (PATTERN (insn)) != CLOBBER)
9482 cached_can_issue_more = can_issue_more - 1;
9484 cached_can_issue_more = can_issue_more;
9486 if (reload_completed)
9487 return cached_can_issue_more;
9489 CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
9490 CURR_REGMODE_PRESSURE (SFmode) += INSN_REGMODE_WEIGHT (insn, SFmode);
9492 return cached_can_issue_more;
9496 sh_md_init (FILE *dump ATTRIBUTE_UNUSED,
9497 int verbose ATTRIBUTE_UNUSED,
9498 int veclen ATTRIBUTE_UNUSED)
9500 CURR_REGMODE_PRESSURE (SImode) = 0;
9501 CURR_REGMODE_PRESSURE (SFmode) = 0;
9504 /* Some magic numbers. */
9505 /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
9506 functions that already have high pressure on r0. */
9507 #define R0_MAX_LIFE_REGIONS 2
9508 /* Register Pressure thresholds for SImode and SFmode registers. */
9509 #define SIMODE_MAX_WEIGHT 5
9510 #define SFMODE_MAX_WEIGHT 10
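/* Worked example (rough): if the insns issued so far have created six
SImode values that are all still live, CURR_REGMODE_PRESSURE (SImode)
is 6, which exceeds SIMODE_MAX_WEIGHT and makes high_pressure return 1. */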
9512 /* Return true if the pressure is high for MODE. */
9514 high_pressure (enum machine_mode mode)
9516 /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
9517 functions that already have high pressure on r0. */
9518 if (r0_life_regions >= R0_MAX_LIFE_REGIONS)
9522 return (CURR_REGMODE_PRESSURE (SFmode) > SFMODE_MAX_WEIGHT);
9524 return (CURR_REGMODE_PRESSURE (SImode) > SIMODE_MAX_WEIGHT);
9527 /* Reorder ready queue if register pressure is high. */
9529 sh_reorder (FILE *dump ATTRIBUTE_UNUSED,
9530 int sched_verbose ATTRIBUTE_UNUSED,
9533 int clock_var ATTRIBUTE_UNUSED)
9535 if (reload_completed)
9536 return sh_issue_rate ();
9538 if (high_pressure (SFmode) || high_pressure (SImode))
9540 ready_reorder (ready, *n_readyp);
9543 return sh_issue_rate ();
9546 /* Skip cycles if the current register pressure is high. */
9548 sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
9549 int sched_verbose ATTRIBUTE_UNUSED,
9550 rtx *ready ATTRIBUTE_UNUSED,
9551 int *n_readyp ATTRIBUTE_UNUSED,
9552 int clock_var ATTRIBUTE_UNUSED)
9554 if (reload_completed)
9555 return cached_can_issue_more;
9557 if (high_pressure (SFmode) || high_pressure (SImode))
9560 return cached_can_issue_more;
9563 /* Skip cycles without sorting the ready queue. This will move insns from
9564 Q->R. If this is the last cycle we are skipping, allow sorting of the ready
9565 queue by sh_reorder. */
9567 /* Generally, skipping this many cycles is sufficient for all insns to move from Q -> R. */
9572 sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED,
9573 int sched_verbose ATTRIBUTE_UNUSED,
9574 rtx insn ATTRIBUTE_UNUSED,
9579 if (reload_completed)
9584 if ((clock_var - last_clock_var) < MAX_SKIPS)
9589 /* If this is the last cycle we are skipping, allow reordering of R. */
9590 if ((clock_var - last_clock_var) == MAX_SKIPS)
9602 /* SHmedia requires registers for branches, so we can't generate new
9603 branches past reload. */
9605 sh_cannot_modify_jumps_p (void)
9607 return (TARGET_SHMEDIA && (reload_in_progress || reload_completed));
9611 sh_target_reg_class (void)
9613 return TARGET_SHMEDIA ? TARGET_REGS : NO_REGS;
9617 sh_optimize_target_register_callee_saved (bool after_prologue_epilogue_gen)
9624 if (! shmedia_space_reserved_for_target_registers)
9626 if (after_prologue_epilogue_gen && ! TARGET_SAVE_ALL_TARGET_REGS)
9628 if (calc_live_regs (&dummy) >= 6 * 8)
9634 sh_ms_bitfield_layout_p (const_tree record_type ATTRIBUTE_UNUSED)
9636 return (TARGET_SH5 || TARGET_HITACHI || sh_attr_renesas_p (record_type));
9640 On the SH1..SH4, the trampoline looks like
9641 2 0002 D202 mov.l l2,r2
9642 1 0000 D301 mov.l l1,r3
9645 5 0008 00000000 l1: .long area
9646 6 000c 00000000 l2: .long function
9648 SH5 (compact) uses r1 instead of r3 for the static chain. */
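/* As a cross-check (illustrative), the opcode words stored by
sh_initialize_trampoline below encode exactly this sequence: 0xd202 is
"mov.l l2,r2", 0xd301 is "mov.l l1,r3", 0x422b is "jmp @r2" and 0x0009
is "nop"; they are packed into SImode words according to the target
endianness. */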
9651 /* Emit RTL insns to initialize the variable parts of a trampoline.
9652 FNADDR is an RTX for the address of the function's pure code.
9653 CXT is an RTX for the static chain value for the function. */
9656 sh_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
9658 rtx tramp_mem = gen_frame_mem (BLKmode, tramp);
9660 if (TARGET_SHMEDIA64)
9665 rtx movi1 = GEN_INT (0xcc000010);
9666 rtx shori1 = GEN_INT (0xc8000010);
9669 /* The following trampoline works within a +- 128 KB range for cxt:
9670 ptb/u cxt,tr1; movi fnaddr >> 48,r0; shori fnaddr >> 32,r0;
9671 shori fnaddr >> 16,r0; shori fnaddr,r0; ptabs/l r0,tr0
9672 gettr tr1,r1; blink tr0,r63 */
9673 /* Address rounding makes it hard to compute the exact bounds of the
9674 offset for this trampoline, but we have a rather generous offset
9675 range, so frame_offset should do fine as an upper bound. */
9676 if (cxt == virtual_stack_vars_rtx && frame_offset < 0x20000)
9678 /* ??? could optimize this trampoline initialization
9679 by writing DImode words with two insns each. */
9680 rtx mask = force_reg (DImode, GEN_INT (0x3fffc00));
9681 rtx insn = gen_rtx_MINUS (DImode, cxt, tramp);
9682 insn = gen_rtx_ASHIFT (DImode, insn, GEN_INT (10-2));
9683 insn = gen_rtx_AND (DImode, insn, mask);
9684 /* OR in the ptb/u .,tr1 pattern. */
9685 insn = gen_rtx_IOR (DImode, insn, gen_int_mode (0xec000010, SImode));
9686 insn = force_operand (insn, NULL_RTX);
9687 insn = gen_lowpart (SImode, insn);
9688 emit_move_insn (change_address (tramp_mem, SImode, NULL_RTX), insn);
9689 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (38));
9690 insn = gen_rtx_AND (DImode, insn, mask);
9691 insn = force_operand (gen_rtx_IOR (DImode, movi1, insn), NULL_RTX);
9692 insn = gen_lowpart (SImode, insn);
9693 emit_move_insn (adjust_address (tramp_mem, SImode, 4), insn);
9694 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (22));
9695 insn = gen_rtx_AND (DImode, insn, mask);
9696 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9697 insn = gen_lowpart (SImode, insn);
9698 emit_move_insn (adjust_address (tramp_mem, SImode, 8), insn);
9699 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (6));
9700 insn = gen_rtx_AND (DImode, insn, mask);
9701 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9702 insn = gen_lowpart (SImode, insn);
9703 emit_move_insn (adjust_address (tramp_mem, SImode, 12), insn);
9704 insn = gen_rtx_ASHIFT (DImode, fnaddr, GEN_INT (10));
9705 insn = gen_rtx_AND (DImode, insn, mask);
9706 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9707 insn = gen_lowpart (SImode, insn);
9708 emit_move_insn (adjust_address (tramp_mem, SImode, 16), insn);
9709 emit_move_insn (adjust_address (tramp_mem, SImode, 20),
9710 GEN_INT (0x6bf10600));
9711 emit_move_insn (adjust_address (tramp_mem, SImode, 24),
9712 GEN_INT (0x4415fc10));
9713 emit_move_insn (adjust_address (tramp_mem, SImode, 28),
9714 GEN_INT (0x4401fff0));
9715 emit_insn (gen_ic_invalidate_line (tramp));
9718 tramp_templ = gen_rtx_SYMBOL_REF (Pmode, "__GCC_nested_trampoline");
9719 fixed_len = TRAMPOLINE_SIZE - 2 * GET_MODE_SIZE (Pmode);
9721 tramp_templ = gen_datalabel_ref (tramp_templ);
9723 src = gen_const_mem (BLKmode, tramp_templ);
9724 set_mem_align (dst, 256);
9725 set_mem_align (src, 64);
9726 emit_block_move (dst, src, GEN_INT (fixed_len), BLOCK_OP_NORMAL);
9728 emit_move_insn (adjust_address (tramp_mem, Pmode, fixed_len), fnaddr);
9729 emit_move_insn (adjust_address (tramp_mem, Pmode,
9730 fixed_len + GET_MODE_SIZE (Pmode)),
9732 emit_insn (gen_ic_invalidate_line (tramp));
9735 else if (TARGET_SHMEDIA)
9737 /* movi fnaddr >> 16,r1; shori fnaddr,r1; ptabs/l r1,tr0
9738 movi cxt >> 16,r1; shori cxt,r1; blink tr0,r63 */
9739 rtx quad0 = gen_reg_rtx (DImode), cxtload = gen_reg_rtx (DImode);
9740 rtx quad1 = gen_reg_rtx (DImode), quad2 = gen_reg_rtx (DImode);
9741 /* "movi 0,r1" (0xcc000010) and "shori 0,r1" (0xc8000010) concatenated,
9742 rotated right by 10, with the upper 16 bits of every 32 selected. */
9744 = force_reg (V2HImode, (simplify_gen_subreg
9745 (V2HImode, GEN_INT (0x4330432), SImode, 0)));
9746 rtx ptabs = force_reg (DImode, GEN_INT (0x6bf10600));
9747 rtx blink = force_reg (DImode, GEN_INT (0x4401fff0));
9749 tramp = force_reg (Pmode, tramp);
9750 fnaddr = force_reg (SImode, fnaddr);
9751 cxt = force_reg (SImode, cxt);
9752 emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, quad0, 0),
9753 gen_rtx_SUBREG (V2HImode, fnaddr, 0),
9755 emit_insn (gen_rotrdi3_mextr (quad0, quad0,
9756 GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
9757 emit_insn (gen_ashldi3_media (quad0, quad0, const2_rtx));
9758 emit_move_insn (change_address (tramp_mem, DImode, NULL_RTX), quad0);
9759 emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, cxtload, 0),
9760 gen_rtx_SUBREG (V2HImode, cxt, 0),
9762 emit_insn (gen_rotrdi3_mextr (cxtload, cxtload,
9763 GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
9764 emit_insn (gen_ashldi3_media (cxtload, cxtload, const2_rtx));
9765 if (TARGET_LITTLE_ENDIAN)
9767 emit_insn (gen_mshflo_l_di (quad1, ptabs, cxtload));
9768 emit_insn (gen_mextr4 (quad2, cxtload, blink));
9772 emit_insn (gen_mextr4 (quad1, cxtload, ptabs));
9773 emit_insn (gen_mshflo_l_di (quad2, blink, cxtload));
9775 emit_move_insn (adjust_address (tramp_mem, DImode, 8), quad1);
9776 emit_move_insn (adjust_address (tramp_mem, DImode, 16), quad2);
9777 emit_insn (gen_ic_invalidate_line (tramp));
9780 else if (TARGET_SHCOMPACT)
9782 emit_insn (gen_initialize_trampoline (tramp, cxt, fnaddr));
9785 emit_move_insn (change_address (tramp_mem, SImode, NULL_RTX),
9786 gen_int_mode (TARGET_LITTLE_ENDIAN ? 0xd301d202 : 0xd202d301,
9788 emit_move_insn (adjust_address (tramp_mem, SImode, 4),
9789 gen_int_mode (TARGET_LITTLE_ENDIAN ? 0x0009422b : 0x422b0009,
9791 emit_move_insn (adjust_address (tramp_mem, SImode, 8), cxt);
9792 emit_move_insn (adjust_address (tramp_mem, SImode, 12), fnaddr);
9795 if (!TARGET_INLINE_IC_INVALIDATE
9796 || (!(TARGET_SH4A_ARCH || TARGET_SH4_300) && TARGET_USERMODE))
9797 emit_library_call (function_symbol (NULL, "__ic_invalidate",
9799 0, VOIDmode, 1, tramp, SImode);
9801 emit_insn (gen_ic_invalidate_line (tramp));
9805 /* FIXME: This is overly conservative. A SHcompact function that
9806 receives arguments ``by reference'' will have them stored in its
9807 own stack frame, so it must not pass pointers or references to
9808 these arguments to other functions by means of sibling calls. */
9809 /* If PIC, we cannot make sibling calls to global functions
9810 because the PLT requires r12 to be live. */
9812 sh_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
9815 && (! TARGET_SHCOMPACT
9816 || crtl->args.info.stack_regs == 0)
9817 && ! sh_cfun_interrupt_handler_p ()
9819 || (decl && ! TREE_PUBLIC (decl))
9820 || (decl && DECL_VISIBILITY (decl) != VISIBILITY_DEFAULT)));
9823 /* Machine specific built-in functions. */
9825 struct builtin_description
9827 const enum insn_code icode;
9828 const char *const name;
9832 /* Describe the number and signedness of arguments; arg[0] == result
9833 (1: unsigned, 2: signed, 4: don't care, 8: pointer, 0: no argument). */
9834 /* 9: 64-bit pointer, 10: 32-bit pointer */
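/* For example, a hypothetical row { 2, 2, 2, 0 } would describe a
builtin with a signed result and two signed arguments, the machine
modes being taken from the operands of the insn pattern named in the
bdesc table below. */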
9835 static const char signature_args[][4] =
9837 #define SH_BLTIN_V2SI2 0
9839 #define SH_BLTIN_V4HI2 1
9841 #define SH_BLTIN_V2SI3 2
9843 #define SH_BLTIN_V4HI3 3
9845 #define SH_BLTIN_V8QI3 4
9847 #define SH_BLTIN_MAC_HISI 5
9849 #define SH_BLTIN_SH_HI 6
9851 #define SH_BLTIN_SH_SI 7
9853 #define SH_BLTIN_V4HI2V2SI 8
9855 #define SH_BLTIN_V4HI2V8QI 9
9857 #define SH_BLTIN_SISF 10
9859 #define SH_BLTIN_LDUA_L 11
9861 #define SH_BLTIN_LDUA_Q 12
9863 #define SH_BLTIN_STUA_L 13
9865 #define SH_BLTIN_STUA_Q 14
9867 #define SH_BLTIN_LDUA_L64 15
9869 #define SH_BLTIN_LDUA_Q64 16
9871 #define SH_BLTIN_STUA_L64 17
9873 #define SH_BLTIN_STUA_Q64 18
9875 #define SH_BLTIN_NUM_SHARED_SIGNATURES 19
9876 #define SH_BLTIN_2 19
9877 #define SH_BLTIN_SU 19
9879 #define SH_BLTIN_3 20
9880 #define SH_BLTIN_SUS 20
9882 #define SH_BLTIN_PSSV 21
9884 #define SH_BLTIN_XXUU 22
9885 #define SH_BLTIN_UUUU 22
9887 #define SH_BLTIN_PV 23
9890 /* mcmv: operands considered unsigned. */
9891 /* mmulsum_wq, msad_ubq: result considered unsigned long long. */
9892 /* mperm: control value considered unsigned int. */
9893 /* mshalds, mshard, mshards, mshlld, mshlrd: shift count is unsigned int. */
9894 /* mshards_q: returns signed short. */
9895 /* nsb: takes long long arg, returns unsigned char. */
9896 static const struct builtin_description bdesc[] =
9898 { CODE_FOR_absv2si2, "__builtin_absv2si2", SH_BLTIN_V2SI2 },
9899 { CODE_FOR_absv4hi2, "__builtin_absv4hi2", SH_BLTIN_V4HI2 },
9900 { CODE_FOR_addv2si3, "__builtin_addv2si3", SH_BLTIN_V2SI3 },
9901 { CODE_FOR_addv4hi3, "__builtin_addv4hi3", SH_BLTIN_V4HI3 },
9902 { CODE_FOR_ssaddv2si3,"__builtin_ssaddv2si3", SH_BLTIN_V2SI3 },
9903 { CODE_FOR_usaddv8qi3,"__builtin_usaddv8qi3", SH_BLTIN_V8QI3 },
9904 { CODE_FOR_ssaddv4hi3,"__builtin_ssaddv4hi3", SH_BLTIN_V4HI3 },
9905 { CODE_FOR_alloco_i, "__builtin_sh_media_ALLOCO", SH_BLTIN_PV },
9906 { CODE_FOR_negcmpeqv8qi,"__builtin_sh_media_MCMPEQ_B", SH_BLTIN_V8QI3 },
9907 { CODE_FOR_negcmpeqv2si,"__builtin_sh_media_MCMPEQ_L", SH_BLTIN_V2SI3 },
9908 { CODE_FOR_negcmpeqv4hi,"__builtin_sh_media_MCMPEQ_W", SH_BLTIN_V4HI3 },
9909 { CODE_FOR_negcmpgtuv8qi,"__builtin_sh_media_MCMPGT_UB", SH_BLTIN_V8QI3 },
9910 { CODE_FOR_negcmpgtv2si,"__builtin_sh_media_MCMPGT_L", SH_BLTIN_V2SI3 },
9911 { CODE_FOR_negcmpgtv4hi,"__builtin_sh_media_MCMPGT_W", SH_BLTIN_V4HI3 },
9912 { CODE_FOR_mcmv, "__builtin_sh_media_MCMV", SH_BLTIN_UUUU },
9913 { CODE_FOR_mcnvs_lw, "__builtin_sh_media_MCNVS_LW", SH_BLTIN_3 },
9914 { CODE_FOR_mcnvs_wb, "__builtin_sh_media_MCNVS_WB", SH_BLTIN_V4HI2V8QI },
9915 { CODE_FOR_mcnvs_wub, "__builtin_sh_media_MCNVS_WUB", SH_BLTIN_V4HI2V8QI },
9916 { CODE_FOR_mextr1, "__builtin_sh_media_MEXTR1", SH_BLTIN_V8QI3 },
9917 { CODE_FOR_mextr2, "__builtin_sh_media_MEXTR2", SH_BLTIN_V8QI3 },
9918 { CODE_FOR_mextr3, "__builtin_sh_media_MEXTR3", SH_BLTIN_V8QI3 },
9919 { CODE_FOR_mextr4, "__builtin_sh_media_MEXTR4", SH_BLTIN_V8QI3 },
9920 { CODE_FOR_mextr5, "__builtin_sh_media_MEXTR5", SH_BLTIN_V8QI3 },
9921 { CODE_FOR_mextr6, "__builtin_sh_media_MEXTR6", SH_BLTIN_V8QI3 },
9922 { CODE_FOR_mextr7, "__builtin_sh_media_MEXTR7", SH_BLTIN_V8QI3 },
9923 { CODE_FOR_mmacfx_wl, "__builtin_sh_media_MMACFX_WL", SH_BLTIN_MAC_HISI },
9924 { CODE_FOR_mmacnfx_wl,"__builtin_sh_media_MMACNFX_WL", SH_BLTIN_MAC_HISI },
9925 { CODE_FOR_mulv2si3, "__builtin_mulv2si3", SH_BLTIN_V2SI3 },
9926 { CODE_FOR_mulv4hi3, "__builtin_mulv4hi3", SH_BLTIN_V4HI3 },
9927 { CODE_FOR_mmulfx_l, "__builtin_sh_media_MMULFX_L", SH_BLTIN_V2SI3 },
9928 { CODE_FOR_mmulfx_w, "__builtin_sh_media_MMULFX_W", SH_BLTIN_V4HI3 },
9929 { CODE_FOR_mmulfxrp_w,"__builtin_sh_media_MMULFXRP_W", SH_BLTIN_V4HI3 },
9930 { CODE_FOR_mmulhi_wl, "__builtin_sh_media_MMULHI_WL", SH_BLTIN_V4HI2V2SI },
9931 { CODE_FOR_mmullo_wl, "__builtin_sh_media_MMULLO_WL", SH_BLTIN_V4HI2V2SI },
9932 { CODE_FOR_mmulsum_wq,"__builtin_sh_media_MMULSUM_WQ", SH_BLTIN_XXUU },
9933 { CODE_FOR_mperm_w, "__builtin_sh_media_MPERM_W", SH_BLTIN_SH_HI },
9934 { CODE_FOR_msad_ubq, "__builtin_sh_media_MSAD_UBQ", SH_BLTIN_XXUU },
9935 { CODE_FOR_mshalds_l, "__builtin_sh_media_MSHALDS_L", SH_BLTIN_SH_SI },
9936 { CODE_FOR_mshalds_w, "__builtin_sh_media_MSHALDS_W", SH_BLTIN_SH_HI },
9937 { CODE_FOR_ashrv2si3, "__builtin_ashrv2si3", SH_BLTIN_SH_SI },
9938 { CODE_FOR_ashrv4hi3, "__builtin_ashrv4hi3", SH_BLTIN_SH_HI },
9939 { CODE_FOR_mshards_q, "__builtin_sh_media_MSHARDS_Q", SH_BLTIN_SUS },
9940 { CODE_FOR_mshfhi_b, "__builtin_sh_media_MSHFHI_B", SH_BLTIN_V8QI3 },
9941 { CODE_FOR_mshfhi_l, "__builtin_sh_media_MSHFHI_L", SH_BLTIN_V2SI3 },
9942 { CODE_FOR_mshfhi_w, "__builtin_sh_media_MSHFHI_W", SH_BLTIN_V4HI3 },
9943 { CODE_FOR_mshflo_b, "__builtin_sh_media_MSHFLO_B", SH_BLTIN_V8QI3 },
9944 { CODE_FOR_mshflo_l, "__builtin_sh_media_MSHFLO_L", SH_BLTIN_V2SI3 },
9945 { CODE_FOR_mshflo_w, "__builtin_sh_media_MSHFLO_W", SH_BLTIN_V4HI3 },
9946 { CODE_FOR_ashlv2si3, "__builtin_ashlv2si3", SH_BLTIN_SH_SI },
9947 { CODE_FOR_ashlv4hi3, "__builtin_ashlv4hi3", SH_BLTIN_SH_HI },
9948 { CODE_FOR_lshrv2si3, "__builtin_lshrv2si3", SH_BLTIN_SH_SI },
9949 { CODE_FOR_lshrv4hi3, "__builtin_lshrv4hi3", SH_BLTIN_SH_HI },
9950 { CODE_FOR_subv2si3, "__builtin_subv2si3", SH_BLTIN_V2SI3 },
9951 { CODE_FOR_subv4hi3, "__builtin_subv4hi3", SH_BLTIN_V4HI3 },
9952 { CODE_FOR_sssubv2si3,"__builtin_sssubv2si3", SH_BLTIN_V2SI3 },
9953 { CODE_FOR_ussubv8qi3,"__builtin_ussubv8qi3", SH_BLTIN_V8QI3 },
9954 { CODE_FOR_sssubv4hi3,"__builtin_sssubv4hi3", SH_BLTIN_V4HI3 },
9955 { CODE_FOR_fcosa_s, "__builtin_sh_media_FCOSA_S", SH_BLTIN_SISF },
9956 { CODE_FOR_fsina_s, "__builtin_sh_media_FSINA_S", SH_BLTIN_SISF },
9957 { CODE_FOR_fipr, "__builtin_sh_media_FIPR_S", SH_BLTIN_3 },
9958 { CODE_FOR_ftrv, "__builtin_sh_media_FTRV_S", SH_BLTIN_3 },
9959 { CODE_FOR_mac_media, "__builtin_sh_media_FMAC_S", SH_BLTIN_3 },
9960 { CODE_FOR_sqrtdf2, "__builtin_sh_media_FSQRT_D", SH_BLTIN_2 },
9961 { CODE_FOR_sqrtsf2, "__builtin_sh_media_FSQRT_S", SH_BLTIN_2 },
9962 { CODE_FOR_fsrra_s, "__builtin_sh_media_FSRRA_S", SH_BLTIN_2 },
9963 { CODE_FOR_ldhi_l, "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L },
9964 { CODE_FOR_ldhi_q, "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q },
9965 { CODE_FOR_ldlo_l, "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L },
9966 { CODE_FOR_ldlo_q, "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q },
9967 { CODE_FOR_sthi_l, "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L },
9968 { CODE_FOR_sthi_q, "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q },
9969 { CODE_FOR_stlo_l, "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L },
9970 { CODE_FOR_stlo_q, "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q },
9971 { CODE_FOR_ldhi_l64, "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L64 },
9972 { CODE_FOR_ldhi_q64, "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q64 },
9973 { CODE_FOR_ldlo_l64, "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L64 },
9974 { CODE_FOR_ldlo_q64, "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q64 },
9975 { CODE_FOR_sthi_l64, "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L64 },
9976 { CODE_FOR_sthi_q64, "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q64 },
9977 { CODE_FOR_stlo_l64, "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L64 },
9978 { CODE_FOR_stlo_q64, "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q64 },
9979 { CODE_FOR_nsb, "__builtin_sh_media_NSB", SH_BLTIN_SU },
9980 { CODE_FOR_byterev, "__builtin_sh_media_BYTEREV", SH_BLTIN_2 },
9981 { CODE_FOR_prefetch, "__builtin_sh_media_PREFO", SH_BLTIN_PSSV },
9985 sh_media_init_builtins (void)
9987 tree shared[SH_BLTIN_NUM_SHARED_SIGNATURES];
9988 const struct builtin_description *d;
9990 memset (shared, 0, sizeof shared);
9991 for (d = bdesc; d - bdesc < (int) ARRAY_SIZE (bdesc); d++)
9993 tree type, arg_type = 0;
9994 int signature = d->signature;
9997 if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES && shared[signature])
9998 type = shared[signature];
10001 int has_result = signature_args[signature][0] != 0;
10003 if ((signature_args[signature][1] & 8)
10004 && (((signature_args[signature][1] & 1) && TARGET_SHMEDIA32)
10005 || ((signature_args[signature][1] & 2) && TARGET_SHMEDIA64)))
10007 if (! TARGET_FPU_ANY
10008 && FLOAT_MODE_P (insn_data[d->icode].operand[0].mode))
10010 type = void_list_node;
10013 int arg = signature_args[signature][i];
10014 int opno = i - 1 + has_result;
10017 arg_type = ptr_type_node;
10019 arg_type = (*lang_hooks.types.type_for_mode)
10020 (insn_data[d->icode].operand[opno].mode,
10025 arg_type = void_type_node;
10028 type = tree_cons (NULL_TREE, arg_type, type);
10030 type = build_function_type (arg_type, type);
10031 if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES)
10032 shared[signature] = type;
10034 add_builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
10039 /* Implements target hook vector_mode_supported_p. */
10041 sh_vector_mode_supported_p (enum machine_mode mode)
10044 && ((mode == V2SFmode)
10045 || (mode == V4SFmode)
10046 || (mode == V16SFmode)))
10049 else if (TARGET_SHMEDIA
10050 && ((mode == V8QImode)
10051 || (mode == V2HImode)
10052 || (mode == V4HImode)
10053 || (mode == V2SImode)))
10059 /* Implements target hook dwarf_calling_convention. Return an enum
10060 of dwarf_calling_convention. */
10062 sh_dwarf_calling_convention (const_tree func)
10064 if (sh_attr_renesas_p (func))
10065 return DW_CC_GNU_renesas_sh;
10067 return DW_CC_normal;
10071 sh_init_builtins (void)
10073 if (TARGET_SHMEDIA)
10074 sh_media_init_builtins ();
10077 /* Expand an expression EXP that calls a built-in function,
10078 with result going to TARGET if that's convenient
10079 (and in mode MODE if that's convenient).
10080 SUBTARGET may be used as the target for computing one of EXP's operands.
10081 IGNORE is nonzero if the value is to be ignored. */
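/* A minimal user-level sketch (hypothetical, assuming an SHmedia target
and GCC's vector_size extension; not part of this file):

typedef int v2si __attribute__ ((vector_size (8)));
v2si f (v2si a, v2si b) { return __builtin_addv2si3 (a, b); }

Such a call reaches this expander and is emitted via CODE_FOR_addv2si3
from the bdesc table above. */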
10084 sh_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10085 enum machine_mode mode ATTRIBUTE_UNUSED, int ignore)
10087 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10088 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10089 const struct builtin_description *d = &bdesc[fcode];
10090 enum insn_code icode = d->icode;
10091 int signature = d->signature;
10092 enum machine_mode tmode = VOIDmode;
10097 if (signature_args[signature][0])
10102 tmode = insn_data[icode].operand[0].mode;
10104 || GET_MODE (target) != tmode
10105 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10106 target = gen_reg_rtx (tmode);
10107 op[nop++] = target;
10112 for (i = 1; i <= 3; i++, nop++)
10115 enum machine_mode opmode, argmode;
10118 if (! signature_args[signature][i])
10120 arg = CALL_EXPR_ARG (exp, i - 1);
10121 if (arg == error_mark_node)
10123 if (signature_args[signature][i] & 8)
10126 optype = ptr_type_node;
10130 opmode = insn_data[icode].operand[nop].mode;
10131 optype = (*lang_hooks.types.type_for_mode) (opmode, 0);
10133 argmode = TYPE_MODE (TREE_TYPE (arg));
10134 if (argmode != opmode)
10135 arg = build1 (NOP_EXPR, optype, arg);
10136 op[nop] = expand_expr (arg, NULL_RTX, opmode, 0);
10137 if (! (*insn_data[icode].operand[nop].predicate) (op[nop], opmode))
10138 op[nop] = copy_to_mode_reg (opmode, op[nop]);
10144 pat = (*insn_data[d->icode].genfun) (op[0]);
10147 pat = (*insn_data[d->icode].genfun) (op[0], op[1]);
10150 pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2]);
10153 pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2], op[3]);
10156 gcc_unreachable ();
10165 sh_expand_unop_v2sf (enum rtx_code code, rtx op0, rtx op1)
10167 rtx sel0 = const0_rtx;
10168 rtx sel1 = const1_rtx;
10169 rtx (*fn) (rtx, rtx, rtx, rtx, rtx) = gen_unary_sf_op;
10170 rtx op = gen_rtx_fmt_e (code, SFmode, op1);
10172 emit_insn ((*fn) (op0, op1, op, sel0, sel0));
10173 emit_insn ((*fn) (op0, op1, op, sel1, sel1));
10177 sh_expand_binop_v2sf (enum rtx_code code, rtx op0, rtx op1, rtx op2)
10179 rtx op = gen_rtx_fmt_ee (code, SFmode, op1, op2);
10181 emit_insn (gen_binary_sf_op0 (op0, op1, op2, op));
10182 emit_insn (gen_binary_sf_op1 (op0, op1, op2, op));
10185 /* Return the class of registers for which a mode change from FROM to TO is invalid. */
10188 sh_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
10189 enum reg_class class)
10191 /* We want to enable the use of SUBREGs as a means to
10192 VEC_SELECT a single element of a vector. */
10193 if (to == SFmode && VECTOR_MODE_P (from) && GET_MODE_INNER (from) == SFmode)
10194 return (reg_classes_intersect_p (GENERAL_REGS, class));
10196 if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
10198 if (TARGET_LITTLE_ENDIAN)
10200 if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
10201 return reg_classes_intersect_p (DF_REGS, class);
10205 if (GET_MODE_SIZE (from) < 8)
10206 return reg_classes_intersect_p (DF_HI_REGS, class);
10213 /* If ADDRESS refers to a CODE_LABEL, add NUSES to the number of times
10214 that label is used. */
10217 sh_mark_label (rtx address, int nuses)
10219 if (GOTOFF_P (address))
10221 /* Extract the label or symbol. */
10222 address = XEXP (address, 0);
10223 if (GET_CODE (address) == PLUS)
10224 address = XEXP (address, 0);
10225 address = XVECEXP (address, 0, 0);
10227 if (GET_CODE (address) == LABEL_REF
10228 && GET_CODE (XEXP (address, 0)) == CODE_LABEL)
10229 LABEL_NUSES (XEXP (address, 0)) += nuses;
10232 /* Compute extra cost of moving data between one register class and another. */
10235 /* If SECONDARY*_RELOAD_CLASS says something about the src/dst pair, regclass
10236 uses this information. Hence, the general register <-> floating point
10237 register information here is not used for SFmode. */
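/* Worked example: a DFmode move between a general register and a
floating point register on SH4 without -mfmovd costs
12 * ((8 + 7) / 8) = 12 below, whereas the generic case at the end of
this function prices a DFmode move at 2 * ((8 + 3) / 4) = 4. */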
10240 sh_register_move_cost (enum machine_mode mode,
10241 enum reg_class srcclass, enum reg_class dstclass)
10243 if (dstclass == T_REGS || dstclass == PR_REGS)
10246 if (dstclass == MAC_REGS && srcclass == MAC_REGS)
10249 if (mode == SImode && ! TARGET_SHMEDIA && TARGET_FMOVD
10250 && REGCLASS_HAS_FP_REG (srcclass)
10251 && REGCLASS_HAS_FP_REG (dstclass))
10254 if (REGCLASS_HAS_FP_REG (dstclass) && srcclass == T_REGS)
10255 return ((TARGET_HARD_SH4 && !optimize_size) ? 10 : 7);
10257 if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
10258 || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
10261 if ((REGCLASS_HAS_FP_REG (dstclass)
10262 && REGCLASS_HAS_GENERAL_REG (srcclass))
10263 || (REGCLASS_HAS_GENERAL_REG (dstclass)
10264 && REGCLASS_HAS_FP_REG (srcclass)))
10265 return ((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12)
10266 * ((GET_MODE_SIZE (mode) + 7) / 8U));
10268 if ((dstclass == FPUL_REGS
10269 && REGCLASS_HAS_GENERAL_REG (srcclass))
10270 || (srcclass == FPUL_REGS
10271 && REGCLASS_HAS_GENERAL_REG (dstclass)))
10274 if ((dstclass == FPUL_REGS
10275 && (srcclass == PR_REGS || srcclass == MAC_REGS || srcclass == T_REGS))
10276 || (srcclass == FPUL_REGS
10277 && (dstclass == PR_REGS || dstclass == MAC_REGS)))
10280 if ((srcclass == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
10281 || ((dstclass) == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
10284 /* ??? ptabs faults on (value & 0x3) == 0x3 */
10286 && ((srcclass) == TARGET_REGS || (srcclass) == SIBCALL_REGS))
10288 if (sh_gettrcost >= 0)
10289 return sh_gettrcost;
10290 else if (!TARGET_PT_FIXED)
10294 if ((srcclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
10295 || (dstclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
10300 && ! REGCLASS_HAS_GENERAL_REG (srcclass)
10301 && ! REGCLASS_HAS_GENERAL_REG (dstclass)))
10302 return 2 * ((GET_MODE_SIZE (mode) + 7) / 8U);
10304 return 2 * ((GET_MODE_SIZE (mode) + 3) / 4U);
10307 static rtx emit_load_ptr (rtx, rtx);
10310 emit_load_ptr (rtx reg, rtx addr)
10312 rtx mem = gen_const_mem (ptr_mode, addr);
10314 if (Pmode != ptr_mode)
10315 mem = gen_rtx_SIGN_EXTEND (Pmode, mem);
10316 return emit_move_insn (reg, mem);
10320 sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
10321 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10324 CUMULATIVE_ARGS cum;
10325 int structure_value_byref = 0;
10326 rtx this, this_value, sibcall, insns, funexp;
10327 tree funtype = TREE_TYPE (function);
10328 int simple_add = CONST_OK_FOR_ADD (delta);
10330 rtx scratch0, scratch1, scratch2;
10333 reload_completed = 1;
10334 epilogue_completed = 1;
10335 current_function_uses_only_leaf_regs = 1;
10337 emit_note (NOTE_INSN_PROLOGUE_END);
10339 /* Find the "this" pointer. We have such a wide range of ABIs for the
10340 SH that it's best to do this completely machine independently.
10341 "this" is passed as first argument, unless a structure return pointer
10342 comes first, in which case "this" comes second. */
10343 INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
10344 #ifndef PCC_STATIC_STRUCT_RETURN
10345 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
10346 structure_value_byref = 1;
10347 #endif /* not PCC_STATIC_STRUCT_RETURN */
10348 if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
10350 tree ptype = build_pointer_type (TREE_TYPE (funtype));
10352 FUNCTION_ARG_ADVANCE (cum, Pmode, ptype, 1);
10354 this = FUNCTION_ARG (cum, Pmode, ptr_type_node, 1);
10356 /* For SHcompact, we only have r0 for a scratch register: r1 is the
10357 static chain pointer (even if you can't have nested virtual functions
10358 right now, someone might implement them sometime), and the rest of the
10359 registers are used for argument passing, are callee-saved, or reserved. */
10360 /* We need to check call_used_regs / fixed_regs in case -fcall-saved-reg /
10361 -ffixed-reg has been used. */
10362 if (! call_used_regs[0] || fixed_regs[0])
10363 error ("r0 needs to be available as a call-clobbered register");
10364 scratch0 = scratch1 = scratch2 = gen_rtx_REG (Pmode, 0);
10367 if (call_used_regs[1] && ! fixed_regs[1])
10368 scratch1 = gen_rtx_REG (ptr_mode, 1);
10369 /* N.B., if not TARGET_HITACHI, register 2 is used to pass the pointer
10370 pointing to where struct values are returned. */
10371 if (call_used_regs[3] && ! fixed_regs[3])
10372 scratch2 = gen_rtx_REG (Pmode, 3);
10374 else if (TARGET_SHMEDIA)
10376 for (i = FIRST_GENERAL_REG; i <= LAST_GENERAL_REG; i++)
10377 if (i != REGNO (scratch0)
10378 && call_used_regs[i] && ! fixed_regs[i] && ! FUNCTION_ARG_REGNO_P (i))
10380 scratch1 = gen_rtx_REG (ptr_mode, i);
10383 if (scratch1 == scratch0)
10384 error ("Need a second call-clobbered general purpose register");
10385 for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
10386 if (call_used_regs[i] && ! fixed_regs[i])
10388 scratch2 = gen_rtx_REG (Pmode, i);
10391 if (scratch2 == scratch0)
10392 error ("Need a call-clobbered target register");
10395 this_value = plus_constant (this, delta);
10397 && (simple_add || scratch0 != scratch1)
10398 && strict_memory_address_p (ptr_mode, this_value))
10400 emit_load_ptr (scratch0, this_value);
10405 ; /* Do nothing. */
10406 else if (simple_add)
10407 emit_move_insn (this, this_value);
10410 emit_move_insn (scratch1, GEN_INT (delta));
10411 emit_insn (gen_add2_insn (this, scratch1));
10419 emit_load_ptr (scratch0, this);
10421 offset_addr = plus_constant (scratch0, vcall_offset);
10422 if (strict_memory_address_p (ptr_mode, offset_addr))
10423 ; /* Do nothing. */
10424 else if (! TARGET_SH5 && scratch0 != scratch1)
10426 /* scratch0 != scratch1, and we have indexed loads. Get better
10427 schedule by loading the offset into r1 and using an indexed
10428 load - then the load of r1 can issue before the load from
10429 (this + delta) finishes. */
10430 emit_move_insn (scratch1, GEN_INT (vcall_offset));
10431 offset_addr = gen_rtx_PLUS (Pmode, scratch0, scratch1);
10433 else if (CONST_OK_FOR_ADD (vcall_offset))
10435 emit_insn (gen_add2_insn (scratch0, GEN_INT (vcall_offset)));
10436 offset_addr = scratch0;
10438 else if (scratch0 != scratch1)
10440 emit_move_insn (scratch1, GEN_INT (vcall_offset));
10441 emit_insn (gen_add2_insn (scratch0, scratch1));
10442 offset_addr = scratch0;
10445 gcc_unreachable (); /* FIXME */
10446 emit_load_ptr (scratch0, offset_addr);
10448 if (Pmode != ptr_mode)
10449 scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
10450 emit_insn (gen_add2_insn (this, scratch0));
10453 /* Generate a tail call to the target function. */
10454 if (! TREE_USED (function))
10456 assemble_external (function);
10457 TREE_USED (function) = 1;
10459 funexp = XEXP (DECL_RTL (function), 0);
10460 /* If the function is overridden, so is the thunk, hence we don't
10461 need GOT addressing even if this is a public symbol. */
10463 if (TARGET_SH1 && ! flag_weak)
10464 sibcall = gen_sibcalli_thunk (funexp, const0_rtx);
10467 if (TARGET_SH2 && flag_pic)
10469 sibcall = gen_sibcall_pcrel (funexp, const0_rtx);
10470 XEXP (XVECEXP (sibcall, 0, 2), 0) = scratch2;
10474 if (TARGET_SHMEDIA && flag_pic)
10476 funexp = gen_sym2PIC (funexp);
10477 PUT_MODE (funexp, Pmode);
10479 emit_move_insn (scratch2, funexp);
10480 funexp = gen_rtx_MEM (FUNCTION_MODE, scratch2);
10481 sibcall = gen_sibcall (funexp, const0_rtx, NULL_RTX);
10483 sibcall = emit_call_insn (sibcall);
10484 SIBLING_CALL_P (sibcall) = 1;
10485 use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this);
10488 /* Run just enough of rest_of_compilation to do scheduling and get
10489 the insns emitted. Note that use_thunk calls
10490 assemble_start_function and assemble_end_function. */
10492 insn_locators_alloc ();
10493 insns = get_insns ();
10498 /* Initialize the bitmap obstacks. */
10499 bitmap_obstack_initialize (NULL);
10500 bitmap_obstack_initialize (®_obstack);
10503 rtl_register_cfg_hooks ();
10504 init_rtl_bb_info (ENTRY_BLOCK_PTR);
10505 init_rtl_bb_info (EXIT_BLOCK_PTR);
10506 ENTRY_BLOCK_PTR->flags |= BB_RTL;
10507 EXIT_BLOCK_PTR->flags |= BB_RTL;
10508 find_basic_blocks (insns);
10510 if (flag_schedule_insns_after_reload)
10512 life_analysis (PROP_FINAL);
10514 split_all_insns (1);
10518 /* We must split jmp insn in PIC case. */
10520 split_all_insns_noflow ();
10527 split_all_insns_noflow ();
10533 if (optimize > 0 && flag_delayed_branch)
10534 dbr_schedule (insns);
10536 shorten_branches (insns);
10537 final_start_function (insns, file, 1);
10538 final (insns, file, 1);
10539 final_end_function ();
10540 free_after_compilation (cfun);
10542 reload_completed = 0;
10543 epilogue_completed = 0;
10547 function_symbol (rtx target, const char *name, enum sh_function_kind kind)
10551 /* If this is not an ordinary function, the name usually comes from a
10552 string literal or an sprintf buffer. Make sure we use the same
10553 string consistently, so that cse will be able to unify address loads. */
10554 if (kind != FUNCTION_ORDINARY)
10555 name = IDENTIFIER_POINTER (get_identifier (name));
10556 sym = gen_rtx_SYMBOL_REF (Pmode, name);
10557 SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_FUNCTION;
10561 case FUNCTION_ORDINARY:
10565 rtx reg = target ? target : gen_reg_rtx (Pmode);
10567 emit_insn (gen_symGOT2reg (reg, sym));
10573 /* ??? To allow cse to work, we use GOTOFF relocations.
10574 We could add combiner patterns to transform this into
10575 straight pc-relative calls with sym2PIC / bsrf when
10576 label load and function call are still 1:1 and in the
10577 same basic block during combine. */
10578 rtx reg = target ? target : gen_reg_rtx (Pmode);
10580 emit_insn (gen_symGOTOFF2reg (reg, sym));
10585 if (target && sym != target)
10587 emit_move_insn (target, sym);
10593 /* Find the number of a general purpose register in S. */
10595 scavenge_reg (HARD_REG_SET *s)
10598 for (r = FIRST_GENERAL_REG; r <= LAST_GENERAL_REG; r++)
10599 if (TEST_HARD_REG_BIT (*s, r))
10605 sh_get_pr_initial_val (void)
10609 /* ??? Unfortunately, get_hard_reg_initial_val doesn't always work for the
10610 PR register on SHcompact, because it might be clobbered by the prologue.
10611 We check first if that is known to be the case. */
10612 if (TARGET_SHCOMPACT
10613 && ((crtl->args.info.call_cookie
10614 & ~ CALL_COOKIE_RET_TRAMP (1))
10615 || crtl->saves_all_registers))
10616 return gen_frame_mem (SImode, return_address_pointer_rtx);
10618 /* If we haven't finished rtl generation, there might be a nonlocal label
10619 that we haven't seen yet.
10620 ??? get_hard_reg_initial_val fails if it is called after register
10621 allocation has started, unless it has been called before for the
10622 same register. And even then, we end up in trouble if we didn't use
10623 the register in the same basic block before. So call
10624 get_hard_reg_initial_val now and wrap it in an unspec if we might
10625 need to replace it. */
10626 /* ??? We also must do this for TARGET_SH1 in general, because otherwise
10627 combine can put the pseudo returned by get_hard_reg_initial_val into
10628 instructions that need a general purpose register, which will fail to
10629 be recognized when the pseudo becomes allocated to PR. */
10631 = get_hard_reg_initial_val (Pmode, TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
10633 return gen_rtx_UNSPEC (SImode, gen_rtvec (1, val), UNSPEC_RA);
10638 sh_expand_t_scc (enum rtx_code code, rtx target)
10640 rtx result = target;
10643 if (GET_CODE (sh_compare_op0) != REG || REGNO (sh_compare_op0) != T_REG
10644 || GET_CODE (sh_compare_op1) != CONST_INT)
10646 if (GET_CODE (result) != REG)
10647 result = gen_reg_rtx (SImode);
10648 val = INTVAL (sh_compare_op1);
10649 if ((code == EQ && val == 1) || (code == NE && val == 0))
10650 emit_insn (gen_movt (result));
10651 else if (TARGET_SH2A && ((code == EQ && val == 0)
10652 || (code == NE && val == 1)))
10653 emit_insn (gen_movrt (result));
10654 else if ((code == EQ && val == 0) || (code == NE && val == 1))
10656 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
10657 emit_insn (gen_subc (result, result, result));
10658 emit_insn (gen_addsi3 (result, result, const1_rtx));
10660 else if (code == EQ || code == NE)
10661 emit_insn (gen_move_insn (result, GEN_INT (code == NE)));
10664 if (result != target)
10665 emit_move_insn (target, result);
10669 /* INSN is an sfunc; return the rtx that describes the address used. */
10671 extract_sfunc_addr (rtx insn)
10673 rtx pattern, part = NULL_RTX;
10676 pattern = PATTERN (insn);
10677 len = XVECLEN (pattern, 0);
10678 for (i = 0; i < len; i++)
10680 part = XVECEXP (pattern, 0, i);
10681 if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == Pmode
10682 && GENERAL_REGISTER_P (true_regnum (XEXP (part, 0))))
10683 return XEXP (part, 0);
10685 gcc_assert (GET_CODE (XVECEXP (pattern, 0, 0)) == UNSPEC_VOLATILE);
10686 return XVECEXP (XVECEXP (pattern, 0, 0), 0, 1);
10689 /* Verify that the register in use_sfunc_addr still agrees with the address
10690 used in the sfunc. This prevents fill_slots_from_thread from changing
10691 use_sfunc_addr.
10692 INSN is the use_sfunc_addr instruction, and REG is the register it guards. */
10695 check_use_sfunc_addr (rtx insn, rtx reg)
10697 /* Search for the sfunc. It should really come right after INSN. */
10698 while ((insn = NEXT_INSN (insn)))
10700 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
10702 if (! INSN_P (insn))
10705 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10706 insn = XVECEXP (PATTERN (insn), 0, 0);
10707 if (GET_CODE (PATTERN (insn)) != PARALLEL
10708 || get_attr_type (insn) != TYPE_SFUNC)
10710 return rtx_equal_p (extract_sfunc_addr (insn), reg);
10712 gcc_unreachable ();
10715 /* This function returns a constant rtx that represents 2**15 / pi in
10716 SFmode. It's used to scale SFmode angles, in radians, to a
10717 fixed-point signed 16.16-bit fraction of a full circle (i.e., 2*pi
10718 maps to 0x10000). */
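/* Worked check: 2**15 / pi = 32768 / 3.14159... = 10430.378..., so an
angle of 2*pi radians scales to 2*pi * 10430.378... = 65536 = 0x10000,
one full circle. */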
10720 static GTY(()) rtx sh_fsca_sf2int_rtx;
10723 sh_fsca_sf2int (void)
10725 if (! sh_fsca_sf2int_rtx)
10727 REAL_VALUE_TYPE rv;
10729 real_from_string (&rv, "10430.378350470453");
10730 sh_fsca_sf2int_rtx = const_double_from_real_value (rv, SFmode);
10733 return sh_fsca_sf2int_rtx;
10736 /* This function returns a constant rtx that represents 2**15 / pi in
10737 DFmode. It's used to scale DFmode angles, in radians, to a
10738 fixed-point signed 16.16-bit fraction of a full circle (i.e., 2*pi
10739 maps to 0x10000). */
10741 static GTY(()) rtx sh_fsca_df2int_rtx;
10744 sh_fsca_df2int (void)
10746 if (! sh_fsca_df2int_rtx)
10748 REAL_VALUE_TYPE rv;
10750 real_from_string (&rv, "10430.378350470453");
10751 sh_fsca_df2int_rtx = const_double_from_real_value (rv, DFmode);
10754 return sh_fsca_df2int_rtx;
10757 /* This function returns a constant rtx that represents pi / 2**15 in
10758 SFmode. It's used to scale a fixed-point signed 16.16-bit fraction
10759 of a full circle back to an SFmode value (i.e., 0x10000 maps to 2*pi). */
10762 static GTY(()) rtx sh_fsca_int2sf_rtx;
10765 sh_fsca_int2sf (void)
10767 if (! sh_fsca_int2sf_rtx)
10769 REAL_VALUE_TYPE rv;
10771 real_from_string (&rv, "9.587379924285257e-5");
10772 sh_fsca_int2sf_rtx = const_double_from_real_value (rv, SFmode);
10775 return sh_fsca_int2sf_rtx;
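
/* pi / 2**15 = 3.14159... / 32768 = 9.5873799...e-5, the exact reciprocal
   of the scale factor above, so 0x10000 * pi / 2**15 = 2*pi and a round
   trip through both constants is an identity, up to rounding.  */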

/* Initialize the CUMULATIVE_ARGS structure.  */

void
sh_init_cumulative_args (CUMULATIVE_ARGS * pcum,
			 tree fntype,
			 rtx libname ATTRIBUTE_UNUSED,
			 tree fndecl,
			 signed int n_named_args,
			 enum machine_mode mode)
{
  pcum->arg_count [(int) SH_ARG_FLOAT] = 0;
  pcum->free_single_fp_reg = 0;
  pcum->stack_regs = 0;
  pcum->byref_regs = 0;
  pcum->byref = 0;
  pcum->outgoing = (n_named_args == -1) ? 0 : 1;

  /* XXX - Should we check TARGET_HITACHI here ???  */
  pcum->renesas_abi = sh_attr_renesas_p (fntype) ? 1 : 0;

  if (fntype)
    {
      pcum->force_mem = ((TARGET_HITACHI || pcum->renesas_abi)
			 && aggregate_value_p (TREE_TYPE (fntype), fndecl));
      pcum->prototype_p = TYPE_ARG_TYPES (fntype) ? TRUE : FALSE;
      pcum->arg_count [(int) SH_ARG_INT]
	= TARGET_SH5 && aggregate_value_p (TREE_TYPE (fntype), fndecl);

      pcum->call_cookie
	= CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
				 && pcum->arg_count [(int) SH_ARG_INT] == 0
				 && (TYPE_MODE (TREE_TYPE (fntype)) == BLKmode
				     ? int_size_in_bytes (TREE_TYPE (fntype))
				     : GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (fntype)))) > 4
				 && (BASE_RETURN_VALUE_REG (TYPE_MODE (TREE_TYPE (fntype)))
				     == FIRST_RET_REG));
    }
  else
    {
      pcum->arg_count [(int) SH_ARG_INT] = 0;
      pcum->prototype_p = FALSE;
      if (mode != VOIDmode)
	{
	  pcum->call_cookie =
	    CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
				   && GET_MODE_SIZE (mode) > 4
				   && BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG);

	  /* If the default ABI is the Renesas ABI then all library
	     calls must assume that the library will be using the
	     Renesas ABI.  So if the function would return its result
	     in memory then we must force the address of this memory
	     block onto the stack.  Ideally we would like to call
	     targetm.calls.return_in_memory() here but we do not have
	     the TYPE or the FNDECL available so we synthesize the
	     contents of that function as best we can.  */
	  pcum->force_mem =
	    (TARGET_DEFAULT & MASK_HITACHI)
	    && (mode == BLKmode
		|| (GET_MODE_SIZE (mode) > 4
		    && !(mode == DFmode
			 && TARGET_FPU_DOUBLE)));
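	  /* E.g. a libcall returning DImode (8 bytes) has its return
	     value forced into memory under the Renesas ABI, while a
	     DFmode result stays in registers when the FPU handles
	     doubles.  */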
	}
      else
	{
	  pcum->call_cookie = 0;
	  pcum->force_mem = FALSE;
	}
    }
}

/* Replace any occurrence of FROM(n) in X with TO(n).  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.

   This is like replace_rtx, except that we operate on N_REPLACEMENTS
   replacements simultaneously - FROM(n) is replacements[n*2] and TO(n) is
   replacements[n*2+1] - and that we take mode changes into account.

   If a replacement is ambiguous, return NULL_RTX.

   If MODIFY is zero, don't modify any rtl in place,
   just return zero or nonzero for failure / success.  */
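
/* A hypothetical caller replacing hard registers r4 and r5 with r6 and r7
   in a pattern PAT, modifying it in place, might look like:

     rtx repl[4];
     repl[0] = gen_rtx_REG (SImode, 4);  repl[1] = gen_rtx_REG (SImode, 6);
     repl[2] = gen_rtx_REG (SImode, 5);  repl[3] = gen_rtx_REG (SImode, 7);
     pat = replace_n_hard_rtx (pat, repl, 2, 1);

   The register numbers and mode here are illustrative only.  */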
rtx
replace_n_hard_rtx (rtx x, rtx *replacements, int n_replacements, int modify)
{
  enum rtx_code code;
  int i, j;
  const char *fmt;

  /* The following prevents a loop occurring when we change a MEM in
     CONST_DOUBLE onto the same CONST_DOUBLE.  */
  if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
    return x;

  for (i = n_replacements - 1; i >= 0 ; i--)
    if (x == replacements[i*2] && GET_MODE (x) == GET_MODE (replacements[i*2+1]))
      return replacements[i*2+1];

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new = replace_n_hard_rtx (SUBREG_REG (x), replacements,
				    n_replacements, modify);

      if (GET_CODE (new) == CONST_INT)
	{
	  x = simplify_subreg (GET_MODE (x), new,
			       GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
	  if (! x)
	    abort ();
	}
      else if (modify)
	SUBREG_REG (x) = new;

      return x;
    }
  else if (GET_CODE (x) == REG)
    {
      unsigned regno = REGNO (x);
      unsigned nregs = (regno < FIRST_PSEUDO_REGISTER
			? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
      rtx result = NULL_RTX;

      for (i = n_replacements - 1; i >= 0; i--)
	{
	  rtx from = replacements[i*2];
	  rtx to = replacements[i*2+1];
	  unsigned from_regno, from_nregs, to_regno, new_regno;

	  if (GET_CODE (from) != REG)
	    continue;
	  from_regno = REGNO (from);
	  from_nregs = (from_regno < FIRST_PSEUDO_REGISTER
			? HARD_REGNO_NREGS (from_regno, GET_MODE (from)) : 1);
	  if (regno < from_regno + from_nregs && regno + nregs > from_regno)
	    {
	      if (regno < from_regno
		  || regno + nregs > from_regno + nregs
		  || GET_CODE (to) != REG
		  || result)
		return NULL_RTX;
	      to_regno = REGNO (to);
	      if (to_regno < FIRST_PSEUDO_REGISTER)
		{
		  new_regno = regno + to_regno - from_regno;
		  if ((unsigned) HARD_REGNO_NREGS (new_regno, GET_MODE (x))
		      != nregs)
		    return NULL_RTX;
		  result = gen_rtx_REG (GET_MODE (x), new_regno);
		}
	      else if (GET_MODE (x) <= GET_MODE (to))
		result = gen_lowpart_common (GET_MODE (x), to);
	      else
		result = gen_lowpart_SUBREG (GET_MODE (x), to);
	    }
	}
      return result ? result : x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new = replace_n_hard_rtx (XEXP (x, 0), replacements,
				    n_replacements, modify);

      if (GET_CODE (new) == CONST_INT)
	{
	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					new, GET_MODE (XEXP (x, 0)));
	  if (! x)
	    abort ();
	}
      else if (modify)
	XEXP (x, 0) = new;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      rtx new;

      if (fmt[i] == 'e')
	{
	  new = replace_n_hard_rtx (XEXP (x, i), replacements,
				    n_replacements, modify);
	  if (! new)
	    return NULL_RTX;
	  if (modify)
	    XEXP (x, i) = new;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  {
	    new = replace_n_hard_rtx (XVECEXP (x, i, j), replacements,
				      n_replacements, modify);
	    if (! new)
	      return NULL_RTX;
	    if (modify)
	      XVECEXP (x, i, j) = new;
	  }
    }

  return x;
}

rtx
sh_gen_truncate (enum machine_mode mode, rtx x, int need_sign_ext)
{
  enum rtx_code code = TRUNCATE;

  if (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
    {
      rtx inner = XEXP (x, 0);
      enum machine_mode inner_mode = GET_MODE (inner);

      if (inner_mode == mode)
	return inner;
      else if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
	x = inner;
      else if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode)
	       && (! need_sign_ext || GET_CODE (x) == SIGN_EXTEND))
	{
	  code = GET_CODE (x);
	  x = inner;
	}
    }
  return gen_rtx_fmt_e (code, mode, x);
}
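
/* For example, truncating (sign_extend:DI (reg:HI)) to SImode with
   NEED_SIGN_EXT set peels off the extension and rebuilds it in the
   narrower mode, yielding (sign_extend:SI (reg:HI)) instead of a
   TRUNCATE of the wider extension.  */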

/* Called via for_each_rtx after reload, to clean up truncates of
   registers that span multiple actual hard registers.  */
int
shmedia_cleanup_truncate (rtx *p, void *n_changes)
{
  rtx x = *p, reg;

  if (GET_CODE (x) != TRUNCATE)
    return 0;
  reg = XEXP (x, 0);
  if (GET_MODE_SIZE (GET_MODE (reg)) > 8 && GET_CODE (reg) == REG)
    {
      enum machine_mode reg_mode = GET_MODE (reg);
      XEXP (x, 0) = simplify_subreg (DImode, reg, reg_mode,
				     subreg_lowpart_offset (DImode, reg_mode));
      *(int*) n_changes += 1;
      return -1;
    }
  return 0;
}
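
/* A caller walks a pattern and counts the rewrites, e.g.:

     int n_changes = 0;
     for_each_rtx (&PATTERN (insn), shmedia_cleanup_truncate, &n_changes);

   This usage sketch is illustrative; note that for_each_rtx skips the
   subexpressions of the current rtx when the callback returns -1, as
   above.  */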

/* Load and store depend on the highpart of the address.  However,
   set_attr_alternative does not give well-defined results before reload,
   so we must look at the rtl ourselves to see if any of the feeding
   registers is used in a memref.  */

/* Called by sh_contains_memref_p via for_each_rtx.  */
static int
sh_contains_memref_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  return (GET_CODE (*loc) == MEM);
}

/* Return nonzero iff INSN contains a MEM.  */
int
sh_contains_memref_p (rtx insn)
{
  return for_each_rtx (&PATTERN (insn), &sh_contains_memref_p_1, NULL);
}

/* Return nonzero iff INSN loads a banked register.  */
int
sh_loads_bankedreg_p (rtx insn)
{
  if (GET_CODE (PATTERN (insn)) == SET)
    {
      rtx op = SET_DEST (PATTERN (insn));
      if (REG_P (op) && BANKED_REGISTER_P (REGNO (op)))
	return 1;
    }

  return 0;
}

/* FNADDR is the MEM expression from a call expander.  Return an address
   to use in an SHmedia insn pattern.  */
rtx
shmedia_prepare_call_address (rtx fnaddr, int is_sibcall)
{
  int is_sym;

  fnaddr = XEXP (fnaddr, 0);
  is_sym = GET_CODE (fnaddr) == SYMBOL_REF;
  if (flag_pic && is_sym)
    {
      if (! SYMBOL_REF_LOCAL_P (fnaddr))
	{
	  rtx reg = gen_reg_rtx (Pmode);

	  /* We must not use GOTPLT for sibcalls, because PIC_REG
	     must be restored before the PLT code gets to run.  */
	  if (is_sibcall)
	    emit_insn (gen_symGOT2reg (reg, fnaddr));
	  else
	    emit_insn (gen_symGOTPLT2reg (reg, fnaddr));
	  fnaddr = reg;
	}
      else
	{
	  fnaddr = gen_sym2PIC (fnaddr);
	  PUT_MODE (fnaddr, Pmode);
	}
    }
  /* If ptabs might trap, make this visible to the rest of the compiler.
     We generally assume that symbols pertain to valid locations, but
     it is possible to generate invalid symbols with asm or linker tricks.
     In a list of functions where each returns its successor, an invalid
     symbol might denote an empty list.  */
  if (!TARGET_PT_FIXED
      && (!is_sym || TARGET_INVALID_SYMBOLS)
      && (!REG_P (fnaddr) || ! TARGET_REGISTER_P (REGNO (fnaddr))))
    {
      rtx tr = gen_reg_rtx (PDImode);

      emit_insn (gen_ptabs (tr, fnaddr));
      fnaddr = tr;
    }
  else if (! target_reg_operand (fnaddr, Pmode))
    fnaddr = copy_to_mode_reg (Pmode, fnaddr);
  return fnaddr;
}

enum reg_class
sh_secondary_reload (bool in_p, rtx x, enum reg_class class,
		     enum machine_mode mode, secondary_reload_info *sri)
{
  if (in_p)
    {
      if (REGCLASS_HAS_FP_REG (class)
	  && ! TARGET_SHMEDIA
	  && immediate_operand ((x), mode)
	  && ! ((fp_zero_operand (x) || fp_one_operand (x))
		&& mode == SFmode && fldi_ok ()))
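	/* fldi0 and fldi1 can materialize 0.0 and 1.0 directly, so only
	   other SFmode immediates need the reload patterns selected
	   below.  */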
	switch (mode)
	  {
	  case SFmode:
	    sri->icode = CODE_FOR_reload_insf__frn;
	    break;
	  case DFmode:
	    sri->icode = CODE_FOR_reload_indf__frn;
	    break;
	  case SImode:
	    /* ??? If we knew that we are in the appropriate mode -
	       single precision - we could use a reload pattern directly.  */
	    return FPUL_REGS;
	  default:
	    abort ();
	  }
      if (class == FPUL_REGS
	  && ((GET_CODE (x) == REG
	       && (REGNO (x) == MACL_REG || REGNO (x) == MACH_REG
		   || REGNO (x) == T_REG))
	      || GET_CODE (x) == PLUS))
	return GENERAL_REGS;
      if (class == FPUL_REGS && immediate_operand (x, mode))
	{
	  if (satisfies_constraint_I08 (x) || fp_zero_operand (x))
	    return GENERAL_REGS;
	  else if (mode == SFmode)
	    return FP_REGS;
	  sri->icode = CODE_FOR_reload_insi__i_fpul;
	  return NO_REGS;
	}
      if (class == FPSCR_REGS
	  && ((GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
	      || (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == PLUS)))
	return GENERAL_REGS;
      if (REGCLASS_HAS_FP_REG (class)
	  && TARGET_SHMEDIA
	  && immediate_operand (x, mode)
	  && x != CONST0_RTX (GET_MODE (x))
	  && GET_MODE (x) != V4SFmode)
	return GENERAL_REGS;
      if ((mode == QImode || mode == HImode)
	  && TARGET_SHMEDIA && inqhi_operand (x, mode))
	{
	  sri->icode = ((mode == QImode)
			? CODE_FOR_reload_inqi : CODE_FOR_reload_inhi);
	  return NO_REGS;
	}
      if (TARGET_SHMEDIA && class == GENERAL_REGS
	  && (GET_CODE (x) == LABEL_REF || PIC_DIRECT_ADDR_P (x)))
	return TARGET_REGS;
    } /* end of input-only processing.  */

  if (((REGCLASS_HAS_FP_REG (class)
	&& (GET_CODE (x) == REG
	    && (GENERAL_OR_AP_REGISTER_P (REGNO (x))
		|| (FP_REGISTER_P (REGNO (x)) && mode == SImode
		    && TARGET_FMOVD))))
       || (REGCLASS_HAS_GENERAL_REG (class)
	   && GET_CODE (x) == REG
	   && FP_REGISTER_P (REGNO (x))))
      && ! TARGET_SHMEDIA
      && (mode == SFmode || mode == SImode))
    return FPUL_REGS;
  if ((class == FPUL_REGS
       || (REGCLASS_HAS_FP_REG (class)
	   && ! TARGET_SHMEDIA && mode == SImode))
      && (GET_CODE (x) == MEM
	  || (GET_CODE (x) == REG
	      && (REGNO (x) >= FIRST_PSEUDO_REGISTER
		  || REGNO (x) == T_REG
		  || system_reg_operand (x, VOIDmode)))))
    {
      if (class == FPUL_REGS)
	return GENERAL_REGS;
      return FPUL_REGS;
    }
  if ((class == TARGET_REGS
       || (TARGET_SHMEDIA && class == SIBCALL_REGS))
      && !satisfies_constraint_Csy (x)
      && (GET_CODE (x) != REG || ! GENERAL_REGISTER_P (REGNO (x))))
    return GENERAL_REGS;
  if ((class == MAC_REGS || class == PR_REGS)
      && GET_CODE (x) == REG && ! GENERAL_REGISTER_P (REGNO (x))
      && class != REGNO_REG_CLASS (REGNO (x)))
    return GENERAL_REGS;
  if (class != GENERAL_REGS && GET_CODE (x) == REG
      && TARGET_REGISTER_P (REGNO (x)))
    return GENERAL_REGS;
  return NO_REGS;
}
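
/* For instance, with CLASS == FPUL_REGS and X being the T register, the
   input-reload test above returns GENERAL_REGS: there is no direct
   T -> FPUL move, so reload first copies T into a general register and
   then moves that into FPUL.  */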

enum sh_divide_strategy_e sh_div_strategy = SH_DIV_STRATEGY_DEFAULT;