/* Output routines for GCC for Renesas / SuperH SH.
   Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
   2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
   Contributed by Steve Chamberlain (sac@cygnus.com).
   Improved by Jim Wilson (wilson@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "insn-config.h"
#include "rtl.h"
#include "tree.h"
#include "flags.h"
#include "expr.h"
#include "optabs.h"
#include "function.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "output.h"
#include "insn-attr.h"
#include "toplev.h"
#include "recog.h"
#include "integrate.h"
#include "dwarf2.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "real.h"
#include "langhooks.h"
#include "basic-block.h"
#include "df.h"
#include "cfglayout.h"
#include "intl.h"
#include "sched-int.h"
#include "ggc.h"
#include "tree-gimple.h"
#include "cfgloop.h"
#include "alloc-pool.h"
#include "tm-constrs.h"
int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;

#define MSW (TARGET_LITTLE_ENDIAN ? 1 : 0)
#define LSW (TARGET_LITTLE_ENDIAN ? 0 : 1)
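/* For example, a DImode value held in the register pair (r4,r5) keeps its
   low 32 bits in the first word on a little-endian target (LSW == 0) and
   in the second word on a big-endian one (LSW == 1).  */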
/* These are some macros to abstract register modes.  */
#define CONST_OK_FOR_ADD(size) \
  (TARGET_SHMEDIA ? CONST_OK_FOR_I10 (size) : CONST_OK_FOR_I08 (size))
#define GEN_MOV (*(TARGET_SHMEDIA64 ? gen_movdi : gen_movsi))
#define GEN_ADD3 (*(TARGET_SHMEDIA64 ? gen_adddi3 : gen_addsi3))
#define GEN_SUB3 (*(TARGET_SHMEDIA64 ? gen_subdi3 : gen_subsi3))
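/* These let Pmode-sized address arithmetic be emitted without testing the
   target everywhere; e.g. GEN_ADD3 (reg, reg, GEN_INT (-8)) expands to an
   adddi3 on SHmedia64 (64-bit pointers) and to an addsi3 elsewhere.  */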
/* Used to simplify the logic below.  Find the attributes wherever
   they may be.  */
#define SH_ATTRIBUTES(decl) \
  (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
                  : DECL_ATTRIBUTES (decl) \
                    ? (DECL_ATTRIBUTES (decl)) \
                    : TYPE_ATTRIBUTES (TREE_TYPE (decl))
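/* Thus, applied to a FUNCTION_DECL that carries no attributes of its own,
   SH_ATTRIBUTES falls back to the attributes of the decl's type.  */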
/* Set to 1 by expand_prologue() when the function is an interrupt handler.  */
int current_function_interrupt;

tree sh_deferred_function_attributes;
tree *sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
/* Global variables for machine-dependent things.  */

/* Which cpu are we scheduling for.  */
enum processor_type sh_cpu;

/* Definitions used in ready queue reordering for first scheduling pass.  */

/* Reg weights arrays for modes SFmode and SImode, indexed by insn LUID.  */
static short *regmode_weight[2];

/* Total SFmode and SImode weights of scheduled insns.  */
static int curr_regmode_pressure[2];

/* Number of r0 life regions.  */
static int r0_life_regions;

/* If true, skip cycles for Q -> R movement.  */
static int skip_cycles = 0;

/* Cached value of can_issue_more.  This is cached in sh_variable_issue hook
   and returned from sh_reorder2.  */
static short cached_can_issue_more;
/* Saved operands from the last compare to use when we generate an scc
   or bcc insn.  */

rtx sh_compare_op0;
rtx sh_compare_op1;

/* Provides the class number of the smallest class containing
   reg number.  */
enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  R0_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  FP0_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  NO_REGS, GENERAL_REGS, PR_REGS, T_REGS,
  MAC_REGS, MAC_REGS, FPUL_REGS, FPSCR_REGS,
  GENERAL_REGS, GENERAL_REGS,
};
char sh_register_names[FIRST_PSEUDO_REGISTER] \
  [MAX_REGISTER_NAME_LENGTH + 1] = SH_REGISTER_NAMES_INITIALIZER;

char sh_additional_register_names[ADDREGNAMES_SIZE] \
  [MAX_ADDITIONAL_REGISTER_NAME_LENGTH + 1]
  = SH_ADDITIONAL_REGISTER_NAMES_INITIALIZER;

int assembler_dialect;

static bool shmedia_space_reserved_for_target_registers;
static bool sh_handle_option (size_t, const char *, int);
static void split_branches (rtx);
static int branch_dest (rtx);
static void force_into (rtx, rtx);
static void print_slot (rtx);
static rtx add_constant (rtx, enum machine_mode, rtx);
static void dump_table (rtx, rtx);
static int hi_const (rtx);
static int broken_move (rtx);
static int mova_p (rtx);
static rtx find_barrier (int, rtx, rtx);
static int noncall_uses_reg (rtx, rtx, rtx *);
static rtx gen_block_redirect (rtx, int, int);
static void sh_reorg (void);
static void output_stack_adjust (int, rtx, int, HARD_REG_SET *);
static rtx frame_insn (rtx);
static rtx push (int);
static void pop (int);
static void push_regs (HARD_REG_SET *, int);
static int calc_live_regs (HARD_REG_SET *);
static HOST_WIDE_INT rounded_frame_size (int);
static rtx mark_constant_pool_use (rtx);
static const struct attribute_spec sh_attribute_table[];
static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_resbank_handler_attribute (tree *, tree,
                                                 tree, int, bool *);
static tree sh2a_handle_function_vector_handler_attribute (tree *, tree,
                                                           tree, int, bool *);
static tree sh_handle_sp_switch_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_trap_exit_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_renesas_attribute (tree *, tree, tree, int, bool *);
static void sh_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void sh_insert_attributes (tree, tree *);
static const char *sh_check_pch_target_flags (int);
static int sh_adjust_cost (rtx, rtx, rtx, int);
static int sh_issue_rate (void);
static int sh_dfa_new_cycle (FILE *, int, rtx, int, int, int *sort_p);
static short find_set_regmode_weight (rtx, enum machine_mode);
static short find_insn_regmode_weight (rtx, enum machine_mode);
static void find_regmode_weight (basic_block, enum machine_mode);
static int find_r0_life_regions (basic_block);
static void sh_md_init_global (FILE *, int, int);
static void sh_md_finish_global (FILE *, int);
static int rank_for_reorder (const void *, const void *);
static void swap_reorder (rtx *, int);
static void ready_reorder (rtx *, int);
static short high_pressure (enum machine_mode);
static int sh_reorder (FILE *, int, rtx *, int *, int);
static int sh_reorder2 (FILE *, int, rtx *, int *, int);
static void sh_md_init (FILE *, int, int);
static int sh_variable_issue (FILE *, int, rtx, int);

static bool sh_function_ok_for_sibcall (tree, tree);

static bool sh_cannot_modify_jumps_p (void);
static int sh_target_reg_class (void);
static bool sh_optimize_target_register_callee_saved (bool);
static bool sh_ms_bitfield_layout_p (const_tree);

static void sh_init_builtins (void);
static void sh_media_init_builtins (void);
static rtx sh_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
static void sh_file_start (void);
static int flow_dependent_p (rtx, rtx);
static void flow_dependent_p_1 (rtx, const_rtx, void *);
static int shiftcosts (rtx);
static int andcosts (rtx);
static int addsubcosts (rtx);
static int multcosts (rtx);
static bool unspec_caller_rtx_p (rtx);
static bool sh_cannot_copy_insn_p (rtx);
static bool sh_rtx_costs (rtx, int, int, int *);
static int sh_address_cost (rtx);
static int sh_pr_n_sets (void);
static rtx sh_allocate_initial_value (rtx);
static int shmedia_target_regs_stack_space (HARD_REG_SET *);
static int shmedia_reserve_space_for_target_registers_p (int, HARD_REG_SET *);
static int shmedia_target_regs_stack_adjust (HARD_REG_SET *);
static int scavenge_reg (HARD_REG_SET *s);
struct save_schedule_s;
static struct save_entry_s *sh5_schedule_saves (HARD_REG_SET *,
                                                struct save_schedule_s *, int);

static rtx sh_struct_value_rtx (tree, int);
static bool sh_return_in_memory (const_tree, const_tree);
static rtx sh_builtin_saveregs (void);
static void sh_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
static bool sh_strict_argument_naming (CUMULATIVE_ARGS *);
static bool sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *);
static tree sh_build_builtin_va_list (void);
static tree sh_canonical_va_list_type (tree);
static void sh_va_start (tree, rtx);
static tree sh_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool sh_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
                                  const_tree, bool);
static bool sh_callee_copies (CUMULATIVE_ARGS *, enum machine_mode,
                              const_tree, bool);
static int sh_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                 tree, bool);
static bool sh_scalar_mode_supported_p (enum machine_mode);
static int sh_dwarf_calling_convention (const_tree);
static void sh_encode_section_info (tree, rtx, int);
static int sh2a_function_vector_p (tree);
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sh_attribute_table

/* The next two are used for debug info when compiling with -gdwarf.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.ualong\t"

/* These are NULLed out on non-SH5 in OVERRIDE_OPTIONS.  */
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaquad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sh_output_function_epilogue

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sh_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START sh_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sh_handle_option

#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES sh_insert_attributes

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sh_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sh_issue_rate
/* The next 5 hooks have been implemented for reenabling sched1.  With the
   help of these macros we are limiting the movement of insns in sched1 to
   reduce the register pressure.  The overall idea is to keep count of SImode
   and SFmode regs required by already scheduled insns.  When these counts
   cross some threshold values, give priority to insns that free registers.
   The insn that frees registers is most likely to be the insn with lowest
   LUID (original insn order); but such an insn might be there in the stalled
   queue (Q) instead of the ready queue (R).  To solve this, we skip cycles
   up to a max of 8 cycles so that such insns may move from Q -> R.

   The hooks are described below:

   TARGET_SCHED_INIT_GLOBAL: Added a new target hook in the generic
   scheduler; it is called inside the sched_init function just after
   find_insn_reg_weights function call.  It is used to calculate the SImode
   and SFmode weights of insns of basic blocks; much like what
   find_insn_reg_weights does.
   TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.

   TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
   indicated by TARGET_SCHED_REORDER2; doing this may move insns from
   Q -> R.

   TARGET_SCHED_REORDER: If the register pressure for SImode or SFmode is
   high, reorder the ready queue so that the insn with lowest LUID will be
   issued next.

   TARGET_SCHED_REORDER2: If the register pressure is high, indicate to
   TARGET_SCHED_DFA_NEW_CYCLE to skip cycles.

   TARGET_SCHED_VARIABLE_ISSUE: Cache the value of can_issue_more so that it
   can be returned from TARGET_SCHED_REORDER2.

   TARGET_SCHED_INIT: Reset the register pressure counting variables.  */
#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE sh_dfa_new_cycle

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL sh_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL sh_md_finish_global

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE sh_variable_issue

#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER sh_reorder

#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 sh_reorder2

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sh_md_init

#undef TARGET_CANNOT_MODIFY_JUMPS_P
#define TARGET_CANNOT_MODIFY_JUMPS_P sh_cannot_modify_jumps_p
#undef TARGET_BRANCH_TARGET_REGISTER_CLASS
#define TARGET_BRANCH_TARGET_REGISTER_CLASS sh_target_reg_class
#undef TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED
#define TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED \
  sh_optimize_target_register_callee_saved

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P sh_ms_bitfield_layout_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sh_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sh_expand_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sh_function_ok_for_sibcall

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P sh_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sh_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST sh_address_cost
#undef TARGET_ALLOCATE_INITIAL_VALUE
#define TARGET_ALLOCATE_INITIAL_VALUE sh_allocate_initial_value

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG sh_reorg

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS sh_promote_prototypes
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN sh_promote_prototypes
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sh_return_in_memory

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sh_builtin_saveregs
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS sh_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sh_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES sh_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sh_arg_partial_bytes

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list
#undef TARGET_CANONICAL_VA_LIST_TYPE
#define TARGET_CANONICAL_VA_LIST_TYPE sh_canonical_va_list_type
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sh_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sh_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P sh_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sh_vector_mode_supported_p

#undef TARGET_CHECK_PCH_TARGET_FLAGS
#define TARGET_CHECK_PCH_TARGET_FLAGS sh_check_pch_target_flags

#undef TARGET_DWARF_CALLING_CONVENTION
#define TARGET_DWARF_CALLING_CONVENTION sh_dwarf_calling_convention
/* Return regmode weight for insn.  */
#define INSN_REGMODE_WEIGHT(INSN, MODE) \
  regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)]

/* Return current register pressure for regmode.  */
#define CURR_REGMODE_PRESSURE(MODE) \
  curr_regmode_pressure[((MODE) == SImode) ? 0 : 1]
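/* CURR_REGMODE_PRESSURE (SImode) / CURR_REGMODE_PRESSURE (SFmode) are the
   running totals that the TARGET_SCHED_* hooks above compare against their
   thresholds, and INSN_REGMODE_WEIGHT is the per-insn weight they add.  */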
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO sh_encode_section_info

#ifdef SYMBIAN

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO sh_symbian_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING sh_symbian_strip_name_encoding
#undef TARGET_CXX_IMPORT_EXPORT_CLASS
#define TARGET_CXX_IMPORT_EXPORT_CLASS symbian_import_export_class

#endif /* SYMBIAN */

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sh_secondary_reload

/* Machine-specific symbol_ref flags.  */
#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
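/* This flag is set by sh_encode_section_info (below) on SH2A functions
   that carry the function_vector attribute.  */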
struct gcc_target targetm = TARGET_INITIALIZER;
/* Implement TARGET_HANDLE_OPTION.  */

static bool
sh_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED,
                  int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_m1:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH1;
      return true;
    case OPT_m2:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2;
      return true;
    case OPT_m2a:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A;
      return true;
    case OPT_m2a_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_NOFPU;
      return true;
    case OPT_m2a_single:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE;
      return true;
    case OPT_m2a_single_only:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE_ONLY;
      return true;
    case OPT_m2e:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2E;
      return true;
    case OPT_m3:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3;
      return true;
    case OPT_m3e:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3E;
      return true;
    case OPT_m4:
    case OPT_m4_100:
    case OPT_m4_200:
    case OPT_m4_300:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4;
      return true;
    case OPT_m4_nofpu:
    case OPT_m4_100_nofpu:
    case OPT_m4_200_nofpu:
    case OPT_m4_300_nofpu:
    case OPT_m4_340:
    case OPT_m4_400:
    case OPT_m4_500:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_NOFPU;
      return true;
    case OPT_m4_single:
    case OPT_m4_100_single:
    case OPT_m4_200_single:
    case OPT_m4_300_single:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE;
      return true;
    case OPT_m4_single_only:
    case OPT_m4_100_single_only:
    case OPT_m4_200_single_only:
    case OPT_m4_300_single_only:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE_ONLY;
      return true;
    case OPT_m4a:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A;
      return true;
    case OPT_m4a_nofpu:
    case OPT_m4al:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_NOFPU;
      return true;
    case OPT_m4a_single:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE;
      return true;
    case OPT_m4a_single_only:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE_ONLY;
      return true;
    case OPT_m5_32media:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA;
      return true;
    case OPT_m5_32media_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA_NOFPU;
      return true;
    case OPT_m5_64media:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA;
      return true;
    case OPT_m5_64media_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA_NOFPU;
      return true;
    case OPT_m5_compact:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT;
      return true;
    case OPT_m5_compact_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT_NOFPU;
      return true;
    default:
      return true;
    }
}
/* Print the operand address in x to the stream.  */

void
print_operand_address (FILE *stream, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
      fprintf (stream, "@%s", reg_names[true_regnum (x)]);
      break;

    case PLUS:
      {
        rtx base = XEXP (x, 0);
        rtx index = XEXP (x, 1);

        switch (GET_CODE (index))
          {
          case CONST_INT:
            fprintf (stream, "@(%d,%s)", (int) INTVAL (index),
                     reg_names[true_regnum (base)]);
            break;

          case REG:
          case SUBREG:
            {
              int base_num = true_regnum (base);
              int index_num = true_regnum (index);

              fprintf (stream, "@(r0,%s)",
                       reg_names[MAX (base_num, index_num)]);
              break;
            }

          default:
            gcc_unreachable ();
          }
      }
      break;

    case PRE_DEC:
      fprintf (stream, "@-%s", reg_names[true_regnum (XEXP (x, 0))]);
      break;

    case POST_INC:
      fprintf (stream, "@%s+", reg_names[true_regnum (XEXP (x, 0))]);
      break;

    default:
      x = mark_constant_pool_use (x);
      output_addr_const (stream, x);
      break;
    }
}
/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   '.'  print a .s if insn needs delay slot
   ','  print LOCAL_LABEL_PREFIX
   '@'  print trap, rte or rts depending upon whether the function is
        an interrupt handler (and which kind)
   '#'  output a nop if there is nothing to put in the delay slot
   '''  print likelihood suffix (/u for unlikely).
   '>'  print branch target if -fverbose-asm
   'O'  print a constant without the #
   'R'  print the LSW of a dp value - changes if in little endian
   'S'  print the MSW of a dp value - changes if in little endian
   'T'  print the next word of a dp value - same as 'R' in big endian mode.
   'M'  SHMEDIA: print an `x' if `m' will print `base,index'.
        otherwise: print .b / .w / .l / .s / .d suffix if operand is a MEM.
   'N'  print 'r63' if the operand is (const_int 0).
   'd'  print a V2SF reg as dN instead of fpN.
   'm'  print a pair `base,offset' or `base,index', for LD and ST.
   'U'  Likewise for {LD,ST}{HI,LO}.
   'V'  print the position of a single bit set.
   'W'  print the position of a single bit cleared.
   't'  print a memory address which is a register.
   'u'  prints the lowest 16 bits of CONST_INT, as an unsigned value.
   'o'  output an operator.  */
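/* Example: on a little-endian target, a DImode value in the register pair
   (r4,r5) prints as r5 for %S (most significant word) and r4 for %R (least
   significant word).  FP register pairs are always ordered big-endian,
   which is why the FP_REGISTER_P cases below add 1 for %R and 0 for %S.  */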
void
print_operand (FILE *stream, rtx x, int code)
{
  int regno;
  enum machine_mode mode;

  switch (code)
    {
      tree trapa_attr;

    case '.':
      if (final_sequence
          && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
          && get_attr_length (XVECEXP (final_sequence, 0, 1)))
        fprintf (stream, ASSEMBLER_DIALECT ? "/s" : ".s");
      break;
    case ',':
      fprintf (stream, "%s", LOCAL_LABEL_PREFIX);
      break;
    case '@':
      trapa_attr = lookup_attribute ("trap_exit",
                                     DECL_ATTRIBUTES (current_function_decl));
      if (trapa_attr)
        fprintf (stream, "trapa #%ld",
                 (long) TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (trapa_attr))));
      else if (sh_cfun_interrupt_handler_p ())
        {
          if (sh_cfun_resbank_handler_p ())
            fprintf (stream, "resbank\n");
          fprintf (stream, "rte");
        }
      else
        fprintf (stream, "rts");
      break;
    case '#':
      /* Output a nop if there's nothing in the delay slot.  */
      if (dbr_sequence_length () == 0)
        fprintf (stream, "\n\tnop");
      break;
    case '\'':
      {
        rtx note = find_reg_note (current_output_insn, REG_BR_PROB, 0);

        if (note && INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
          fputs ("/u", stream);
        break;
      }
    case '>':
      if (flag_verbose_asm && JUMP_LABEL (current_output_insn))
        {
          fputs ("\t! target: ", stream);
          output_addr_const (stream, JUMP_LABEL (current_output_insn));
        }
      break;
    case 'O':
      x = mark_constant_pool_use (x);
      output_addr_const (stream, x);
      break;
      /* N.B.: %R / %S / %T adjust memory addresses by four.
         For SHMEDIA, that means they can be used to access the first and
         second 32 bit part of a 64 bit (or larger) value that
         might be held in floating point registers or memory.
         While they can be used to access 64 bit parts of a larger value
         held in general purpose registers, that won't work with memory -
         neither for fp registers, since the frxx names are used.  */
    case 'R':
      if (REG_P (x) || GET_CODE (x) == SUBREG)
        {
          regno = true_regnum (x);
          regno += FP_REGISTER_P (regno) ? 1 : LSW;
          fputs (reg_names[regno], (stream));
        }
      else if (GET_CODE (x) == MEM)
        {
          x = adjust_address (x, SImode, 4 * LSW);
          print_operand_address (stream, XEXP (x, 0));
        }
      else
        {
          rtx sub = NULL_RTX;

          mode = GET_MODE (x);
          if (mode == VOIDmode)
            mode = DImode;
          if (GET_MODE_SIZE (mode) >= 8)
            sub = simplify_subreg (SImode, x, mode, 4 * LSW);
          if (sub)
            print_operand (stream, sub, 0);
          else
            output_operand_lossage ("invalid operand to %%R");
        }
      break;
    case 'S':
      if (REG_P (x) || GET_CODE (x) == SUBREG)
        {
          regno = true_regnum (x);
          regno += FP_REGISTER_P (regno) ? 0 : MSW;
          fputs (reg_names[regno], (stream));
        }
      else if (GET_CODE (x) == MEM)
        {
          x = adjust_address (x, SImode, 4 * MSW);
          print_operand_address (stream, XEXP (x, 0));
        }
      else
        {
          rtx sub = NULL_RTX;

          mode = GET_MODE (x);
          if (mode == VOIDmode)
            mode = DImode;
          if (GET_MODE_SIZE (mode) >= 8)
            sub = simplify_subreg (SImode, x, mode, 4 * MSW);
          if (sub)
            print_operand (stream, sub, 0);
          else
            output_operand_lossage ("invalid operand to %%S");
        }
      break;
    case 'T':
      /* Next word of a double.  */
      switch (GET_CODE (x))
        {
        case REG:
          fputs (reg_names[REGNO (x) + 1], (stream));
          break;
        case MEM:
          if (GET_CODE (XEXP (x, 0)) != PRE_DEC
              && GET_CODE (XEXP (x, 0)) != POST_INC)
            x = adjust_address (x, SImode, 4);
          print_operand_address (stream, XEXP (x, 0));
          break;
        default:
          break;
        }
      break;

    case 't':
      gcc_assert (GET_CODE (x) == MEM);
      x = XEXP (x, 0);
      switch (GET_CODE (x))
        {
        case REG:
        case SUBREG:
          print_operand (stream, x, 0);
          break;
        default:
          break;
        }
      break;

    case 'o':
      switch (GET_CODE (x))
        {
        case PLUS:  fputs ("add", stream); break;
        case MINUS: fputs ("sub", stream); break;
        case MULT:  fputs ("mul", stream); break;
        case DIV:   fputs ("div", stream); break;
        case EQ:    fputs ("eq",  stream); break;
        case NE:    fputs ("ne",  stream); break;
        case GT:  case LT:  fputs ("gt",  stream); break;
        case GE:  case LE:  fputs ("ge",  stream); break;
        case GTU: case LTU: fputs ("gtu", stream); break;
        case GEU: case LEU: fputs ("geu", stream); break;
        default:
          break;
        }
      break;
    case 'M':
      if (TARGET_SHMEDIA)
        {
          if (GET_CODE (x) == MEM
              && GET_CODE (XEXP (x, 0)) == PLUS
              && (GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
                  || GET_CODE (XEXP (XEXP (x, 0), 1)) == SUBREG))
            fputc ('x', stream);
        }
      else
        {
          if (GET_CODE (x) == MEM)
            {
              switch (GET_MODE (x))
                {
                case QImode: fputs (".b", stream); break;
                case HImode: fputs (".w", stream); break;
                case SImode: fputs (".l", stream); break;
                case SFmode: fputs (".s", stream); break;
                case DFmode: fputs (".d", stream); break;
                default: gcc_unreachable ();
                }
            }
        }
      break;
    case 'm':
      gcc_assert (GET_CODE (x) == MEM);
      x = XEXP (x, 0);
      /* Fall through.  */
    case 'U':
      switch (GET_CODE (x))
        {
        case REG:
        case SUBREG:
          print_operand (stream, x, 0);
          fputs (", 0", stream);
          break;

        case PLUS:
          print_operand (stream, XEXP (x, 0), 0);
          fputs (", ", stream);
          print_operand (stream, XEXP (x, 1), 0);
          break;

        default:
          gcc_unreachable ();
        }
      break;

    case 'V':
      {
        int num = exact_log2 (INTVAL (x));
        gcc_assert (num >= 0);
        fprintf (stream, "#%d", num);
        break;
      }

    case 'W':
      {
        int num = exact_log2 (~INTVAL (x));
        gcc_assert (num >= 0);
        fprintf (stream, "#%d", num);
        break;
      }
    case 'd':
      gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == V2SFmode);

      fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
      break;

    case 'N':
      if (x == CONST0_RTX (GET_MODE (x)))
        {
          fprintf ((stream), "r63");
          break;
        }
      goto default_output;
    case 'u':
      if (GET_CODE (x) == CONST_INT)
        {
          fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1));
          break;
        }
      /* Fall through.  */
    default_output:
    default:
      regno = 0;
      mode = GET_MODE (x);

      switch (GET_CODE (x))
        {
        case TRUNCATE:
          {
            rtx inner = XEXP (x, 0);
            int offset = 0;
            enum machine_mode inner_mode;

            /* We might see SUBREGs with vector mode registers inside.  */
            if (GET_CODE (inner) == SUBREG
                && (GET_MODE_SIZE (GET_MODE (inner))
                    == GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
                && subreg_lowpart_p (inner))
              inner = SUBREG_REG (inner);
            if (GET_CODE (inner) == CONST_INT)
              {
                x = GEN_INT (trunc_int_for_mode (INTVAL (inner), GET_MODE (x)));
                goto default_output;
              }
            inner_mode = GET_MODE (inner);
            if (GET_CODE (inner) == SUBREG
                && (GET_MODE_SIZE (GET_MODE (inner))
                    < GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
                && GET_CODE (SUBREG_REG (inner)) == REG)
              {
                offset = subreg_regno_offset (REGNO (SUBREG_REG (inner)),
                                              GET_MODE (SUBREG_REG (inner)),
                                              SUBREG_BYTE (inner),
                                              GET_MODE (inner));
                inner = SUBREG_REG (inner);
              }
            if (GET_CODE (inner) != REG || GET_MODE_SIZE (inner_mode) > 8)
              abort ();
            /* Floating point register pairs are always big endian;
               general purpose registers are 64 bit wide.  */
            regno = REGNO (inner);
            regno = (HARD_REGNO_NREGS (regno, inner_mode)
                     - HARD_REGNO_NREGS (regno, mode))
                    + offset;
            x = inner;
            goto reg;
          }
        case SIGN_EXTEND:
          x = XEXP (x, 0);
          goto reg;
          /* FIXME: We need this on SHmedia32 because reload generates
             some sign-extended HI or QI loads into DImode registers
             but, because Pmode is SImode, the address ends up with a
             subreg:SI of the DImode register.  Maybe reload should be
             fixed so as to apply alter_subreg to such loads?  */
        case IF_THEN_ELSE:
          gcc_assert (trapping_target_operand (x, VOIDmode));
          x = XEXP (XEXP (x, 2), 0);
          goto default_output;
        case SUBREG:
          gcc_assert (SUBREG_BYTE (x) == 0
                      && GET_CODE (SUBREG_REG (x)) == REG);

          x = SUBREG_REG (x);
          /* Fall through.  */

        reg:
        case REG:
          regno += REGNO (x);
          if (FP_REGISTER_P (regno)
              && mode == V16SFmode)
            fprintf ((stream), "mtrx%s", reg_names[regno] + 2);
          else if (FP_REGISTER_P (REGNO (x))
                   && mode == V4SFmode)
            fprintf ((stream), "fv%s", reg_names[regno] + 2);
          else if (GET_CODE (x) == REG
                   && mode == V2SFmode)
            fprintf ((stream), "fp%s", reg_names[regno] + 2);
          else if (FP_REGISTER_P (REGNO (x))
                   && GET_MODE_SIZE (mode) > 4)
            fprintf ((stream), "d%s", reg_names[regno] + 1);
          else
            fputs (reg_names[regno], (stream));
          break;
        case MEM:
          output_address (XEXP (x, 0));
          break;

        case CONST:
          if (TARGET_SHMEDIA
              && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
                  || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
              && (GET_MODE (XEXP (x, 0)) == DImode
                  || GET_MODE (XEXP (x, 0)) == SImode)
              && GET_CODE (XEXP (XEXP (x, 0), 0)) == TRUNCATE
              && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode)
            {
              rtx val = XEXP (XEXP (XEXP (x, 0), 0), 0);
              rtx val2 = val;
              bool nested_expr = false;

              fputc ('(', stream);
              if (GET_CODE (val) == ASHIFTRT)
                {
                  fputc ('(', stream);
                  val2 = XEXP (val, 0);
                }
              if (GET_CODE (val2) == CONST
                  || GET_RTX_CLASS (GET_CODE (val2)) != RTX_OBJ)
                {
                  fputc ('(', stream);
                  nested_expr = true;
                }
              output_addr_const (stream, val2);
              if (nested_expr)
                fputc (')', stream);
              if (GET_CODE (val) == ASHIFTRT)
                {
                  fputs (" >> ", stream);
                  output_addr_const (stream, XEXP (val, 1));
                  fputc (')', stream);
                }
              fputs (" & 65535)", stream);
              break;
            }

          /* Fall through.  */
        default:
          fputc ('#', stream);
          output_addr_const (stream, x);
          break;
        }
      break;
    }
}
/* Encode symbol attributes of a SYMBOL_REF into its
   SYMBOL_REF_FLAGS.  */
static void
sh_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (TREE_CODE (decl) == FUNCTION_DECL
      && sh2a_function_vector_p (decl) && TARGET_SH2A)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FUNCVEC_FUNCTION;
}
/* Like force_operand, but guarantees that VALUE ends up in TARGET.  */
static void
force_into (rtx value, rtx target)
{
  value = force_operand (value, target);
  if (! rtx_equal_p (value, target))
    emit_insn (gen_move_insn (target, value));
}
/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */
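/* Strategy, as implemented below: on SH4A, word-copyable blocks with a
   misaligned source go through movua.l; otherwise constant, word-aligned
   sizes use the __movmemSI* / __movmem_i4_* library entry points, and
   large moves fall back to the looping __movmem helper driven by r4-r6.  */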
int
expand_block_move (rtx *operands)
{
  int align = INTVAL (operands[3]);
  int constp = (GET_CODE (operands[2]) == CONST_INT);
  int bytes = (constp ? INTVAL (operands[2]) : 0);

  if (! constp)
    return 0;

  /* If we could use mov.l to move words and dest is word-aligned, we
     can use movua.l for loads and still generate a relatively short
     and efficient sequence.  */
  if (TARGET_SH4A_ARCH && align < 4
      && MEM_ALIGN (operands[0]) >= 32
      && can_move_by_pieces (bytes, 32))
    {
      rtx dest = copy_rtx (operands[0]);
      rtx src = copy_rtx (operands[1]);
      /* We could use different pseudos for each copied word, but
         since movua can only load into r0, it's kind of
         pointless.  */
      rtx temp = gen_reg_rtx (SImode);
      rtx src_addr = copy_addr_to_reg (XEXP (src, 0));
      int copied = 0;

      while (copied + 4 <= bytes)
        {
          rtx to = adjust_address (dest, SImode, copied);
          rtx from = adjust_automodify_address (src, BLKmode,
                                                src_addr, copied);

          set_mem_size (from, GEN_INT (4));
          emit_insn (gen_movua (temp, from));
          emit_move_insn (src_addr, plus_constant (src_addr, 4));
          emit_move_insn (to, temp);
          copied += 4;
        }

      if (copied < bytes)
        move_by_pieces (adjust_address (dest, BLKmode, copied),
                        adjust_automodify_address (src, BLKmode,
                                                   src_addr, copied),
                        bytes - copied, align, 0);

      return 1;
    }

  /* If it isn't a constant number of bytes, or if it doesn't have 4 byte
     alignment, or if it isn't a multiple of 4 bytes, then fail.  */
  if (align < 4 || (bytes % 4 != 0))
    return 0;

  if (TARGET_HARD_SH4)
    {
      if (bytes < 12)
        return 0;
      else if (bytes == 12)
        {
          rtx func_addr_rtx = gen_reg_rtx (Pmode);
          rtx r4 = gen_rtx_REG (SImode, 4);
          rtx r5 = gen_rtx_REG (SImode, 5);

          function_symbol (func_addr_rtx, "__movmemSI12_i4", SFUNC_STATIC);
          force_into (XEXP (operands[0], 0), r4);
          force_into (XEXP (operands[1], 0), r5);
          emit_insn (gen_block_move_real_i4 (func_addr_rtx));
          return 1;
        }
      else if (! TARGET_SMALLCODE)
        {
          const char *entry_name;
          rtx func_addr_rtx = gen_reg_rtx (Pmode);
          int dwords;
          rtx r4 = gen_rtx_REG (SImode, 4);
          rtx r5 = gen_rtx_REG (SImode, 5);
          rtx r6 = gen_rtx_REG (SImode, 6);

          entry_name = (bytes & 4 ? "__movmem_i4_odd" : "__movmem_i4_even");
          function_symbol (func_addr_rtx, entry_name, SFUNC_STATIC);
          force_into (XEXP (operands[0], 0), r4);
          force_into (XEXP (operands[1], 0), r5);

          dwords = bytes >> 3;
          emit_insn (gen_move_insn (r6, GEN_INT (dwords - 1)));
          emit_insn (gen_block_lump_real_i4 (func_addr_rtx));
          return 1;
        }
      else
        return 0;
    }
  if (bytes < 64)
    {
      char entry[30];
      rtx func_addr_rtx = gen_reg_rtx (Pmode);
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);

      sprintf (entry, "__movmemSI%d", bytes);
      function_symbol (func_addr_rtx, entry, SFUNC_STATIC);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);
      emit_insn (gen_block_move_real (func_addr_rtx));
      return 1;
    }

  /* This is the same number of bytes as a memcpy call, but to a different
     less common function name, so this will occasionally use more space.  */
  if (! TARGET_SMALLCODE)
    {
      rtx func_addr_rtx = gen_reg_rtx (Pmode);
      int final_switch, while_loop;
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);
      rtx r6 = gen_rtx_REG (SImode, 6);

      function_symbol (func_addr_rtx, "__movmem", SFUNC_STATIC);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);

      /* r6 controls the size of the move.  16 is decremented from it
         for each 64 bytes moved.  Then the negative bit left over is used
         as an index into a list of move instructions.  e.g., a 72 byte move
         would be set up with size(r6) = 14, for one iteration through the
         big while loop, and a switch of -2 for the last part.  */
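      /* Worked example of the computation below, matching the comment
         above: bytes == 72 gives 72/4 == 18 words; final_switch
         = 16 - (18 % 16) = 14 and while_loop = (18/16 - 1) * 16 = 0, so r6
         starts at 14.  One pass through the 64-byte loop leaves
         14 - 16 = -2, the switch index that copies the trailing 8 bytes.  */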
      final_switch = 16 - ((bytes / 4) % 16);
      while_loop = ((bytes / 4) / 16 - 1) * 16;
      emit_insn (gen_move_insn (r6, GEN_INT (while_loop + final_switch)));
      emit_insn (gen_block_lump_real (func_addr_rtx));
      return 1;
    }

  return 0;
}
/* Prepare operands for a move define_expand; specifically, one of the
   operands must be in a register.  */

int
prepare_move_operands (rtx operands[], enum machine_mode mode)
{
  if ((mode == SImode || mode == DImode)
      && flag_pic
      && ! ((mode == Pmode || mode == ptr_mode)
            && tls_symbolic_operand (operands[1], Pmode) != 0))
    {
      rtx temp;
      if (SYMBOLIC_CONST_P (operands[1]))
        {
          if (GET_CODE (operands[0]) == MEM)
            operands[1] = force_reg (Pmode, operands[1]);
          else if (TARGET_SHMEDIA
                   && GET_CODE (operands[1]) == LABEL_REF
                   && target_reg_operand (operands[0], mode))
            /* It's ok.  */;
          else
            {
              temp = (!can_create_pseudo_p ()
                      ? operands[0]
                      : gen_reg_rtx (Pmode));
              operands[1] = legitimize_pic_address (operands[1], mode, temp);
            }
        }
      else if (GET_CODE (operands[1]) == CONST
               && GET_CODE (XEXP (operands[1], 0)) == PLUS
               && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))
        {
          temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
          temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
                                         mode, temp);
          operands[1] = expand_binop (mode, add_optab, temp,
                                      XEXP (XEXP (operands[1], 0), 1),
                                      (!can_create_pseudo_p ()
                                       ? temp
                                       : gen_reg_rtx (Pmode)),
                                      0, OPTAB_LIB_WIDEN);
        }
    }

  if (! reload_in_progress && ! reload_completed)
    {
      /* Copy the source to a register if both operands aren't registers.  */
      if (! register_operand (operands[0], mode)
          && ! sh_register_operand (operands[1], mode))
        operands[1] = copy_to_mode_reg (mode, operands[1]);

      if (GET_CODE (operands[0]) == MEM && ! memory_operand (operands[0], mode))
        {
          /* This is like change_address_1 (operands[0], mode, 0, 1) ,
             except that we can't use that function because it is static.  */
          rtx new = change_address (operands[0], mode, 0);
          MEM_COPY_ATTRIBUTES (new, operands[0]);
          operands[0] = new;
        }

      /* This case can happen while generating code to move the result
         of a library call to the target.  Reject `st r0,@(rX,rY)' because
         reload will fail to find a spill register for rX, since r0 is already
         being used for the source.  */
      else if (TARGET_SH1
               && refers_to_regno_p (R0_REG, R0_REG + 1, operands[1], (rtx *)0)
               && GET_CODE (operands[0]) == MEM
               && GET_CODE (XEXP (operands[0], 0)) == PLUS
               && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == REG)
        operands[1] = copy_to_mode_reg (mode, operands[1]);
    }

  if (mode == Pmode || mode == ptr_mode)
    {
      rtx op0, op1, opc;
      enum tls_model tls_kind;

      op0 = operands[0];
      op1 = operands[1];
      if (GET_CODE (op1) == CONST
          && GET_CODE (XEXP (op1, 0)) == PLUS
          && tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode))
        {
          opc = XEXP (XEXP (op1, 0), 1);
          op1 = XEXP (XEXP (op1, 0), 0);
        }
      else
        opc = NULL_RTX;

      if ((tls_kind = tls_symbolic_operand (op1, Pmode)))
        {
          rtx tga_op1, tga_ret, tmp, tmp2;

          switch (tls_kind)
            {
            case TLS_MODEL_GLOBAL_DYNAMIC:
              tga_ret = gen_rtx_REG (Pmode, R0_REG);
              emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
              op1 = tga_ret;
              break;

            case TLS_MODEL_LOCAL_DYNAMIC:
              tga_ret = gen_rtx_REG (Pmode, R0_REG);
              emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));

              tmp = gen_reg_rtx (Pmode);
              emit_move_insn (tmp, tga_ret);

              if (register_operand (op0, Pmode))
                tmp2 = op0;
              else
                tmp2 = gen_reg_rtx (Pmode);

              emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp));
              op1 = tmp2;
              break;

            case TLS_MODEL_INITIAL_EXEC:
              if (! flag_pic)
                {
                  /* Don't schedule insns for getting GOT address when
                     the first scheduling is enabled, to avoid spill
                     failures for R0.  */
                  if (flag_schedule_insns)
                    emit_insn (gen_blockage ());
                  emit_insn (gen_GOTaddr2picreg ());
                  emit_use (gen_rtx_REG (SImode, PIC_REG));
                  if (flag_schedule_insns)
                    emit_insn (gen_blockage ());
                }
              tga_op1 = !can_create_pseudo_p () ? op0 : gen_reg_rtx (Pmode);
              tmp = gen_sym2GOTTPOFF (op1);
              emit_insn (gen_tls_initial_exec (tga_op1, tmp));
              op1 = tga_op1;
              break;

            case TLS_MODEL_LOCAL_EXEC:
              tmp2 = gen_reg_rtx (Pmode);
              emit_insn (gen_load_gbr (tmp2));
              tmp = gen_reg_rtx (Pmode);
              emit_insn (gen_symTPOFF2reg (tmp, op1));

              if (register_operand (op0, Pmode))
                op1 = op0;
              else
                op1 = gen_reg_rtx (Pmode);

              emit_insn (gen_addsi3 (op1, tmp, tmp2));
              break;

            default:
              gcc_unreachable ();
            }
          if (opc)
            emit_insn (gen_addsi3 (op1, op1, force_reg (SImode, opc)));
          operands[1] = op1;
        }
    }

  return 0;
}
enum rtx_code
prepare_cbranch_operands (rtx *operands, enum machine_mode mode,
                          enum rtx_code comparison)
{
  rtx op1;
  rtx scratch = NULL_RTX;

  if (comparison == CODE_FOR_nothing)
    comparison = GET_CODE (operands[0]);
  else
    scratch = operands[4];
  if (GET_CODE (operands[1]) == CONST_INT
      && GET_CODE (operands[2]) != CONST_INT)
    {
      rtx tmp = operands[1];

      operands[1] = operands[2];
      operands[2] = tmp;
      comparison = swap_condition (comparison);
    }
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (operands[2]);
      if ((val == -1 || val == -0x81)
          && (comparison == GT || comparison == LE))
        {
          comparison = (comparison == GT) ? GE : LT;
          operands[2] = gen_int_mode (val + 1, mode);
        }
      else if ((val == 1 || val == 0x80)
               && (comparison == GE || comparison == LT))
        {
          comparison = (comparison == GE) ? GT : LE;
          operands[2] = gen_int_mode (val - 1, mode);
        }
      else if (val == 1 && (comparison == GEU || comparison == LTU))
        {
          comparison = (comparison == GEU) ? NE : EQ;
          operands[2] = CONST0_RTX (mode);
        }
      else if (val == 0x80 && (comparison == GEU || comparison == LTU))
        {
          comparison = (comparison == GEU) ? GTU : LEU;
          operands[2] = gen_int_mode (val - 1, mode);
        }
      else if (val == 0 && (comparison == GTU || comparison == LEU))
        comparison = (comparison == GTU) ? NE : EQ;
      else if (mode == SImode
               && ((val == 0x7fffffff
                    && (comparison == GTU || comparison == LEU))
                   || ((unsigned HOST_WIDE_INT) val
                       == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
                       && (comparison == GEU || comparison == LTU))))
        {
          comparison = (comparison == GTU || comparison == GEU) ? LT : GE;
          operands[2] = CONST0_RTX (mode);
        }
    }
  op1 = operands[1];
  if (can_create_pseudo_p ())
    operands[1] = force_reg (mode, op1);
  /* When we are handling DImode comparisons, we want to keep constants so
     that we can optimize the component comparisons; however, memory loads
     are better issued as a whole so that they can be scheduled well.
     SImode equality comparisons allow I08 constants, but only when they
     compare r0.  Hence, if operands[1] has to be loaded from somewhere else
     into a register, that register might as well be r0, and we allow the
     constant.  If it is already in a register, this is likely to be
     allocated to a different hard register, thus we load the constant into
     a register unless it is zero.  */
  if (!REG_P (operands[2])
      && (GET_CODE (operands[2]) != CONST_INT
          || (mode == SImode && operands[2] != CONST0_RTX (SImode)
              && ((comparison != EQ && comparison != NE)
                  || (REG_P (op1) && REGNO (op1) != R0_REG)
                  || !satisfies_constraint_I08 (operands[2])))))
    {
      if (scratch && GET_MODE (scratch) == mode)
        {
          emit_move_insn (scratch, operands[2]);
          operands[2] = scratch;
        }
      else if (can_create_pseudo_p ())
        operands[2] = force_reg (mode, operands[2]);
    }
  return comparison;
}
void
expand_cbranchsi4 (rtx *operands, enum rtx_code comparison, int probability)
{
  rtx (*branch_expander) (rtx) = gen_branch_true;
  rtx jump;

  comparison = prepare_cbranch_operands (operands, SImode, comparison);
  switch (comparison)
    {
    case NE: case LT: case LE: case LTU: case LEU:
      comparison = reverse_condition (comparison);
      branch_expander = gen_branch_false;
    default: ;
    }
  emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, T_REG),
                          gen_rtx_fmt_ee (comparison, SImode,
                                          operands[1], operands[2])));
  jump = emit_jump_insn (branch_expander (operands[3]));
  if (probability >= 0)
    REG_NOTES (jump)
      = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (probability),
                           REG_NOTES (jump));
}
/* ??? How should we distribute probabilities when more than one branch
   is generated?  So far we only have some ad-hoc observations:
   - If the operands are random, they are likely to differ in both parts.
   - If comparing items in a hash chain, the operands are random or equal;
     operation should be EQ or NE.
   - If items are searched in an ordered tree from the root, we can expect
     the highpart to be unequal about half of the time; operation should be
     an inequality comparison, operands non-constant, and overall probability
     about 50%.  Likewise for quicksort.
   - Range checks will be often made against constants.  Even if we assume for
     simplicity an even distribution of the non-constant operand over a
     sub-range here, the same probability could be generated with differently
     wide sub-ranges - as long as the ratio of the part of the subrange that
     is before the threshold to the part that comes after the threshold stays
     the same.  Thus, we can't really tell anything here;
     assuming random distribution is at least simple.  */
bool
expand_cbranchdi4 (rtx *operands, enum rtx_code comparison)
{
  enum rtx_code msw_taken, msw_skip, lsw_taken;
  rtx skip_label = NULL_RTX;
  rtx op1h, op1l, op2h, op2l;
  int num_branches;
  int prob, rev_prob;
  int msw_taken_prob = -1, msw_skip_prob = -1, lsw_taken_prob = -1;
  rtx scratch = operands[4];

  comparison = prepare_cbranch_operands (operands, DImode, comparison);
  op1h = gen_highpart_mode (SImode, DImode, operands[1]);
  op2h = gen_highpart_mode (SImode, DImode, operands[2]);
  op1l = gen_lowpart (SImode, operands[1]);
  op2l = gen_lowpart (SImode, operands[2]);
  msw_taken = msw_skip = lsw_taken = CODE_FOR_nothing;
  prob = split_branch_probability;
  rev_prob = REG_BR_PROB_BASE - prob;
  switch (comparison)
    {
    /* ??? Should we use the cmpeqdi_t pattern for equality comparisons?
       That costs 1 cycle more when the first branch can be predicted taken,
       but saves us mispredicts because only one branch needs prediction.
       It also enables generating the cmpeqdi_t-1 pattern.  */
    case EQ:
      if (TARGET_CMPEQDI_T)
        {
          emit_insn (gen_cmpeqdi_t (operands[1], operands[2]));
          emit_jump_insn (gen_branch_true (operands[3]));
          return true;
        }
      msw_skip = NE;
      lsw_taken = EQ;
      if (prob >= 0)
        {
          /* If we had more precision, we'd use rev_prob - (rev_prob >> 32) .
           */
          msw_skip_prob = rev_prob;
          if (REG_BR_PROB_BASE <= 65535)
            lsw_taken_prob = prob ? REG_BR_PROB_BASE : 0;
          else
            {
              gcc_assert (HOST_BITS_PER_WIDEST_INT >= 64);
              lsw_taken_prob
                = (prob
                   ? (REG_BR_PROB_BASE
                      - ((HOST_WIDEST_INT) REG_BR_PROB_BASE * rev_prob
                         / ((HOST_WIDEST_INT) prob << 32)))
                   : 0);
            }
        }
      break;
    case NE:
      if (TARGET_CMPEQDI_T)
        {
          emit_insn (gen_cmpeqdi_t (operands[1], operands[2]));
          emit_jump_insn (gen_branch_false (operands[3]));
          return true;
        }
      msw_taken = NE;
      msw_taken_prob = prob;
      lsw_taken = NE;
      lsw_taken_prob = 0;
      break;
    case GTU: case GT:
      msw_taken = comparison;
      if (GET_CODE (op2l) == CONST_INT && INTVAL (op2l) == -1)
        break;
      if (comparison != GTU || op2h != CONST0_RTX (SImode))
        msw_skip = swap_condition (msw_taken);
      lsw_taken = GTU;
      break;
    case GEU: case GE:
      if (op2l == CONST0_RTX (SImode))
        msw_taken = comparison;
      else
        {
          msw_taken = comparison == GE ? GT : GTU;
          msw_skip = swap_condition (msw_taken);
          lsw_taken = GEU;
        }
      break;
    case LTU: case LT:
      msw_taken = comparison;
      if (op2l == CONST0_RTX (SImode))
        break;
      msw_skip = swap_condition (msw_taken);
      lsw_taken = LTU;
      break;
    case LEU: case LE:
      if (GET_CODE (op2l) == CONST_INT && INTVAL (op2l) == -1)
        msw_taken = comparison;
      else
        {
          lsw_taken = LEU;
          if (comparison == LE)
            msw_taken = LT;
          else if (op2h != CONST0_RTX (SImode))
            msw_taken = LTU;
          else
            break;
          msw_skip = swap_condition (msw_taken);
        }
      break;
    default: return false;
    }
  num_branches = ((msw_taken != CODE_FOR_nothing)
                  + (msw_skip != CODE_FOR_nothing)
                  + (lsw_taken != CODE_FOR_nothing));
  if (comparison != EQ && comparison != NE && num_branches > 1)
    {
      if (!CONSTANT_P (operands[2])
          && prob >= (int) (REG_BR_PROB_BASE * 3 / 8U)
          && prob <= (int) (REG_BR_PROB_BASE * 5 / 8U))
        {
          msw_taken_prob = prob / 2U;
          msw_skip_prob
            = REG_BR_PROB_BASE * rev_prob / (REG_BR_PROB_BASE + rev_prob);
          lsw_taken_prob = prob;
        }
      else
        {
          msw_taken_prob = prob;
          msw_skip_prob = REG_BR_PROB_BASE;
          /* ??? If we have a constant op2h, should we use that when
             calculating lsw_taken_prob?  */
          lsw_taken_prob = prob;
        }
    }
  operands[1] = op1h;
  operands[2] = op2h;
  operands[4] = NULL_RTX;
  if (reload_completed
      && ! arith_reg_or_0_operand (op2h, SImode) && true_regnum (op1h)
      && (msw_taken != CODE_FOR_nothing || msw_skip != CODE_FOR_nothing))
    {
      emit_move_insn (scratch, operands[2]);
      operands[2] = scratch;
    }
  if (msw_taken != CODE_FOR_nothing)
    expand_cbranchsi4 (operands, msw_taken, msw_taken_prob);
  if (msw_skip != CODE_FOR_nothing)
    {
      rtx taken_label = operands[3];

      /* Operands were possibly modified, but msw_skip doesn't expect this.
         Always use the original ones.  */
      if (msw_taken != CODE_FOR_nothing)
        {
          operands[1] = op1h;
          operands[2] = op2h;
        }
      operands[3] = skip_label = gen_label_rtx ();
      expand_cbranchsi4 (operands, msw_skip, msw_skip_prob);
      operands[3] = taken_label;
    }
  operands[1] = op1l;
  operands[2] = op2l;
  if (lsw_taken != CODE_FOR_nothing)
    {
      if (reload_completed
          && ! arith_reg_or_0_operand (op2l, SImode) && true_regnum (op1l))
        operands[4] = scratch;
      expand_cbranchsi4 (operands, lsw_taken, lsw_taken_prob);
    }
  if (msw_skip != CODE_FOR_nothing)
    emit_label (skip_label);
  return true;
}
/* Prepare the operands for an scc instruction; make sure that the
   compare has been done.  */
rtx
prepare_scc_operands (enum rtx_code code)
{
  rtx t_reg = gen_rtx_REG (SImode, T_REG);
  enum rtx_code oldcode = code;
  enum machine_mode mode;

  /* First need a compare insn.  */
  switch (code)
    {
    case NE:
      /* It isn't possible to handle this case.  */
      gcc_unreachable ();
    case LT:
      code = GT;
      break;
    case LE:
      code = GE;
      break;
    case LTU:
      code = GTU;
      break;
    case LEU:
      code = GEU;
      break;
    default:
      break;
    }
  if (code != oldcode)
    {
      rtx tmp = sh_compare_op0;
      sh_compare_op0 = sh_compare_op1;
      sh_compare_op1 = tmp;
    }

  mode = GET_MODE (sh_compare_op0);
  if (mode == VOIDmode)
    mode = GET_MODE (sh_compare_op1);

  sh_compare_op0 = force_reg (mode, sh_compare_op0);
  if ((code != EQ && code != NE
       && (sh_compare_op1 != const0_rtx
           || code == GTU || code == GEU || code == LTU || code == LEU))
      || (mode == DImode && sh_compare_op1 != const0_rtx)
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
    sh_compare_op1 = force_reg (mode, sh_compare_op1);

  if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
    (mode == SFmode ? emit_sf_insn : emit_df_insn)
      (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
                gen_rtx_SET (VOIDmode, t_reg,
                             gen_rtx_fmt_ee (code, SImode,
                                             sh_compare_op0, sh_compare_op1)),
                gen_rtx_USE (VOIDmode, get_fpscr_rtx ()))));
  else
    emit_insn (gen_rtx_SET (VOIDmode, t_reg,
                            gen_rtx_fmt_ee (code, SImode,
                                            sh_compare_op0, sh_compare_op1)));

  return t_reg;
}
/* Called from the md file, set up the operands of a compare instruction.  */

void
from_compare (rtx *operands, int code)
{
  enum machine_mode mode = GET_MODE (sh_compare_op0);
  rtx insn;
  if (mode == VOIDmode)
    mode = GET_MODE (sh_compare_op1);
  if (code != EQ
      || mode == DImode
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      /* Force args into regs, since we can't use constants here.  */
      sh_compare_op0 = force_reg (mode, sh_compare_op0);
      if (sh_compare_op1 != const0_rtx
          || code == GTU || code == GEU
          || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
        sh_compare_op1 = force_reg (mode, sh_compare_op1);
    }
  if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT && code == GE)
    {
      from_compare (operands, GT);
      insn = gen_ieee_ccmpeqsf_t (sh_compare_op0, sh_compare_op1);
    }
  else
    insn = gen_rtx_SET (VOIDmode,
                        gen_rtx_REG (SImode, T_REG),
                        gen_rtx_fmt_ee (code, SImode,
                                        sh_compare_op0, sh_compare_op1));
  if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      insn = gen_rtx_PARALLEL (VOIDmode,
                      gen_rtvec (2, insn,
                                 gen_rtx_USE (VOIDmode, get_fpscr_rtx ())));
      (mode == SFmode ? emit_sf_insn : emit_df_insn) (insn);
    }
  else
    emit_insn (insn);
}
/* Functions to output assembly code.  */

/* Return a sequence of instructions to perform DI or DF move.

   Since the SH cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
                   enum machine_mode mode)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == MEM
      && GET_CODE (XEXP (dst, 0)) == PRE_DEC)
    return "mov.l %T1,%0\n\tmov.l %1,%0";

  if (register_operand (dst, mode)
      && register_operand (src, mode))
    {
      if (REGNO (src) == MACH_REG)
        return "sts mach,%S0\n\tsts macl,%R0";

      /* When mov.d r1,r2 do r2->r3 then r1->r2;
         when mov.d r1,r0 do r1->r0 then r2->r1.  */

      if (REGNO (src) + 1 == REGNO (dst))
        return "mov %T1,%T0\n\tmov %1,%0";
      else
        return "mov %1,%0\n\tmov %T1,%T0";
    }
  else if (GET_CODE (src) == CONST_INT)
    {
      if (INTVAL (src) < 0)
        output_asm_insn ("mov #-1,%S0", operands);
      else
        output_asm_insn ("mov #0,%S0", operands);

      return "mov %1,%R0";
    }
  else if (GET_CODE (src) == MEM)
    {
      int ptrreg = -1;
      int dreg = REGNO (dst);
      rtx inside = XEXP (src, 0);

      switch (GET_CODE (inside))
        {
        case REG:
          ptrreg = REGNO (inside);
          break;

        case SUBREG:
          ptrreg = subreg_regno (inside);
          break;

        case PLUS:
          ptrreg = REGNO (XEXP (inside, 0));
          /* ??? A r0+REG address shouldn't be possible here, because it isn't
             an offsettable address.  Unfortunately, offsettable addresses use
             QImode to check the offset, and a QImode offsettable address
             requires r0 for the other operand, which is not currently
             supported, so we can't use the 'o' constraint.
             Thus we must check for and handle r0+REG addresses here.
             We punt for now, since this is likely very rare.  */
          gcc_assert (GET_CODE (XEXP (inside, 1)) != REG);
          break;

        case LABEL_REF:
          return "mov.l %1,%0\n\tmov.l %1+4,%T0";
        case POST_INC:
          return "mov.l %1,%0\n\tmov.l %1,%T0";
        default:
          gcc_unreachable ();
        }

      /* Work out the safe way to copy.  Copy into the second half first.  */
      if (dreg == ptrreg)
        return "mov.l %T1,%T0\n\tmov.l %1,%0";
    }

  return "mov.l %1,%0\n\tmov.l %T1,%T0";
}
/* Print an instruction which would have gone into a delay slot after
   another instruction, but couldn't because the other instruction expanded
   into a sequence where putting the slot insn at the end wouldn't work.  */

static void
print_slot (rtx insn)
{
  final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file, optimize, 1, NULL);

  INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
}
const char *
output_far_jump (rtx insn, rtx op)
{
  struct { rtx lab, reg, op; } this;
  rtx braf_base_lab = NULL_RTX;
  const char *jump;
  int far;
  int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
  rtx prev;

  this.lab = gen_label_rtx ();

  if (TARGET_SH2
      && offset >= -32764
      && offset - get_attr_length (insn) <= 32766)
    {
      far = 0;
      jump = "mov.w %O0,%1; braf %1";
    }
  else
    {
      far = 1;
      if (flag_pic)
        {
          if (TARGET_SH2)
            jump = "mov.l %O0,%1; braf %1";
          else
            jump = "mov.l r0,@-r15; mova %O0,r0; mov.l @r0,%1; add r0,%1; mov.l @r15+,r0; jmp @%1";
        }
      else
        jump = "mov.l %O0,%1; jmp @%1";
    }
  /* If we have a scratch register available, use it.  */
  if (GET_CODE ((prev = prev_nonnote_insn (insn))) == INSN
      && INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
    {
      this.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
      if (REGNO (this.reg) == R0_REG && flag_pic && ! TARGET_SH2)
        jump = "mov.l r1,@-r15; mova %O0,r0; mov.l @r0,r1; add r1,r0; mov.l @r15+,r1; jmp @%1";
      output_asm_insn (jump, &this.lab);
      if (dbr_sequence_length ())
        print_slot (final_sequence);
      else
        output_asm_insn ("nop", 0);
    }
  else
    {
      /* Output the delay slot insn first if any.  */
      if (dbr_sequence_length ())
        print_slot (final_sequence);

      this.reg = gen_rtx_REG (SImode, 13);
      /* We must keep the stack aligned to 8-byte boundaries on SH5.
         Fortunately, MACL is fixed and call-clobbered, and we never
         need its value across jumps, so save r13 in it instead of in
         the stack.  */
      if (TARGET_SH5)
        output_asm_insn ("lds r13, macl", 0);
      else
        output_asm_insn ("mov.l r13,@-r15", 0);
      output_asm_insn (jump, &this.lab);
      if (TARGET_SH5)
        output_asm_insn ("sts macl, r13", 0);
      else
        output_asm_insn ("mov.l @r15+,r13", 0);
    }
  if (far && flag_pic && TARGET_SH2)
    {
      braf_base_lab = gen_label_rtx ();
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (braf_base_lab));
    }
  if (far)
    output_asm_insn (".align 2", 0);
  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this.lab));
  this.op = op;
  if (far && flag_pic)
    {
      if (TARGET_SH2)
        this.lab = braf_base_lab;
      output_asm_insn (".long %O2-%O0", &this.lab);
    }
  else
    output_asm_insn (far ? ".long %O2" : ".word %O2-%O0", &this.lab);
  return "";
}
/* Local label counter, used for constants in the pool and inside
   pattern branches.  */

static int lf = 100;
/* Output code for ordinary branches.  */

const char *
output_branch (int logic, rtx insn, rtx *operands)
{
  switch (get_attr_length (insn))
    {
    case 6:
      /* This can happen if filling the delay slot has caused a forward
         branch to exceed its range (we could reverse it, but only
         when we know we won't overextend other branches; this should
         best be handled by relaxation).
         It can also happen when other condbranches hoist delay slot insn
         from their destination, thus leading to code size increase.
         But the branch will still be in the range -4092..+4098 bytes.  */
      if (! TARGET_RELAX)
        {
          int label = lf++;
          /* The call to print_slot will clobber the operands.  */
          rtx op0 = operands[0];

          /* If the instruction in the delay slot is annulled (true), then
             there is no delay slot where we can put it now.  The only safe
             place for it is after the label.  final will do that by default.  */

          if (final_sequence
              && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
              && get_attr_length (XVECEXP (final_sequence, 0, 1)))
            {
              asm_fprintf (asm_out_file, "\tb%s%ss\t%LLF%d\n", logic ? "f" : "t",
                           ASSEMBLER_DIALECT ? "/" : ".", label);
              print_slot (final_sequence);
            }
          else
            asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label);

          output_asm_insn ("bra\t%l0", &op0);
          fprintf (asm_out_file, "\tnop\n");
          (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);

          return "";
        }
      /* When relaxing, handle this like a short branch.  The linker
         will fix it up if it still doesn't fit after relaxation.  */
    case 2:
      return logic ? "bt%.\t%l0" : "bf%.\t%l0";

      /* These are for SH2e, in which we have to account for the
         extra nop because of the hardware bug in annulled branches.  */
    case 8:
      if (! TARGET_RELAX)
        {
          int label = lf++;

          gcc_assert (!final_sequence
                      || !(INSN_ANNULLED_BRANCH_P
                           (XVECEXP (final_sequence, 0, 0))));
          asm_fprintf (asm_out_file, "b%s%ss\t%LLF%d\n",
                       logic ? "f" : "t",
                       ASSEMBLER_DIALECT ? "/" : ".", label);
          fprintf (asm_out_file, "\tnop\n");
          output_asm_insn ("bra\t%l0", operands);
          fprintf (asm_out_file, "\tnop\n");
          (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);

          return "";
        }
      /* When relaxing, fall through.  */
    case 4:
      {
        char buffer[10];

        sprintf (buffer, "b%s%ss\t%%l0",
                 logic ? "t" : "f",
                 ASSEMBLER_DIALECT ? "/" : ".");
        output_asm_insn (buffer, &operands[0]);
        return "";
      }

    default:
      /* There should be no longer branches now - that would
         indicate that something has destroyed the branches set
         up in machine_dependent_reorg.  */
      gcc_unreachable ();
    }
}
2098 /* Output a code sequence for INSN using TEMPLATE with OPERANDS; but before,
2099 fill in operand 9 as a label to the successor insn.
2100 We try to use jump threading where possible.
2101 If CODE matches the comparison in the IF_THEN_ELSE of a following jump,
2102 we assume the jump is taken. I.e. EQ means follow jmp and bf, NE means
2103 follow jmp and bt, if the address is in range. */
2105 output_branchy_insn (enum rtx_code code, const char *template,
2106 rtx insn, rtx *operands)
2108 rtx next_insn = NEXT_INSN (insn);
2110 if (next_insn && GET_CODE (next_insn) == JUMP_INSN && condjump_p (next_insn))
2112 rtx src = SET_SRC (PATTERN (next_insn));
2113 if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code)
2115 /* Following branch not taken */
2116 operands[9] = gen_label_rtx ();
2117 emit_label_after (operands[9], next_insn);
2118 INSN_ADDRESSES_NEW (operands[9],
2119 INSN_ADDRESSES (INSN_UID (next_insn))
2120 + get_attr_length (next_insn));
2125 int offset = (branch_dest (next_insn)
2126 - INSN_ADDRESSES (INSN_UID (next_insn)) + 4);
2127 if (offset >= -252 && offset <= 258)
2129 if (GET_CODE (src) == IF_THEN_ELSE)
2131 src = XEXP (src, 1);
2137 operands[9] = gen_label_rtx ();
2138 emit_label_after (operands[9], insn);
2139 INSN_ADDRESSES_NEW (operands[9],
2140 INSN_ADDRESSES (INSN_UID (insn))
2141 + get_attr_length (insn));
2146 output_ieee_ccmpeq (rtx insn, rtx *operands)
2148 return output_branchy_insn (NE, "bt\t%l9\n\tfcmp/eq\t%1,%0",
2152 /* Output the start of the assembler file. */
2155 sh_file_start (void)
2157 default_file_start ();
2160 /* Declare the .directive section before it is used. */
2161 fputs ("\t.section .directive, \"SM\", @progbits, 1\n", asm_out_file);
2162 fputs ("\t.asciz \"#<SYMEDIT>#\\n\"\n", asm_out_file);
2166 /* We need to show the text section with the proper
2167 attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
2168 emits it without attributes; otherwise GAS
2169 will complain. We can teach GAS specifically about the
2170 default attributes for our choice of text section, but
2171 then we would have to change GAS again if/when we change
2172 the text section name. */
2173 fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP);
2175 /* Switch to the data section so that the coffsem symbol
2176 isn't in the text section. */
2177 switch_to_section (data_section);
2179 if (TARGET_LITTLE_ENDIAN)
2180 fputs ("\t.little\n", asm_out_file);
2184 if (TARGET_SHCOMPACT)
2185 fputs ("\t.mode\tSHcompact\n", asm_out_file);
2186 else if (TARGET_SHMEDIA)
2187 fprintf (asm_out_file, "\t.mode\tSHmedia\n\t.abi\t%i\n",
2188 TARGET_SHMEDIA64 ? 64 : 32);
2192 /* Check if PAT includes UNSPEC_CALLER unspec pattern. */
2195 unspec_caller_rtx_p (rtx pat)
2197 switch (GET_CODE (pat))
2200 return unspec_caller_rtx_p (XEXP (pat, 0));
2203 if (unspec_caller_rtx_p (XEXP (pat, 0)))
2205 return unspec_caller_rtx_p (XEXP (pat, 1));
2207 if (XINT (pat, 1) == UNSPEC_CALLER)
2216 /* Indicate that INSN cannot be duplicated. This is true for insns
2217 that generate a unique label. */
2220 sh_cannot_copy_insn_p (rtx insn)
2224 if (!reload_completed || !flag_pic)
2227 if (GET_CODE (insn) != INSN)
2229 if (asm_noperands (insn) >= 0)
2232 pat = PATTERN (insn);
2233 if (GET_CODE (pat) != SET)
2235 pat = SET_SRC (pat);
2237 if (unspec_caller_rtx_p (pat))
2243 /* Actual number of instructions used to make a shift by N. */
2244 static const char ashiftrt_insns[] =
2245 { 0,1,2,3,4,5,8,8,8,8,8,8,8,8,8,8,2,3,4,5,8,8,8,8,8,8,8,8,8,8,8,2};
2247 /* Left shift and logical right shift are the same. */
2248 static const char shift_insns[] =
2249 { 0,1,1,2,2,3,3,4,1,2,2,3,3,4,3,3,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
2251 /* Individual shift amounts needed to get the above length sequences.
2252 One bit right shifts clobber the T bit, so when possible, put one bit
2253 shifts in the middle of the sequence, so the ends are eligible for
2254 branch delay slots. */
2255 static const short shift_amounts[32][5] = {
2256 {0}, {1}, {2}, {2, 1},
2257 {2, 2}, {2, 1, 2}, {2, 2, 2}, {2, 2, 1, 2},
2258 {8}, {8, 1}, {8, 2}, {8, 1, 2},
2259 {8, 2, 2}, {8, 2, 1, 2}, {8, -2, 8}, {8, -1, 8},
2260 {16}, {16, 1}, {16, 2}, {16, 1, 2},
2261 {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
2262 {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
2263 {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
2265 /* Likewise, but for shift amounts < 16, up to three highmost bits
2266 might be clobbered. This is typically used when combined with some
2267 kind of sign or zero extension. */
2269 static const char ext_shift_insns[] =
2270 { 0,1,1,2,2,3,2,2,1,2,2,3,3,3,2,2,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
2272 static const short ext_shift_amounts[32][4] = {
2273 {0}, {1}, {2}, {2, 1},
2274 {2, 2}, {2, 1, 2}, {8, -2}, {8, -1},
2275 {8}, {8, 1}, {8, 2}, {8, 1, 2},
2276 {8, 2, 2}, {16, -2, -1}, {16, -2}, {16, -1},
2277 {16}, {16, 1}, {16, 2}, {16, 1, 2},
2278 {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
2279 {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
2280 {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
2282 /* Assuming we have a value that has been sign-extended by at least one bit,
2283 can we use the ext_shift_amounts with the last shift turned to an arithmetic shift
2284 to shift it by N without data loss, and quicker than by other means? */
2285 #define EXT_SHIFT_SIGNED(n) (((n) | 8) == 15)
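
/* Editorial note, not from the original sources: EXT_SHIFT_SIGNED holds
   exactly for N == 7 and N == 15, since ((n) | 8) == 15 requires the low
   three bits set and no bits above bit 3.

   The sketch below is a hypothetical host-side checker (an assumption,
   not GCC code) for the key property of shift_amounts[]: applying a
   row's steps, treating negative entries as logical right shifts, yields
   exactly x << n, because each right-shift step only discards zero bits
   that a preceding left-shift step brought in.  */
#if 0
#include <stdint.h>
#include <assert.h>

static uint32_t
apply_steps (uint32_t x, const short *steps, int count)
{
  int i;
  for (i = 0; i < count; i++)
    x = steps[i] > 0 ? x << steps[i] : x >> -steps[i];
  return x;
}

static void
check_shift_amounts (void)
{
  static const uint32_t samples[] = { 0u, 1u, 0x1234abcdu, 0xffffffffu };
  int n, s;
  for (n = 0; n < 32; n++)
    for (s = 0; s < 4; s++)
      /* shift_insns[n] is also the number of valid steps in row n.  */
      assert (apply_steps (samples[s], shift_amounts[n], shift_insns[n])
              == samples[s] << n);
}
#endif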
2287 /* This is used in length attributes in sh.md to help compute the length
2288 of arbitrary constant shift instructions. */
2291 shift_insns_rtx (rtx insn)
2293 rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2294 int shift_count = INTVAL (XEXP (set_src, 1));
2295 enum rtx_code shift_code = GET_CODE (set_src);
2300 return ashiftrt_insns[shift_count];
2303 return shift_insns[shift_count];
2309 /* Return the cost of a shift. */
2319 if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
2321 if (GET_MODE (x) == DImode
2322 && GET_CODE (XEXP (x, 1)) == CONST_INT
2323 && INTVAL (XEXP (x, 1)) == 1)
2326 /* Everything else is invalid, because there is no pattern for it. */
2329 /* If shifting by a non-constant, this will be expensive. */
2330 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2331 return SH_DYNAMIC_SHIFT_COST;
2333 value = INTVAL (XEXP (x, 1));
2335 /* Otherwise, return the true cost in instructions. */
2336 if (GET_CODE (x) == ASHIFTRT)
2338 int cost = ashiftrt_insns[value];
2339 /* If SH3, then we put the constant in a reg and use shad. */
2340 if (cost > 1 + SH_DYNAMIC_SHIFT_COST)
2341 cost = 1 + SH_DYNAMIC_SHIFT_COST;
2345 return shift_insns[value];
2348 /* Return the cost of an AND operation. */
2355 /* ANDing with a register is a single-cycle `and' instruction. */
2356 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2359 i = INTVAL (XEXP (x, 1));
2363 if (satisfies_constraint_I10 (XEXP (x, 1))
2364 || satisfies_constraint_J16 (XEXP (x, 1)))
2367 return 1 + rtx_cost (XEXP (x, 1), AND);
2370 /* These constants are single cycle extu.[bw] instructions. */
2371 if (i == 0xff || i == 0xffff)
2373 /* Constants that can be used in an `and' immediate instruction execute in
2374 a single cycle, but they require r0, so make them a little more expensive. */
2375 if (CONST_OK_FOR_K08 (i))
2377 /* Constants that can be loaded with a mov immediate and an and.
2378 This case is probably unnecessary. */
2379 if (CONST_OK_FOR_I08 (i))
2381 /* Any other constant requires a 2-cycle pc-relative load plus an and.
2382 This case is probably unnecessary. */
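
/* Summary of the tiers above (an editorial note, not from the original
   sources): the zero-extension masks 0xff/0xffff are cheapest, the
   r0-constrained `and' immediate and the mov-immediate-plus-and forms
   cost a little more, and any remaining constant pays for a pc-relative
   load before the `and'.  */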
2386 /* Return the cost of an addition or a subtraction. */
2391 /* Adding a register is a single cycle insn. */
2392 if (GET_CODE (XEXP (x, 1)) == REG
2393 || GET_CODE (XEXP (x, 1)) == SUBREG)
2396 /* Likewise for small constants. */
2397 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2398 && CONST_OK_FOR_ADD (INTVAL (XEXP (x, 1))))
2402 switch (GET_CODE (XEXP (x, 1)))
2407 return TARGET_SHMEDIA64 ? 5 : 3;
2410 if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1))))
2412 else if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1)) >> 16))
2414 else if (CONST_OK_FOR_I16 ((INTVAL (XEXP (x, 1)) >> 16) >> 16))
2422 /* Any other constant requires a 2-cycle pc-relative load plus an
2423 addition. */
2427 /* Return the cost of a multiply. */
2429 multcosts (rtx x ATTRIBUTE_UNUSED)
2431 if (sh_multcost >= 0)
2434 /* ??? We have a mul insn, but it has a latency of three, and doesn't
2435 accept constants. Ideally, we would use a cost of one or two and
2436 add the cost of the operand, but disregard the latter when inside loops
2437 and loop invariant code motion is still to follow.
2438 Using a multiply first and splitting it later if it's a loss
2439 doesn't work because of different sign / zero extension semantics
2440 of multiplies vs. shifts. */
2441 return TARGET_SMALLCODE ? 2 : 3;
2445 /* We have a mul insn, so we can never take more than the mul and the
2446 read of the mac reg, but count more because of the latency and extra
2447 reg usage. */
2448 if (TARGET_SMALLCODE)
2453 /* If we're aiming at small code, then just count the number of
2454 insns in a multiply call sequence. */
2455 if (TARGET_SMALLCODE)
2458 /* Otherwise count all the insns in the routine we'd be calling too. */
2462 /* Compute a (partial) cost for rtx X. Return true if the complete
2463 cost has been computed, and false if subexpressions should be
2464 scanned. In either case, *TOTAL contains the cost result. */
2467 sh_rtx_costs (rtx x, int code, int outer_code, int *total)
2474 if (INTVAL (x) == 0)
2476 else if (outer_code == AND && and_operand ((x), DImode))
2478 else if ((outer_code == IOR || outer_code == XOR
2479 || outer_code == PLUS)
2480 && CONST_OK_FOR_I10 (INTVAL (x)))
2482 else if (CONST_OK_FOR_I16 (INTVAL (x)))
2483 *total = COSTS_N_INSNS (outer_code != SET);
2484 else if (CONST_OK_FOR_I16 (INTVAL (x) >> 16))
2485 *total = COSTS_N_INSNS ((outer_code != SET) + 1);
2486 else if (CONST_OK_FOR_I16 ((INTVAL (x) >> 16) >> 16))
2487 *total = COSTS_N_INSNS ((outer_code != SET) + 2);
2489 *total = COSTS_N_INSNS ((outer_code != SET) + 3);
2492 if (CONST_OK_FOR_I08 (INTVAL (x)))
2494 else if ((outer_code == AND || outer_code == IOR || outer_code == XOR)
2495 && CONST_OK_FOR_K08 (INTVAL (x)))
2497 /* prepare_cmp_insn will force costly constants into registers before
2498 the cbranch[sd]i4 patterns can see them, so preserve potentially
2499 interesting ones not covered by I08 above. */
2500 else if (outer_code == COMPARE
2501 && ((unsigned HOST_WIDE_INT) INTVAL (x)
2502 == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
2503 || INTVAL (x) == 0x7fffffff
2504 || INTVAL (x) == 0x80 || INTVAL (x) == -0x81))
2513 if (TARGET_SHMEDIA64)
2514 *total = COSTS_N_INSNS (4);
2515 else if (TARGET_SHMEDIA32)
2516 *total = COSTS_N_INSNS (2);
2523 *total = COSTS_N_INSNS (4);
2524 /* prepare_cmp_insn will force costly constants into registers before
2525 the cbranchdi4 pattern can see them, so preserve potentially
2526 interesting ones. */
2527 else if (outer_code == COMPARE && GET_MODE (x) == DImode)
2533 if (x == CONST0_RTX (GET_MODE (x)))
2535 else if (sh_1el_vec (x, VOIDmode))
2536 *total = outer_code != SET;
2537 if (sh_rep_vec (x, VOIDmode))
2538 *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
2539 + (outer_code != SET));
2540 *total = COSTS_N_INSNS (3) + (outer_code != SET);
2545 *total = COSTS_N_INSNS (addsubcosts (x));
2549 *total = COSTS_N_INSNS (andcosts (x));
2553 *total = COSTS_N_INSNS (multcosts (x));
2559 *total = COSTS_N_INSNS (shiftcosts (x));
2566 *total = COSTS_N_INSNS (20);
2570 if (sh_1el_vec (x, VOIDmode))
2571 *total = outer_code != SET;
2572 if (sh_rep_vec (x, VOIDmode))
2573 *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
2574 + (outer_code != SET));
2575 *total = COSTS_N_INSNS (3) + (outer_code != SET);
2588 /* Compute the cost of an address. For the SH, all valid addresses are
2589 the same cost. Use a slightly higher cost for reg + reg addressing,
2590 since it increases pressure on r0. */
2593 sh_address_cost (rtx X)
2595 return (GET_CODE (X) == PLUS
2596 && ! CONSTANT_P (XEXP (X, 1))
2597 && ! TARGET_SHMEDIA ? 1 : 0);
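
/* Illustrative examples (editorial, not from the original sources): an
   indexed address such as @(r0,r4) is the PLUS-of-two-registers case
   and is costed at 1, since the index must live in r0; @(8,r4) and
   plain @r4 are costed at 0.  */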
2600 /* Code to expand a shift. */
2603 gen_ashift (int type, int n, rtx reg)
2605 /* Negative values here come from the shift_amounts array. */
2618 emit_insn (gen_ashrsi3_k (reg, reg, GEN_INT (n)));
2622 emit_insn (gen_lshrsi3_m (reg, reg, GEN_INT (n)));
2624 emit_insn (gen_lshrsi3_k (reg, reg, GEN_INT (n)));
2627 emit_insn (gen_ashlsi3_std (reg, reg, GEN_INT (n)));
2632 /* Same for HImode */
2635 gen_ashift_hi (int type, int n, rtx reg)
2637 /* Negative values here come from the shift_amounts array. */
2651 /* We don't have HImode right shift operations because using the
2652 ordinary 32 bit shift instructions for that doesn't generate proper
2653 zero/sign extension.
2654 gen_ashift_hi is only called in contexts where we know that the
2655 sign extension works out correctly. */
2658 if (GET_CODE (reg) == SUBREG)
2660 offset = SUBREG_BYTE (reg);
2661 reg = SUBREG_REG (reg);
2663 gen_ashift (type, n, gen_rtx_SUBREG (SImode, reg, offset));
2667 emit_insn (gen_ashlhi3_k (reg, reg, GEN_INT (n)));
2672 /* Output RTL to split a constant shift into its component SH constant
2673 shift instructions. */
2676 gen_shifty_op (int code, rtx *operands)
2678 int value = INTVAL (operands[2]);
2681 /* Truncate the shift count in case it is out of bounds. */
2682 value = value & 0x1f;
2686 if (code == LSHIFTRT)
2688 emit_insn (gen_rotlsi3_1 (operands[0], operands[0]));
2689 emit_insn (gen_movt (operands[0]));
2692 else if (code == ASHIFT)
2694 /* There is a two instruction sequence for 31 bit left shifts,
2695 but it requires r0. */
2696 if (GET_CODE (operands[0]) == REG && REGNO (operands[0]) == 0)
2698 emit_insn (gen_andsi3 (operands[0], operands[0], const1_rtx));
2699 emit_insn (gen_rotlsi3_31 (operands[0], operands[0]));
2704 else if (value == 0)
2706 /* This can happen even when optimizing, if there were subregs before
2707 reload. Don't output a nop here, as this is never optimized away;
2708 use a no-op move instead. */
2709 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[0]));
2713 max = shift_insns[value];
2714 for (i = 0; i < max; i++)
2715 gen_ashift (code, shift_amounts[value][i], operands[0]);
2718 /* Same as above, but optimized for values where the topmost bits don't
2719 matter. */
2722 gen_shifty_hi_op (int code, rtx *operands)
2724 int value = INTVAL (operands[2]);
2726 void (*gen_fun) (int, int, rtx);
2728 /* This operation is used by and_shl for SImode values with a few
2729 high bits known to be cleared. */
2733 emit_insn (gen_nop ());
2737 gen_fun = GET_MODE (operands[0]) == HImode ? gen_ashift_hi : gen_ashift;
2740 max = ext_shift_insns[value];
2741 for (i = 0; i < max; i++)
2742 gen_fun (code, ext_shift_amounts[value][i], operands[0]);
2745 /* When shifting right, emit the shifts in reverse order, so that
2746 solitary negative values come first. */
2747 for (i = ext_shift_insns[value] - 1; i >= 0; i--)
2748 gen_fun (code, ext_shift_amounts[value][i], operands[0]);
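
/* Host-side sketch (an assumption, not GCC code) of the guarantee
   documented above ext_shift_amounts[]: for counts below 16 the shorter
   sequences may differ from an exact left shift only in the three
   highmost bits.  E.g. for a count of 6, {8, -2} computes
   ((x << 8) >> 2), which matches x << 6 in bits 0..29.  */
#if 0
static int
ext_steps_match_low_bits (uint32_t x, int n)
{
  uint32_t exact = x << n;
  uint32_t got = x;
  int i;
  for (i = 0; i < ext_shift_insns[n]; i++)
    got = ext_shift_amounts[n][i] > 0
          ? got << ext_shift_amounts[n][i]
          : got >> -ext_shift_amounts[n][i];
  /* Only bits 29..31 are allowed to differ.  */
  return ((got ^ exact) & 0x1fffffffu) == 0;
}
#endif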
2751 /* Output RTL for an arithmetic right shift. */
2753 /* ??? Rewrite to use super-optimizer sequences. */
2756 expand_ashiftrt (rtx *operands)
2764 if (GET_CODE (operands[2]) != CONST_INT)
2766 rtx count = copy_to_mode_reg (SImode, operands[2]);
2767 emit_insn (gen_negsi2 (count, count));
2768 emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
2771 else if (ashiftrt_insns[INTVAL (operands[2]) & 31]
2772 > 1 + SH_DYNAMIC_SHIFT_COST)
2775 = force_reg (SImode, GEN_INT (- (INTVAL (operands[2]) & 31)));
2776 emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
2780 if (GET_CODE (operands[2]) != CONST_INT)
2783 value = INTVAL (operands[2]) & 31;
2787 /* If we are called from abs expansion, arrange things so that we
2788 can use a single MT instruction that doesn't clobber the source,
2789 if LICM can hoist out the load of the constant zero. */
2790 if (currently_expanding_to_rtl)
2792 emit_insn (gen_cmpgtsi_t (force_reg (SImode, CONST0_RTX (SImode)),
2794 emit_insn (gen_mov_neg_si_t (operands[0]));
2797 emit_insn (gen_ashrsi2_31 (operands[0], operands[1]));
2800 else if (value >= 16 && value <= 19)
2802 wrk = gen_reg_rtx (SImode);
2803 emit_insn (gen_ashrsi2_16 (wrk, operands[1]));
2806 gen_ashift (ASHIFTRT, 1, wrk);
2807 emit_move_insn (operands[0], wrk);
2810 /* Expand a short sequence inline; for longer ones, call a magic routine. */
2811 else if (value <= 5)
2813 wrk = gen_reg_rtx (SImode);
2814 emit_move_insn (wrk, operands[1]);
2816 gen_ashift (ASHIFTRT, 1, wrk);
2817 emit_move_insn (operands[0], wrk);
2821 wrk = gen_reg_rtx (Pmode);
2823 /* Load the value into an arg reg and call a helper. */
2824 emit_move_insn (gen_rtx_REG (SImode, 4), operands[1]);
2825 sprintf (func, "__ashiftrt_r4_%d", value);
2826 function_symbol (wrk, func, SFUNC_STATIC);
2827 emit_insn (gen_ashrsi3_n (GEN_INT (value), wrk));
2828 emit_move_insn (operands[0], gen_rtx_REG (SImode, 4));
2833 sh_dynamicalize_shift_p (rtx count)
2835 return shift_insns[INTVAL (count)] > 1 + SH_DYNAMIC_SHIFT_COST;
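
/* Worked example (editorial; SH_DYNAMIC_SHIFT_COST is target-dependent,
   and the value 1 below is an assumption for a target with dynamic
   shifts): with a dynamic-shift cost of 1, the test above fires for
   counts such as 5, 7 or 13, whose static expansions need 3 or 4 insns,
   while counts like 1, 2, 8 or 16 keep their 1-insn static form.  */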
2838 /* Try to find a good way to implement the combiner pattern
2839 [(set (match_operand:SI 0 "register_operand" "r")
2840 (and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
2841 (match_operand:SI 2 "const_int_operand" "n"))
2842 (match_operand:SI 3 "const_int_operand" "n"))) .
2843 LEFT_RTX is operand 2 in the above pattern, and MASK_RTX is operand 3.
2844 return 0 for simple right / left or left/right shift combination.
2845 return 1 for a combination of shifts with zero_extend.
2846 return 2 for a combination of shifts with an AND that needs r0.
2847 return 3 for a combination of shifts with an AND that needs an extra
2848 scratch register, when the three highmost bits of the AND mask are clear.
2849 return 4 for a combination of shifts with an AND that needs an extra
2850 scratch register, when any of the three highmost bits of the AND mask
2851 is set.
2852 If ATTRP is set, store an initial right shift width in ATTRP[0],
2853 and the instruction length in ATTRP[1]. These values are not valid
2854 when returning 0.
2855 When ATTRP is set and returning 1, ATTRP[2] gets set to the index into
2856 shift_amounts for the last shift value that is to be used before the
2857 sign extend. */
2859 shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
2861 unsigned HOST_WIDE_INT mask, lsb, mask2, lsb2;
2862 int left = INTVAL (left_rtx), right;
2864 int cost, best_cost = 10000;
2865 int best_right = 0, best_len = 0;
2869 if (left < 0 || left > 31)
2871 if (GET_CODE (mask_rtx) == CONST_INT)
2872 mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
2874 mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
2875 /* Can this be expressed as a right shift / left shift pair? */
2876 lsb = ((mask ^ (mask - 1)) >> 1) + 1;
2877 right = exact_log2 (lsb);
2878 mask2 = ~(mask + lsb - 1);
2879 lsb2 = ((mask2 ^ (mask2 - 1)) >> 1) + 1;
2880 /* mask has no zeroes but trailing zeroes <==> ! mask2 */
2882 best_cost = shift_insns[right] + shift_insns[right + left];
2883 /* mask has no trailing zeroes <==> ! right */
2884 else if (! right && mask2 == ~(lsb2 - 1))
2886 int late_right = exact_log2 (lsb2);
2887 best_cost = shift_insns[left + late_right] + shift_insns[late_right];
2889 /* Try to use zero extend. */
2890 if (mask2 == ~(lsb2 - 1))
2894 for (width = 8; width <= 16; width += 8)
2896 /* Can we zero-extend right away? */
2897 if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
2900 = 1 + ext_shift_insns[right] + ext_shift_insns[left + right];
2901 if (cost < best_cost)
2912 /* ??? Could try to put zero extend into initial right shift,
2913 or even shift a bit left before the right shift. */
2914 /* Determine value of first part of left shift, to get to the
2915 zero extend cut-off point. */
2916 first = width - exact_log2 (lsb2) + right;
2917 if (first >= 0 && right + left - first >= 0)
2919 cost = ext_shift_insns[right] + ext_shift_insns[first] + 1
2920 + ext_shift_insns[right + left - first];
2921 if (cost < best_cost)
2933 /* Try to use r0 AND pattern */
2934 for (i = 0; i <= 2; i++)
2938 if (! CONST_OK_FOR_K08 (mask >> i))
2940 cost = (i != 0) + 2 + ext_shift_insns[left + i];
2941 if (cost < best_cost)
2946 best_len = cost - 1;
2949 /* Try to use a scratch register to hold the AND operand. */
2950 can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
2951 for (i = 0; i <= 2; i++)
2955 cost = (i != 0) + (CONST_OK_FOR_I08 (mask >> i) ? 2 : 3)
2956 + (can_ext ? ext_shift_insns : shift_insns)[left + i];
2957 if (cost < best_cost)
2962 best_len = cost - 1 - ! CONST_OK_FOR_I08 (mask >> i);
2968 attrp[0] = best_right;
2969 attrp[1] = best_len;
2974 /* This is used in length attributes of the unnamed instructions
2975 corresponding to shl_and_kind return values of 1 and 2. */
2977 shl_and_length (rtx insn)
2979 rtx set_src, left_rtx, mask_rtx;
2982 set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2983 left_rtx = XEXP (XEXP (set_src, 0), 1);
2984 mask_rtx = XEXP (set_src, 1);
2985 shl_and_kind (left_rtx, mask_rtx, attributes);
2986 return attributes[1];
2989 /* This is used in length attribute of the and_shl_scratch instruction. */
2992 shl_and_scr_length (rtx insn)
2994 rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2995 int len = shift_insns[INTVAL (XEXP (set_src, 1))];
2996 rtx op = XEXP (set_src, 0);
2997 len += shift_insns[INTVAL (XEXP (op, 1))] + 1;
2998 op = XEXP (XEXP (op, 0), 0);
2999 return len + shift_insns[INTVAL (XEXP (op, 1))];
3002 /* Generate rtl for instructions for which shl_and_kind advised a particular
3003 method of generating them, i.e. returned zero. */
3006 gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
3009 unsigned HOST_WIDE_INT mask;
3010 int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
3011 int right, total_shift;
3012 void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;
3014 right = attributes[0];
3015 total_shift = INTVAL (left_rtx) + right;
3016 mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> total_shift;
3023 int first = attributes[2];
3028 emit_insn ((mask << right) <= 0xff
3029 ? gen_zero_extendqisi2 (dest,
3030 gen_lowpart (QImode, source))
3031 : gen_zero_extendhisi2 (dest,
3032 gen_lowpart (HImode, source)));
3036 emit_insn (gen_movsi (dest, source));
3040 operands[2] = GEN_INT (right);
3041 gen_shifty_hi_op (LSHIFTRT, operands);
3045 operands[2] = GEN_INT (first);
3046 gen_shifty_hi_op (ASHIFT, operands);
3047 total_shift -= first;
3051 emit_insn (mask <= 0xff
3052 ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
3053 : gen_zero_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3054 if (total_shift > 0)
3056 operands[2] = GEN_INT (total_shift);
3057 gen_shifty_hi_op (ASHIFT, operands);
3062 shift_gen_fun = gen_shifty_op;
3064 /* If the topmost bit that matters is set, set the topmost bits
3065 that don't matter. This way, we might be able to get a shorter
3066 signed constant. */
3067 if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
3068 mask |= (HOST_WIDE_INT) ~0 << (31 - total_shift);
3070 /* Don't expand fine-grained when combining, because that will
3071 make the pattern fail. */
3072 if (currently_expanding_to_rtl
3073 || reload_in_progress || reload_completed)
3077 /* Cases 3 and 4 should be handled by this split
3078 only while combining */
3079 gcc_assert (kind <= 2);
3082 emit_insn (gen_lshrsi3 (dest, source, GEN_INT (right)));
3085 emit_insn (gen_andsi3 (dest, source, GEN_INT (mask)));
3090 operands[2] = GEN_INT (total_shift);
3091 shift_gen_fun (ASHIFT, operands);
3098 if (kind != 4 && total_shift < 16)
3100 neg = -ext_shift_amounts[total_shift][1];
3102 neg -= ext_shift_amounts[total_shift][2];
3106 emit_insn (gen_and_shl_scratch (dest, source,
3109 GEN_INT (total_shift + neg),
3111 emit_insn (gen_movsi (dest, dest));
3118 /* Try to find a good way to implement the combiner pattern
3119 [(set (match_operand:SI 0 "register_operand" "=r")
3120 (sign_extract:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
3121 (match_operand:SI 2 "const_int_operand" "n")
3122 (match_operand:SI 3 "const_int_operand" "n")
3124 (clobber (reg:SI T_REG))]
3125 LEFT_RTX is operand 2 in the above pattern, and SIZE_RTX is operand 3.
3126 return 0 for simple left / right shift combination.
3127 return 1 for left shift / 8 bit sign extend / left shift.
3128 return 2 for left shift / 16 bit sign extend / left shift.
3129 return 3 for left shift / 8 bit sign extend / shift / sign extend.
3130 return 4 for left shift / 16 bit sign extend / shift / sign extend.
3131 return 5 for left shift / 16 bit sign extend / right shift
3132 return 6 for < 8 bit sign extend / left shift.
3133 return 7 for < 8 bit sign extend / left shift / single right shift.
3134 If COSTP is nonzero, assign the calculated cost to *COSTP. */
3137 shl_sext_kind (rtx left_rtx, rtx size_rtx, int *costp)
3139 int left, size, insize, ext;
3140 int cost = 0, best_cost;
3143 left = INTVAL (left_rtx);
3144 size = INTVAL (size_rtx);
3145 insize = size - left;
3146 gcc_assert (insize > 0);
3147 /* Default to left / right shift. */
3149 best_cost = shift_insns[32 - insize] + ashiftrt_insns[32 - size];
3152 /* 16 bit shift / sign extend / 16 bit shift */
3153 cost = shift_insns[16 - insize] + 1 + ashiftrt_insns[16 - size];
3154 /* If ashiftrt_insns[16 - size] is 8, this choice will be overridden
3155 below, by alternative 3 or something even better. */
3156 if (cost < best_cost)
3162 /* Try a plain sign extend between two shifts. */
3163 for (ext = 16; ext >= insize; ext -= 8)
3167 cost = ext_shift_insns[ext - insize] + 1 + shift_insns[size - ext];
3168 if (cost < best_cost)
3170 kind = ext / (unsigned) 8;
3174 /* Check if we can do a sloppy shift with a final signed shift
3175 restoring the sign. */
3176 if (EXT_SHIFT_SIGNED (size - ext))
3177 cost = ext_shift_insns[ext - insize] + ext_shift_insns[size - ext] + 1;
3178 /* If not, maybe it's still cheaper to do the second shift sloppy,
3179 and do a final sign extend? */
3180 else if (size <= 16)
3181 cost = ext_shift_insns[ext - insize] + 1
3182 + ext_shift_insns[size > ext ? size - ext : ext - size] + 1;
3185 if (cost < best_cost)
3187 kind = ext / (unsigned) 8 + 2;
3191 /* Check if we can sign extend in r0 */
3194 cost = 3 + shift_insns[left];
3195 if (cost < best_cost)
3200 /* Try the same with a final signed shift. */
3203 cost = 3 + ext_shift_insns[left + 1] + 1;
3204 if (cost < best_cost)
3213 /* Try to use a dynamic shift. */
3214 cost = shift_insns[32 - insize] + 1 + SH_DYNAMIC_SHIFT_COST;
3215 if (cost < best_cost)
3226 /* Function to be used in the length attribute of the instructions
3227 implementing this pattern. */
3230 shl_sext_length (rtx insn)
3232 rtx set_src, left_rtx, size_rtx;
3235 set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
3236 left_rtx = XEXP (XEXP (set_src, 0), 1);
3237 size_rtx = XEXP (set_src, 1);
3238 shl_sext_kind (left_rtx, size_rtx, &cost);
3242 /* Generate rtl for this pattern */
3245 gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
3248 int left, size, insize, cost;
3251 kind = shl_sext_kind (left_rtx, size_rtx, &cost);
3252 left = INTVAL (left_rtx);
3253 size = INTVAL (size_rtx);
3254 insize = size - left;
3262 int ext = kind & 1 ? 8 : 16;
3263 int shift2 = size - ext;
3265 /* Don't expand fine-grained when combining, because that will
3266 make the pattern fail. */
3267 if (! currently_expanding_to_rtl
3268 && ! reload_in_progress && ! reload_completed)
3270 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3271 emit_insn (gen_movsi (dest, source));
3275 emit_insn (gen_movsi (dest, source));
3279 operands[2] = GEN_INT (ext - insize);
3280 gen_shifty_hi_op (ASHIFT, operands);
3283 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
3284 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3289 operands[2] = GEN_INT (shift2);
3290 gen_shifty_op (ASHIFT, operands);
3297 if (EXT_SHIFT_SIGNED (shift2))
3299 operands[2] = GEN_INT (shift2 + 1);
3300 gen_shifty_op (ASHIFT, operands);
3301 operands[2] = const1_rtx;
3302 gen_shifty_op (ASHIFTRT, operands);
3305 operands[2] = GEN_INT (shift2);
3306 gen_shifty_hi_op (ASHIFT, operands);
3310 operands[2] = GEN_INT (-shift2);
3311 gen_shifty_hi_op (LSHIFTRT, operands);
3313 emit_insn (size <= 8
3314 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
3315 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3322 if (! currently_expanding_to_rtl
3323 && ! reload_in_progress && ! reload_completed)
3324 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3328 operands[2] = GEN_INT (16 - insize);
3329 gen_shifty_hi_op (ASHIFT, operands);
3330 emit_insn (gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3332 /* Don't use gen_ashrsi3 because it generates new pseudos. */
3334 gen_ashift (ASHIFTRT, 1, dest);
3339 /* Don't expand fine-grained when combining, because that will
3340 make the pattern fail. */
3341 if (! currently_expanding_to_rtl
3342 && ! reload_in_progress && ! reload_completed)
3344 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3345 emit_insn (gen_movsi (dest, source));
3348 emit_insn (gen_andsi3 (dest, source, GEN_INT ((1 << insize) - 1)));
3349 emit_insn (gen_xorsi3 (dest, dest, GEN_INT (1 << (insize - 1))));
3350 emit_insn (gen_addsi3 (dest, dest, GEN_INT (-1 << (insize - 1))));
3352 operands[2] = kind == 7 ? GEN_INT (left + 1) : left_rtx;
3353 gen_shifty_op (ASHIFT, operands);
3355 emit_insn (gen_ashrsi3_k (dest, dest, const1_rtx));
3363 /* Prefix a symbol_ref name with "datalabel". */
3366 gen_datalabel_ref (rtx sym)
3370 if (GET_CODE (sym) == LABEL_REF)
3371 return gen_rtx_CONST (GET_MODE (sym),
3372 gen_rtx_UNSPEC (GET_MODE (sym),
3376 gcc_assert (GET_CODE (sym) == SYMBOL_REF);
3378 str = XSTR (sym, 0);
3379 /* Share all SYMBOL_REF strings with the same value - that is important
3380 for cse. */
3381 str = IDENTIFIER_POINTER (get_identifier (str));
3382 XSTR (sym, 0) = str;
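
/* Editorial sketch of why the interning above works: get_identifier
   returns the same IDENTIFIER_NODE for equal strings, so two SYMBOL_REFs
   with equal names end up with pointer-equal XSTRs and can be compared
   cheaply.  A hypothetical check:  */
#if 0
gcc_assert (IDENTIFIER_POINTER (get_identifier ("x"))
            == IDENTIFIER_POINTER (get_identifier ("x")));
#endif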
3388 static alloc_pool label_ref_list_pool;
3390 typedef struct label_ref_list_d
3393 struct label_ref_list_d *next;
3394 } *label_ref_list_t;
3396 /* The SH cannot load a large constant into a register, constants have to
3397 come from a pc relative load. The reference of a pc relative load
3398 instruction must be less than 1k in front of the instruction. This
3399 means that we often have to dump a constant inside a function, and
3400 generate code to branch around it.
3402 It is important to minimize this, since the branches will slow things
3403 down and make things bigger.
3405 Worst case code looks like:
3423 We fix this by performing a scan before scheduling, which notices which
3424 instructions need to have their operands fetched from the constant table
3425 and builds the table.
3427 The algorithm is:
3429 scan, find an instruction which needs a pcrel move. Look forward, find the
3430 last barrier which is within MAX_COUNT bytes of the requirement.
3431 If there isn't one, make one. Process all the instructions between
3432 the find and the barrier.
3434 In the above example, we can tell that L3 is within 1k of L1, so
3435 the first move can be shrunk from the 3 insn+constant sequence into
3436 just 1 insn, and the constant moved to L3 to make:
3447 Then the second move becomes the target for the shortening process. */
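
/* Reconstructed illustration of the worst case sketched above (not the
   verbatim example, which is elided here): each out-of-range constant
   load costs a branch around an inline table,

        mov.l   .L1,rn          ! pc-relative load
        bra     .L2             ! jump over the dumped constant
        nop
        .align  2
   .L1: .long   value
   .L2: ...

   whereas after the scan, nearby loads share a single table placed
   behind an existing barrier and the bra/nop pairs disappear.  */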
3451 rtx value; /* Value in table. */
3452 rtx label; /* Label of value. */
3453 label_ref_list_t wend; /* End of window. */
3454 enum machine_mode mode; /* Mode of value. */
3456 /* True if this constant is accessed as part of a post-increment
3457 sequence. Note that HImode constants are never accessed in this way. */
3458 bool part_of_sequence_p;
3461 /* The maximum number of constants that can fit into one pool, since
3462 constants in the range 0..510 are at least 2 bytes long, and in the
3463 range from there to 1018 at least 4 bytes. */
3465 #define MAX_POOL_SIZE 372
3466 static pool_node pool_vector[MAX_POOL_SIZE];
3467 static int pool_size;
3468 static rtx pool_window_label;
3469 static int pool_window_last;
3471 static int max_labelno_before_reorg;
3473 /* ??? If we need a constant in HImode which is the truncated value of a
3474 constant we need in SImode, we could combine the two entries thus saving
3475 two bytes. Is this common enough to be worth the effort of implementing
3476 it? */
3478 /* ??? This stuff should be done at the same time that we shorten branches.
3479 As it is now, we must assume that all branches are the maximum size, and
3480 this causes us to almost always output constant pools sooner than
3481 necessary. */
3483 /* Add a constant to the pool and return its label. */
3486 add_constant (rtx x, enum machine_mode mode, rtx last_value)
3490 label_ref_list_t ref, newref;
3492 /* First see if we've already got it. */
3493 for (i = 0; i < pool_size; i++)
3495 if (x->code == pool_vector[i].value->code
3496 && mode == pool_vector[i].mode)
3498 if (x->code == CODE_LABEL)
3500 if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
3503 if (rtx_equal_p (x, pool_vector[i].value))
3508 || ! rtx_equal_p (last_value, pool_vector[i-1].value))
3510 new = gen_label_rtx ();
3511 LABEL_REFS (new) = pool_vector[i].label;
3512 pool_vector[i].label = lab = new;
3514 if (lab && pool_window_label)
3516 newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
3517 newref->label = pool_window_label;
3518 ref = pool_vector[pool_window_last].wend;
3520 pool_vector[pool_window_last].wend = newref;
3523 pool_window_label = new;
3524 pool_window_last = i;
3530 /* Need a new one. */
3531 pool_vector[pool_size].value = x;
3532 if (last_value && rtx_equal_p (last_value, pool_vector[pool_size - 1].value))
3535 pool_vector[pool_size - 1].part_of_sequence_p = true;
3538 lab = gen_label_rtx ();
3539 pool_vector[pool_size].mode = mode;
3540 pool_vector[pool_size].label = lab;
3541 pool_vector[pool_size].wend = NULL;
3542 pool_vector[pool_size].part_of_sequence_p = (lab == 0);
3543 if (lab && pool_window_label)
3545 newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
3546 newref->label = pool_window_label;
3547 ref = pool_vector[pool_window_last].wend;
3549 pool_vector[pool_window_last].wend = newref;
3552 pool_window_label = lab;
3553 pool_window_last = pool_size;
3558 /* Output the literal table. START, if nonzero, is the first instruction
3559 this table is needed for, and also indicates that there is at least one
3560 casesi_worker_2 instruction; we have to emit the operand3 labels from
3561 these insns at a 4-byte aligned position. BARRIER is the barrier
3562 after which we are to place the table. */
3565 dump_table (rtx start, rtx barrier)
3571 label_ref_list_t ref;
3574 /* Do two passes; on the first, dump out the HI sized constants. */
3576 for (i = 0; i < pool_size; i++)
3578 pool_node *p = &pool_vector[i];
3580 if (p->mode == HImode)
3584 scan = emit_insn_after (gen_align_2 (), scan);
3587 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3588 scan = emit_label_after (lab, scan);
3589 scan = emit_insn_after (gen_consttable_2 (p->value, const0_rtx),
3591 for (ref = p->wend; ref; ref = ref->next)
3594 scan = emit_insn_after (gen_consttable_window_end (lab), scan);
3597 else if (p->mode == DFmode)
3605 scan = emit_insn_after (gen_align_4 (), scan);
3607 for (; start != barrier; start = NEXT_INSN (start))
3608 if (GET_CODE (start) == INSN
3609 && recog_memoized (start) == CODE_FOR_casesi_worker_2)
3611 rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
3612 rtx lab = XEXP (XVECEXP (src, 0, 3), 0);
3614 scan = emit_label_after (lab, scan);
3617 if (TARGET_FMOVD && TARGET_ALIGN_DOUBLE && have_df)
3619 rtx align_insn = NULL_RTX;
3621 scan = emit_label_after (gen_label_rtx (), scan);
3622 scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
3625 for (i = 0; i < pool_size; i++)
3627 pool_node *p = &pool_vector[i];
3635 if (align_insn && !p->part_of_sequence_p)
3637 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3638 emit_label_before (lab, align_insn);
3639 emit_insn_before (gen_consttable_4 (p->value, const0_rtx),
3641 for (ref = p->wend; ref; ref = ref->next)
3644 emit_insn_before (gen_consttable_window_end (lab),
3647 delete_insn (align_insn);
3648 align_insn = NULL_RTX;
3653 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3654 scan = emit_label_after (lab, scan);
3655 scan = emit_insn_after (gen_consttable_4 (p->value,
3657 need_align = ! need_align;
3663 scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
3668 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3669 scan = emit_label_after (lab, scan);
3670 scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
3677 if (p->mode != HImode)
3679 for (ref = p->wend; ref; ref = ref->next)
3682 scan = emit_insn_after (gen_consttable_window_end (lab),
3691 for (i = 0; i < pool_size; i++)
3693 pool_node *p = &pool_vector[i];
3704 scan = emit_label_after (gen_label_rtx (), scan);
3705 scan = emit_insn_after (gen_align_4 (), scan);
3707 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3708 scan = emit_label_after (lab, scan);
3709 scan = emit_insn_after (gen_consttable_4 (p->value, const0_rtx),
3717 scan = emit_label_after (gen_label_rtx (), scan);
3718 scan = emit_insn_after (gen_align_4 (), scan);
3720 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3721 scan = emit_label_after (lab, scan);
3722 scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
3729 if (p->mode != HImode)
3731 for (ref = p->wend; ref; ref = ref->next)
3734 scan = emit_insn_after (gen_consttable_window_end (lab), scan);
3739 scan = emit_insn_after (gen_consttable_end (), scan);
3740 scan = emit_barrier_after (scan);
3742 pool_window_label = NULL_RTX;
3743 pool_window_last = 0;
3746 /* Return nonzero if constant would be an ok source for a
3747 mov.w instead of a mov.l. */
3752 return (GET_CODE (src) == CONST_INT
3753 && INTVAL (src) >= -32768
3754 && INTVAL (src) <= 32767);
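
/* Editorial examples for the range test above: 32767 and -32768 fit the
   2-byte mov.w table entry; 32768 or 65535 do not, and must use a
   4-byte mov.l entry instead.  */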
3757 #define MOVA_LABELREF(mova) XVECEXP (SET_SRC (PATTERN (mova)), 0, 0)
3759 /* Nonzero if the insn is a move instruction which needs to be fixed. */
3761 /* ??? For DImode/DFmode moves, we don't need to fix it if each half of the
3762 CONST_DOUBLE input value is CONST_OK_FOR_I08. For an SFmode move, we don't
3763 need to fix it if the input value is CONST_OK_FOR_I08. */
3766 broken_move (rtx insn)
3768 if (GET_CODE (insn) == INSN)
3770 rtx pat = PATTERN (insn);
3771 if (GET_CODE (pat) == PARALLEL)
3772 pat = XVECEXP (pat, 0, 0);
3773 if (GET_CODE (pat) == SET
3774 /* We can load any 8-bit value if we don't care what the high
3775 order bits end up as. */
3776 && GET_MODE (SET_DEST (pat)) != QImode
3777 && (CONSTANT_P (SET_SRC (pat))
3778 /* Match mova_const. */
3779 || (GET_CODE (SET_SRC (pat)) == UNSPEC
3780 && XINT (SET_SRC (pat), 1) == UNSPEC_MOVA
3781 && GET_CODE (XVECEXP (SET_SRC (pat), 0, 0)) == CONST))
3783 && GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
3784 && (fp_zero_operand (SET_SRC (pat))
3785 || fp_one_operand (SET_SRC (pat)))
3786 /* ??? If this is a -m4 or -m4-single compilation, in general
3787 we don't know the current setting of fpscr, so disable fldi.
3788 There is an exception if this was a register-register move
3789 before reload - and hence it was ascertained that we have
3790 single precision setting - and in a post-reload optimization
3791 we changed this to do a constant load. In that case
3792 we don't have an r0 clobber, hence we must use fldi. */
3793 && (! TARGET_SH4 || TARGET_FMOVD
3794 || (GET_CODE (XEXP (XVECEXP (PATTERN (insn), 0, 2), 0))
3796 && GET_CODE (SET_DEST (pat)) == REG
3797 && FP_REGISTER_P (REGNO (SET_DEST (pat))))
3799 && GET_MODE (SET_DEST (pat)) == SImode
3800 && (satisfies_constraint_I20 (SET_SRC (pat))
3801 || satisfies_constraint_I28 (SET_SRC (pat))))
3802 && ! satisfies_constraint_I08 (SET_SRC (pat)))
3812 return (GET_CODE (insn) == INSN
3813 && GET_CODE (PATTERN (insn)) == SET
3814 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
3815 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_MOVA
3816 /* Don't match mova_const. */
3817 && GET_CODE (MOVA_LABELREF (insn)) == LABEL_REF);
3820 /* Fix up a mova from a switch that went out of range. */
3822 fixup_mova (rtx mova)
3824 PUT_MODE (XEXP (MOVA_LABELREF (mova), 0), QImode);
3827 SET_SRC (PATTERN (mova)) = MOVA_LABELREF (mova);
3828 INSN_CODE (mova) = -1;
3833 rtx lab = gen_label_rtx ();
3834 rtx wpat, wpat0, wpat1, wsrc, diff;
3838 worker = NEXT_INSN (worker);
3840 && GET_CODE (worker) != CODE_LABEL
3841 && GET_CODE (worker) != JUMP_INSN);
3842 } while (GET_CODE (worker) == NOTE
3843 || recog_memoized (worker) != CODE_FOR_casesi_worker_1);
3844 wpat = PATTERN (worker);
3845 wpat0 = XVECEXP (wpat, 0, 0);
3846 wpat1 = XVECEXP (wpat, 0, 1);
3847 wsrc = SET_SRC (wpat0);
3848 PATTERN (worker) = (gen_casesi_worker_2
3849 (SET_DEST (wpat0), XVECEXP (wsrc, 0, 1),
3850 XEXP (XVECEXP (wsrc, 0, 2), 0), lab,
3852 INSN_CODE (worker) = -1;
3853 diff = gen_rtx_MINUS (Pmode, XVECEXP (SET_SRC (PATTERN (mova)), 0, 0),
3854 gen_rtx_LABEL_REF (Pmode, lab));
3855 diff = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, diff), UNSPEC_PIC);
3856 SET_SRC (PATTERN (mova)) = gen_rtx_CONST (Pmode, diff);
3857 INSN_CODE (mova) = -1;
3861 /* NEW_MOVA is a mova we've just encountered while scanning forward. Update
3862 *num_mova, and check if the new mova is not nested within the first one.
3863 Return 0 if *first_mova was replaced, 1 if new_mova was replaced,
3864 2 if new_mova has been assigned to *first_mova, -1 otherwise. */
3866 untangle_mova (int *num_mova, rtx *first_mova, rtx new_mova)
3868 int n_addr = 0; /* Initialization to shut up spurious warning. */
3869 int f_target, n_target = 0; /* Likewise. */
3873 n_addr = INSN_ADDRESSES (INSN_UID (new_mova));
3874 n_target = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (new_mova), 0)));
3875 if (n_addr > n_target || n_addr + 1022 < n_target)
3877 /* Change the mova into a load.
3878 broken_move will then return true for it. */
3879 fixup_mova (new_mova);
3885 *first_mova = new_mova;
3890 = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (*first_mova), 0))))
3895 if (f_target - INSN_ADDRESSES (INSN_UID (*first_mova))
3896 > n_target - n_addr)
3898 fixup_mova (*first_mova);
3903 fixup_mova (new_mova);
3908 /* Find the last barrier from insn FROM which is close enough to hold the
3909 constant pool. If we can't find one, then create one near the end of
3910 the range. */
3913 find_barrier (int num_mova, rtx mova, rtx from)
3922 int leading_mova = num_mova;
3923 rtx barrier_before_mova = 0, found_barrier = 0, good_barrier = 0;
3928 /* For HImode: range is 510, add 4 because pc counts from address of
3929 second instruction after this one, subtract 2 for the jump instruction
3930 that we may need to emit before the table, subtract 2 for the instruction
3931 that fills the jump delay slot (in very rare cases, reorg will take an
3932 instruction from after the constant pool or will leave the delay slot
3933 empty). This gives 510.
3934 For SImode: range is 1020, add 4 because pc counts from address of
3935 second instruction after this one, subtract 2 in case pc is 2 byte
3936 aligned, subtract 2 for the jump instruction that we may need to emit
3937 before the table, subtract 2 for the instruction that fills the jump
3938 delay slot. This gives 1018. */
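
/* Editorial restatement of the arithmetic above: mov.w reaches
   255 * 2 = 510 bytes, so 510 + 4 - 2 - 2 = 510; mov.l reaches
   255 * 4 = 1020 bytes, so 1020 + 4 - 2 - 2 - 2 = 1018.  */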
3940 /* The branch will always be shortened now that the reference address for
3941 forward branches is the successor address, thus we need no longer make
3942 adjustments to the [sh]i_limit for -O0. */
3947 while (from && count_si < si_limit && count_hi < hi_limit)
3949 int inc = get_attr_length (from);
3952 /* If this is a label that existed at the time of the compute_alignments
3953 call, determine the alignment. N.B. When find_barrier recurses for
3954 an out-of-reach mova, we might see labels at the start of previously
3955 inserted constant tables. */
3956 if (GET_CODE (from) == CODE_LABEL
3957 && CODE_LABEL_NUMBER (from) <= max_labelno_before_reorg)
3960 new_align = 1 << label_to_alignment (from);
3961 else if (GET_CODE (prev_nonnote_insn (from)) == BARRIER)
3962 new_align = 1 << barrier_align (from);
3967 /* In case we are scanning a constant table because of recursion, check
3968 for explicit alignments. If the table is long, we might be forced
3969 to emit the new table in front of it; the length of the alignment
3970 might be the last straw. */
3971 else if (GET_CODE (from) == INSN
3972 && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
3973 && XINT (PATTERN (from), 1) == UNSPECV_ALIGN)
3974 new_align = INTVAL (XVECEXP (PATTERN (from), 0, 0));
3975 /* When we find the end of a constant table, paste the new constant
3976 at the end. That is better than putting it in front because
3977 this way, we don't need extra alignment for adding a 4-byte-aligned
3978 mov(a) label to a 2/4 or 8/4 byte aligned table. */
3979 else if (GET_CODE (from) == INSN
3980 && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
3981 && XINT (PATTERN (from), 1) == UNSPECV_CONST_END)
3984 if (GET_CODE (from) == BARRIER)
3988 found_barrier = from;
3990 /* If we are at the end of the function, or in front of an alignment
3991 instruction, we need not insert an extra alignment. We prefer
3992 this kind of barrier. */
3993 if (barrier_align (from) > 2)
3994 good_barrier = from;
3996 /* If we are at the end of a hot/cold block, dump the constants
3997 here. */
3998 next = NEXT_INSN (from);
4001 && NOTE_KIND (next) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
4005 if (broken_move (from))
4008 enum machine_mode mode;
4010 pat = PATTERN (from);
4011 if (GET_CODE (pat) == PARALLEL)
4012 pat = XVECEXP (pat, 0, 0);
4013 src = SET_SRC (pat);
4014 dst = SET_DEST (pat);
4015 mode = GET_MODE (dst);
4017 /* We must explicitly check the mode, because sometimes the
4018 front end will generate code to load unsigned constants into
4019 HImode targets without properly sign extending them. */
4021 || (mode == SImode && hi_const (src) && REGNO (dst) != FPUL_REG))
4024 /* We put the short constants before the long constants, so
4025 we must count the length of short constants in the range
4026 for the long constants. */
4027 /* ??? This isn't optimal, but is easy to do. */
4032 /* We dump DF/DI constants before SF/SI ones, because
4033 the limit is the same, but the alignment requirements
4034 are higher. We may waste up to 4 additional bytes
4035 for alignment, and the DF/DI constant may have
4036 another SF/SI constant placed before it. */
4037 if (TARGET_SHCOMPACT
4039 && (mode == DFmode || mode == DImode))
4044 while (si_align > 2 && found_si + si_align - 2 > count_si)
4046 if (found_si > count_si)
4047 count_si = found_si;
4048 found_si += GET_MODE_SIZE (mode);
4050 si_limit -= GET_MODE_SIZE (mode);
4056 switch (untangle_mova (&num_mova, &mova, from))
4058 case 0: return find_barrier (0, 0, mova);
4063 = good_barrier ? good_barrier : found_barrier;
4067 if (found_si > count_si)
4068 count_si = found_si;
4070 else if (GET_CODE (from) == JUMP_INSN
4071 && (GET_CODE (PATTERN (from)) == ADDR_VEC
4072 || GET_CODE (PATTERN (from)) == ADDR_DIFF_VEC))
4074 if ((num_mova > 1 && GET_MODE (prev_nonnote_insn (from)) == VOIDmode)
4076 && (prev_nonnote_insn (from)
4077 == XEXP (MOVA_LABELREF (mova), 0))))
4079 if (barrier_align (next_real_insn (from)) == align_jumps_log)
4081 /* We have just passed the barrier in front of the
4082 ADDR_DIFF_VEC, which is stored in found_barrier. Since
4083 the ADDR_DIFF_VEC is accessed as data, just like our pool
4084 constants, this is a good opportunity to accommodate what
4085 we have gathered so far.
4086 If we waited any longer, we could end up at a barrier in
4087 front of code, which gives worse cache usage for separated
4088 instruction / data caches. */
4089 good_barrier = found_barrier;
4094 rtx body = PATTERN (from);
4095 inc = XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body));
4098 /* For the SH1, we generate alignments even after jumps-around-jumps. */
4099 else if (GET_CODE (from) == JUMP_INSN
4101 && ! TARGET_SMALLCODE)
4107 if (new_align > si_align)
4109 si_limit -= (count_si - 1) & (new_align - si_align);
4110 si_align = new_align;
4112 count_si = (count_si + new_align - 1) & -new_align;
4117 if (new_align > hi_align)
4119 hi_limit -= (count_hi - 1) & (new_align - hi_align);
4120 hi_align = new_align;
4122 count_hi = (count_hi + new_align - 1) & -new_align;
4124 from = NEXT_INSN (from);
4131 /* Try as we might, the leading mova is out of range. Change
4132 it into a load (which will become a pcload) and retry. */
4134 return find_barrier (0, 0, mova);
4138 /* Insert the constant pool table before the mova instruction,
4139 to prevent the mova label reference from going out of range. */
4141 good_barrier = found_barrier = barrier_before_mova;
4147 if (good_barrier && next_real_insn (found_barrier))
4148 found_barrier = good_barrier;
4152 /* We didn't find a barrier in time to dump our stuff,
4153 so we'll make one. */
4154 rtx label = gen_label_rtx ();
4156 /* If we exceeded the range, then we must back up over the last
4157 instruction we looked at. Otherwise, we just need to undo the
4158 NEXT_INSN at the end of the loop. */
4159 if (PREV_INSN (from) != orig
4160 && (count_hi > hi_limit || count_si > si_limit))
4161 from = PREV_INSN (PREV_INSN (from));
4163 from = PREV_INSN (from);
4165 /* Walk back to be just before any jump or label.
4166 Putting it before a label reduces the number of times the branch
4167 around the constant pool table will be hit. Putting it before
4168 a jump makes it more likely that the bra delay slot will be
4169 filled. */
4170 while (GET_CODE (from) == JUMP_INSN || GET_CODE (from) == NOTE
4171 || GET_CODE (from) == CODE_LABEL)
4172 from = PREV_INSN (from);
4174 from = emit_jump_insn_after (gen_jump (label), from);
4175 JUMP_LABEL (from) = label;
4176 LABEL_NUSES (label) = 1;
4177 found_barrier = emit_barrier_after (from);
4178 emit_label_after (label, found_barrier);
4181 return found_barrier;
4184 /* If the instruction INSN is implemented by a special function, and we can
4185 positively find the register that is used to call the sfunc, and this
4186 register is not used anywhere else in this instruction - except as the
4187 destination of a set, return this register; else, return 0. */
4189 sfunc_uses_reg (rtx insn)
4192 rtx pattern, part, reg_part, reg;
4194 if (GET_CODE (insn) != INSN)
4196 pattern = PATTERN (insn);
4197 if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
4200 for (reg_part = 0, i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
4202 part = XVECEXP (pattern, 0, i);
4203 if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == SImode)
4208 reg = XEXP (reg_part, 0);
4209 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
4211 part = XVECEXP (pattern, 0, i);
4212 if (part == reg_part || GET_CODE (part) == CLOBBER)
4214 if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
4215 && GET_CODE (SET_DEST (part)) == REG)
4216 ? SET_SRC (part) : part)))
4222 /* See if the only way in which INSN uses REG is by calling it, or by
4223 setting it while calling it. Set *SET to a SET rtx if the register
4224 is set by this insn. */
4227 noncall_uses_reg (rtx reg, rtx insn, rtx *set)
4233 reg2 = sfunc_uses_reg (insn);
4234 if (reg2 && REGNO (reg2) == REGNO (reg))
4236 pattern = single_set (insn);
4238 && GET_CODE (SET_DEST (pattern)) == REG
4239 && REGNO (reg) == REGNO (SET_DEST (pattern)))
4243 if (GET_CODE (insn) != CALL_INSN)
4245 /* We don't use rtx_equal_p because we don't care if the mode is
4247 pattern = single_set (insn);
4249 && GET_CODE (SET_DEST (pattern)) == REG
4250 && REGNO (reg) == REGNO (SET_DEST (pattern)))
4256 par = PATTERN (insn);
4257 if (GET_CODE (par) == PARALLEL)
4258 for (i = XVECLEN (par, 0) - 1; i >= 0; i--)
4260 part = XVECEXP (par, 0, i);
4261 if (GET_CODE (part) != SET && reg_mentioned_p (reg, part))
4264 return reg_mentioned_p (reg, SET_SRC (pattern));
4270 pattern = PATTERN (insn);
4272 if (GET_CODE (pattern) == PARALLEL)
4276 for (i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
4277 if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
4279 pattern = XVECEXP (pattern, 0, 0);
4282 if (GET_CODE (pattern) == SET)
4284 if (reg_mentioned_p (reg, SET_DEST (pattern)))
4286 /* We don't use rtx_equal_p, because we don't care if the
4287 mode is different. */
4288 if (GET_CODE (SET_DEST (pattern)) != REG
4289 || REGNO (reg) != REGNO (SET_DEST (pattern)))
4295 pattern = SET_SRC (pattern);
4298 if (GET_CODE (pattern) != CALL
4299 || GET_CODE (XEXP (pattern, 0)) != MEM
4300 || ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
4306 /* Given X, a pattern of an insn or a part of it, return a mask of used
4307 general registers. Bits 0..15 mean that the respective registers
4308 are used as inputs in the instruction. Bits 16..31 mean that the
4309 registers 0..15, respectively, are used as outputs, or are clobbered.
4310 IS_DEST should be set to 16 if X is the destination of a SET, else to 0. */
4312 regs_used (rtx x, int is_dest)
4320 code = GET_CODE (x);
4325 return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
4326 << (REGNO (x) + is_dest));
4330 rtx y = SUBREG_REG (x);
4332 if (GET_CODE (y) != REG)
4335 return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
4337 subreg_regno_offset (REGNO (y),
4340 GET_MODE (x)) + is_dest));
4344 return regs_used (SET_SRC (x), 0) | regs_used (SET_DEST (x), 16);
4346 /* If there was a return value, it must have been indicated with USE. */
4361 fmt = GET_RTX_FORMAT (code);
4363 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4368 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4369 used |= regs_used (XVECEXP (x, i, j), is_dest);
4371 else if (fmt[i] == 'e')
4372 used |= regs_used (XEXP (x, i), is_dest);
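
/* Host-side sketch (an assumption, not GCC code) of the mask layout
   returned by regs_used: bit R means r<R> is used as an input, bit
   R + 16 that it is set or clobbered.  gen_block_redirect below derives
   its dead-register set from such masks:  */
#if 0
static unsigned
dead_regs (unsigned used)
{
  /* Set (high half) but never read (low half) => dead here.  */
  return (used >> 16) & ~used;
}
#endif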
4377 /* Create an instruction that prevents redirection of a conditional branch
4378 to the destination of the JUMP with address ADDR.
4379 If the branch needs to be implemented as an indirect jump, try to find
4380 a scratch register for it.
4381 If NEED_BLOCK is 0, don't do anything unless we need a scratch register.
4382 If any preceding insn that doesn't fit into a delay slot is good enough,
4383 pass 1. Pass 2 if a definite blocking insn is needed.
4384 -1 is used internally to avoid deep recursion.
4385 If a blocking instruction is made or recognized, return it. */
4388 gen_block_redirect (rtx jump, int addr, int need_block)
4391 rtx prev = prev_nonnote_insn (jump);
4394 /* First, check if we already have an instruction that satisfies our need. */
4395 if (prev && GET_CODE (prev) == INSN && ! INSN_DELETED_P (prev))
4397 if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
4399 if (GET_CODE (PATTERN (prev)) == USE
4400 || GET_CODE (PATTERN (prev)) == CLOBBER
4401 || get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
4403 else if ((need_block &= ~1) < 0)
4405 else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
4408 if (GET_CODE (PATTERN (jump)) == RETURN)
4412 /* Reorg even does nasty things with return insns that cause branches
4413 to go out of range - see find_end_label and callers. */
4414 return emit_insn_before (gen_block_branch_redirect (const0_rtx) , jump);
4416 /* We can't use JUMP_LABEL here because it might be undefined
4417 when not optimizing. */
4418 dest = XEXP (SET_SRC (PATTERN (jump)), 0);
4419 /* If the branch is out of range, try to find a scratch register for it. */
4421 && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
4425 /* Don't look for the stack pointer as a scratch register;
4426 it would cause trouble if an interrupt occurred. */
4427 unsigned try = 0x7fff, used;
4428 int jump_left = flag_expensive_optimizations + 1;
4430 /* It is likely that the most recent eligible instruction is wanted for
4431 the delay slot. Therefore, find out which registers it uses, and
4432 try to avoid using them. */
4434 for (scan = jump; (scan = PREV_INSN (scan)); )
4438 if (INSN_DELETED_P (scan))
4440 code = GET_CODE (scan);
4441 if (code == CODE_LABEL || code == JUMP_INSN)
4444 && GET_CODE (PATTERN (scan)) != USE
4445 && GET_CODE (PATTERN (scan)) != CLOBBER
4446 && get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES)
4448 try &= ~regs_used (PATTERN (scan), 0);
4452 for (used = dead = 0, scan = JUMP_LABEL (jump);
4453 (scan = NEXT_INSN (scan)); )
4457 if (INSN_DELETED_P (scan))
4459 code = GET_CODE (scan);
4462 used |= regs_used (PATTERN (scan), 0);
4463 if (code == CALL_INSN)
4464 used |= regs_used (CALL_INSN_FUNCTION_USAGE (scan), 0);
4465 dead |= (used >> 16) & ~used;
4471 if (code == JUMP_INSN)
4473 if (jump_left-- && simplejump_p (scan))
4474 scan = JUMP_LABEL (scan);
4480 /* Mask out the stack pointer again, in case it was
4481 the only 'free' register we have found. */
4484 /* If the immediate destination is still in range, check for possible
4485 threading with a jump beyond the delay slot insn.
4486 Don't check if we are called recursively; the jump has been or will be
4487 checked in a different invocation in that case. */
4489 else if (optimize && need_block >= 0)
4491 rtx next = next_active_insn (next_active_insn (dest));
4492 if (next && GET_CODE (next) == JUMP_INSN
4493 && GET_CODE (PATTERN (next)) == SET
4494 && recog_memoized (next) == CODE_FOR_jump_compact)
4496 dest = JUMP_LABEL (next);
4498 && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
4500 gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), -1);
4506 rtx reg = gen_rtx_REG (SImode, exact_log2 (dead & -dead));
4508 /* It would be nice if we could convert the jump into an indirect
4509 jump / far branch right now, and thus expose all constituent
4510 instructions to further optimization. However, reorg uses
4511 simplejump_p to determine if there is an unconditional jump where
4512 it should try to schedule instructions from the target of the
4513 branch; simplejump_p fails for indirect jumps even if they have
4515 rtx insn = emit_insn_before (gen_indirect_jump_scratch
4516 (reg, GEN_INT (INSN_UID (JUMP_LABEL (jump))))
4518 /* ??? We would like this to have the scope of the jump, but that
4519 scope will change when a delay slot insn of an inner scope is added.
4520 Hence, after delay slot scheduling, we'll have to expect
4521 NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and
4524 INSN_LOCATOR (insn) = INSN_LOCATOR (jump);
4525 INSN_CODE (insn) = CODE_FOR_indirect_jump_scratch;
4528 else if (need_block)
4529 /* We can't use JUMP_LABEL here because it might be undefined
4530 when not optimizing. */
4531 return emit_insn_before (gen_block_branch_redirect
4532 (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))))
4537 #define CONDJUMP_MIN -252
4538 #define CONDJUMP_MAX 262
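/* Illustrative sketch, not part of the original sources: how the two
   bounds above are meant to be used.  An SH conditional branch (bt/bf)
   encodes an 8-bit signed displacement scaled by 2; after the pc-offset
   and branch-length adjustments used in this file, that yields the
   asymmetric byte range [CONDJUMP_MIN, CONDJUMP_MAX].  */
static int ATTRIBUTE_UNUSED
condjump_in_range_example (int branch_addr, int target_addr)
{
  int dist = target_addr - branch_addr;
  return dist >= CONDJUMP_MIN && dist <= CONDJUMP_MAX;
}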
4541 /* A label (to be placed) in front of the jump
4542 that jumps to our ultimate destination. */
4544 /* Where we are going to insert it if we cannot move the jump any farther,
4545 or the jump itself if we have picked up an existing jump. */
4547 /* The ultimate destination. */
4549 struct far_branch *prev;
4550 /* If the branch has already been created, its address;
4551 else the address of its first prospective user. */
4555 static void gen_far_branch (struct far_branch *);
4556 enum mdep_reorg_phase_e mdep_reorg_phase;
4558 static void gen_far_branch (struct far_branch *bp)
4560 rtx insn = bp->insert_place;
4562 rtx label = gen_label_rtx ();
4565 emit_label_after (label, insn);
4568 jump = emit_jump_insn_after (gen_jump (bp->far_label), insn);
4569 LABEL_NUSES (bp->far_label)++;
4572 jump = emit_jump_insn_after (gen_return (), insn);
4573 /* Emit a barrier so that reorg knows that any following instructions
4574 are not reachable via a fall-through path.
4575 But don't do this when not optimizing, since we wouldn't suppress the
4576 alignment for the barrier then, and could end up with out-of-range
4577 pc-relative loads. */
4579 emit_barrier_after (jump);
4580 emit_label_after (bp->near_label, insn);
4581 JUMP_LABEL (jump) = bp->far_label;
4582 ok = invert_jump (insn, label, 1);
4585 /* If we are branching around a jump (rather than a return), prevent
4586 reorg from using an insn from the jump target as the delay slot insn -
4587 when reorg did this, it pessimized code (we'd rather hide the delay slot)
4588 and it could cause branches to go out of range. */
4591 (gen_stuff_delay_slot
4592 (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))),
4593 GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
4595 /* Prevent reorg from undoing our splits. */
4596 gen_block_redirect (jump, bp->address += 2, 2);
4599 /* Fix up ADDR_DIFF_VECs. */
4601 static void fixup_addr_diff_vecs (rtx first)
4605 for (insn = first; insn; insn = NEXT_INSN (insn))
4607 rtx vec_lab, pat, prev, prevpat, x, braf_label;
4609 if (GET_CODE (insn) != JUMP_INSN
4610 || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
4612 pat = PATTERN (insn);
4613 vec_lab = XEXP (XEXP (pat, 0), 0);
4615 /* Search the matching casesi_jump_2. */
4616 for (prev = vec_lab; ; prev = PREV_INSN (prev))
4618 if (GET_CODE (prev) != JUMP_INSN)
4620 prevpat = PATTERN (prev);
4621 if (GET_CODE (prevpat) != PARALLEL || XVECLEN (prevpat, 0) != 2)
4623 x = XVECEXP (prevpat, 0, 1);
4624 if (GET_CODE (x) != USE)
4627 if (GET_CODE (x) == LABEL_REF && XEXP (x, 0) == vec_lab)
4630 /* FIXME: This is a bug in the optimizer, but it seems harmless
4631 to just avoid panicking. */
4635 /* Emit the reference label of the braf where it belongs, right after
4636 the casesi_jump_2 (i.e. braf). */
4637 braf_label = XEXP (XEXP (SET_SRC (XVECEXP (prevpat, 0, 0)), 1), 0);
4638 emit_label_after (braf_label, prev);
4640 /* Fix up the ADDR_DIFF_VEC to be relative
4641 to the reference address of the braf. */
4642 XEXP (XEXP (pat, 0), 0) = braf_label;
4646 /* BARRIER_OR_LABEL is either a BARRIER or a CODE_LABEL immediately following
4647 a barrier. Return the base 2 logarithm of the desired alignment. */
4649 int barrier_align (rtx barrier_or_label)
4651 rtx next = next_real_insn (barrier_or_label), pat, prev;
4652 int slot, credit, jump_to_next = 0;
4657 pat = PATTERN (next);
4659 if (GET_CODE (pat) == ADDR_DIFF_VEC)
4662 if (GET_CODE (pat) == UNSPEC_VOLATILE && XINT (pat, 1) == UNSPECV_ALIGN)
4663 /* This is a barrier in front of a constant table. */
4666 prev = prev_real_insn (barrier_or_label);
4667 if (GET_CODE (PATTERN (prev)) == ADDR_DIFF_VEC)
4669 pat = PATTERN (prev);
4670 /* If this is a very small table, we want to keep the alignment after
4671 the table to the minimum for proper code alignment. */
4672 return ((TARGET_SMALLCODE
4673 || ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
4674 <= (unsigned) 1 << (CACHE_LOG - 2)))
4675 ? 1 << TARGET_SHMEDIA : align_jumps_log);
4678 if (TARGET_SMALLCODE)
4681 if (! TARGET_SH2 || ! optimize)
4682 return align_jumps_log;
4684 /* When fixing up pcloads, a constant table might be inserted just before
4685 the basic block that ends with the barrier. Thus, we can't trust the
4686 instruction lengths before that. */
4687 if (mdep_reorg_phase > SH_FIXUP_PCLOAD)
4689 /* Check if there is an immediately preceding branch to the insn beyond
4690 the barrier. We must weigh the cost of discarding useful information
4691 from the current cache line when executing this branch while there is
4692 an alignment, against that of fetching unneeded insns in front of the
4693 branch target when there is no alignment. */
4695 /* There are two delay_slot cases to consider. One is the simple case
4696 where the preceding branch is to the insn beyond the barrier (simple
4697 delay slot filling), and the other is where the preceding branch has
4698 a delay slot that is a duplicate of the insn after the barrier
4699 (fill_eager_delay_slots) and the branch is to the insn after the insn
4700 after the barrier. */
4702 /* PREV is presumed to be the JUMP_INSN for the barrier under
4703 investigation. Skip to the insn before it. */
4704 prev = prev_real_insn (prev);
4706 for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
4707 credit >= 0 && prev && GET_CODE (prev) == INSN;
4708 prev = prev_real_insn (prev))
4711 if (GET_CODE (PATTERN (prev)) == USE
4712 || GET_CODE (PATTERN (prev)) == CLOBBER)
4714 if (GET_CODE (PATTERN (prev)) == SEQUENCE)
4716 prev = XVECEXP (PATTERN (prev), 0, 1);
4717 if (INSN_UID (prev) == INSN_UID (next))
4719 /* Delay slot was filled with insn at jump target. */
4726 get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
4728 credit -= get_attr_length (prev);
4731 && GET_CODE (prev) == JUMP_INSN
4732 && JUMP_LABEL (prev))
4736 || next_real_insn (JUMP_LABEL (prev)) == next
4737 /* If relax_delay_slots() decides NEXT was redundant
4738 with some previous instruction, it will have
4739 redirected PREV's jump to the following insn. */
4740 || JUMP_LABEL (prev) == next_nonnote_insn (next)
4741 /* There is no upper bound on redundant instructions
4742 that might have been skipped, but we must not put an
4743 alignment where none had been before. */
4744 || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
4746 && (INSN_CODE (x) == CODE_FOR_block_branch_redirect
4747 || INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
4748 || INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
4750 rtx pat = PATTERN (prev);
4751 if (GET_CODE (pat) == PARALLEL)
4752 pat = XVECEXP (pat, 0, 0);
4753 if (credit - slot >= (GET_CODE (SET_SRC (pat)) == PC ? 2 : 0))
4759 return align_jumps_log;
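/* Note, added for clarity: as the comment before barrier_align states,
   the value returned is a base-2 logarithm, so a return value of N
   requests 2**N-byte alignment from the output machinery (e.g.
   align_jumps_log is the log2 form of the -falign-jumps value).  */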
4762 /* If we are inside a phony loop, almost any kind of label can turn up as the
4763 first one in the loop. Aligning a braf label causes incorrect switch
4764 destination addresses; we can detect braf labels because they are
4765 followed by a BARRIER.
4766 Applying loop alignment to small constant or switch tables is a waste
4767 of space, so we suppress this too. */
4769 int sh_loop_align (rtx label)
4774 next = next_nonnote_insn (next);
4775 while (next && GET_CODE (next) == CODE_LABEL);
4779 || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC
4780 || recog_memoized (next) == CODE_FOR_consttable_2)
4783 return align_loops_log;
4786 /* Do a final pass over the function, just before delayed branch scheduling. */
4790 static void sh_reorg (void)
4792 rtx first, insn, mova = NULL_RTX;
4794 rtx r0_rtx = gen_rtx_REG (Pmode, 0);
4795 rtx r0_inc_rtx = gen_rtx_POST_INC (Pmode, r0_rtx);
4797 first = get_insns ();
4798 max_labelno_before_reorg = max_label_num ();
4800 /* We must split call insns before introducing `mova's. If we're
4801 optimizing, they'll have already been split. Otherwise, make
4802 sure we don't split them too late. */
4804 split_all_insns_noflow ();
4809 /* If relaxing, generate pseudo-ops to associate function calls with
4810 the symbols they call. It does no harm not to generate these
4811 pseudo-ops. However, when we can generate them, it enables the
4812 linker to potentially relax the jsr to a bsr, and eliminate the
4813 register load and, possibly, the constant pool entry. */
4815 mdep_reorg_phase = SH_INSERT_USES_LABELS;
4818 /* Remove all REG_LABEL_OPERAND notes. We want to use them for our
4819 own purposes. This works because none of the remaining passes
4820 need to look at them.
4822 ??? But it may break in the future. We should use a machine
4823 dependent REG_NOTE, or some other approach entirely. */
4824 for (insn = first; insn; insn = NEXT_INSN (insn))
4830 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != 0)
4832 remove_note (insn, note);
4836 for (insn = first; insn; insn = NEXT_INSN (insn))
4838 rtx pattern, reg, link, set, scan, dies, label;
4839 int rescan = 0, foundinsn = 0;
4841 if (GET_CODE (insn) == CALL_INSN)
4843 pattern = PATTERN (insn);
4845 if (GET_CODE (pattern) == PARALLEL)
4846 pattern = XVECEXP (pattern, 0, 0);
4847 if (GET_CODE (pattern) == SET)
4848 pattern = SET_SRC (pattern);
4850 if (GET_CODE (pattern) != CALL
4851 || GET_CODE (XEXP (pattern, 0)) != MEM)
4854 reg = XEXP (XEXP (pattern, 0), 0);
4858 reg = sfunc_uses_reg (insn);
4863 if (GET_CODE (reg) != REG)
4866 /* Try scanning backward to find where the register is set. */
4868 for (scan = PREV_INSN (insn);
4869 scan && GET_CODE (scan) != CODE_LABEL;
4870 scan = PREV_INSN (scan))
4872 if (! INSN_P (scan))
4875 if (! reg_mentioned_p (reg, scan))
4878 if (noncall_uses_reg (reg, scan, &set))
4891 /* The register is set at LINK. */
4893 /* We can only optimize the function call if the register is
4894 being set to a symbol. In theory, we could sometimes
4895 optimize calls to a constant location, but the assembler
4896 and linker do not support that at present. */
4897 if (GET_CODE (SET_SRC (set)) != SYMBOL_REF
4898 && GET_CODE (SET_SRC (set)) != LABEL_REF)
4901 /* Scan forward from LINK to the place where REG dies, and
4902 make sure that the only insns which use REG are
4903 themselves function calls. */
4905 /* ??? This doesn't work for call targets that were allocated
4906 by reload, since there may not be a REG_DEAD note for the register. */
4910 for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
4914 /* Don't try to trace forward past a CODE_LABEL if we haven't
4915 seen INSN yet. Ordinarily, we will only find the setting insn
4916 if it is in the same basic block. However,
4917 cross-jumping can insert code labels in between the load and
4918 the call, and can result in situations where a single call
4919 insn may have two targets depending on where we came from. */
4921 if (GET_CODE (scan) == CODE_LABEL && ! foundinsn)
4924 if (! INSN_P (scan))
4927 /* Don't try to trace forward past a JUMP. To optimize
4928 safely, we would have to check that all the
4929 instructions at the jump destination did not use REG. */
4931 if (GET_CODE (scan) == JUMP_INSN)
4934 if (! reg_mentioned_p (reg, scan))
4937 if (noncall_uses_reg (reg, scan, &scanset))
4944 && (GET_CODE (scan) == CALL_INSN || sfunc_uses_reg (scan)))
4946 /* There is a function call to this register other
4947 than the one we are checking. If we optimize
4948 this call, we need to rescan again below. */
4952 /* ??? We shouldn't have to worry about SCANSET here.
4953 We should just be able to check for a REG_DEAD note
4954 on a function call. However, the REG_DEAD notes are
4955 apparently not dependable around libcalls; c-torture
4956 execute/920501-2 is a test case. If SCANSET is set,
4957 then this insn sets the register, so it must have
4958 died earlier. Unfortunately, this will only handle
4959 the cases in which the register is, in fact, set in a later insn. */
4962 /* ??? We shouldn't have to use FOUNDINSN here.
4963 This dates back to when we used LOG_LINKS to find
4964 the most recent insn which sets the register. */
4968 || find_reg_note (scan, REG_DEAD, reg)))
4977 /* Either there was a branch, or some insn used REG
4978 other than as a function call address. */
4982 /* Create a code label, and put it in a REG_LABEL_OPERAND note
4983 on the insn which sets the register, and on each call insn
4984 which uses the register. In final_prescan_insn we look for
4985 the REG_LABEL_OPERAND notes, and output the appropriate label
4988 label = gen_label_rtx ();
4989 add_reg_note (link, REG_LABEL_OPERAND, label);
4990 add_reg_note (insn, REG_LABEL_OPERAND, label);
4998 scan = NEXT_INSN (scan);
5000 && ((GET_CODE (scan) == CALL_INSN
5001 && reg_mentioned_p (reg, scan))
5002 || ((reg2 = sfunc_uses_reg (scan))
5003 && REGNO (reg2) == REGNO (reg))))
5004 add_reg_note (scan, REG_LABEL_OPERAND, label);
5006 while (scan != dies);
5012 fixup_addr_diff_vecs (first);
5016 mdep_reorg_phase = SH_SHORTEN_BRANCHES0;
5017 shorten_branches (first);
5020 /* Scan the function looking for move instructions which have to be
5021 changed to pc-relative loads and insert the literal tables. */
5022 label_ref_list_pool = create_alloc_pool ("label references list",
5023 sizeof (struct label_ref_list_d),
5025 mdep_reorg_phase = SH_FIXUP_PCLOAD;
5026 for (insn = first, num_mova = 0; insn; insn = NEXT_INSN (insn))
5030 /* ??? basic block reordering can move a switch table dispatch
5031 below the switch table. Check if that has happened.
5032 We only have the addresses available when optimizing; but then,
5033 this check shouldn't be needed when not optimizing. */
5034 if (!untangle_mova (&num_mova, &mova, insn))
5040 else if (GET_CODE (insn) == JUMP_INSN
5041 && GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
5043 /* ??? Loop invariant motion can also move a mova out of a
5044 loop. Since loop does this code motion anyway, maybe we
5045 should wrap UNSPEC_MOVA into a CONST, so that reload can fix it up. */
5048 && GET_MODE (prev_nonnote_insn (insn)) == VOIDmode)
5049 || (prev_nonnote_insn (insn)
5050 == XEXP (MOVA_LABELREF (mova), 0))))
5057 /* Some code might have been inserted between the mova and
5058 its ADDR_DIFF_VEC. Check if the mova is still in range. */
5059 for (scan = mova, total = 0; scan != insn; scan = NEXT_INSN (scan))
5060 total += get_attr_length (scan);
5062 /* The range of mova is 1020; add 4 because the pc counts from the address
5063 of the second instruction after this one, and subtract 2 in case the pc is
5064 2-byte aligned, giving a limit of 1020 + 4 - 2 = 1022. Possible alignment
5065 needed for the ADDR_DIFF_VEC cancels out with alignment effects of the mova itself. */
5068 /* Change the mova into a load, and restart scanning
5069 there. broken_move will then return true for mova. */
5074 if (broken_move (insn)
5075 || (GET_CODE (insn) == INSN
5076 && recog_memoized (insn) == CODE_FOR_casesi_worker_2))
5079 /* Scan ahead looking for a barrier to stick the constant table
5081 rtx barrier = find_barrier (num_mova, mova, insn);
5082 rtx last_float_move = NULL_RTX, last_float = 0, *last_float_addr = NULL;
5083 int need_aligned_label = 0;
5085 if (num_mova && ! mova_p (mova))
5087 /* find_barrier had to change the first mova into a
5088 pcload; thus, we have to start with this new pcload. */
5092 /* Now find all the moves between the points and modify them. */
5093 for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
5095 if (GET_CODE (scan) == CODE_LABEL)
5097 if (GET_CODE (scan) == INSN
5098 && recog_memoized (scan) == CODE_FOR_casesi_worker_2)
5099 need_aligned_label = 1;
5100 if (broken_move (scan))
5102 rtx *patp = &PATTERN (scan), pat = *patp;
5106 enum machine_mode mode;
5108 if (GET_CODE (pat) == PARALLEL)
5109 patp = &XVECEXP (pat, 0, 0), pat = *patp;
5110 src = SET_SRC (pat);
5111 dst = SET_DEST (pat);
5112 mode = GET_MODE (dst);
5114 if (mode == SImode && hi_const (src)
5115 && REGNO (dst) != FPUL_REG)
5120 while (GET_CODE (dst) == SUBREG)
5122 offset += subreg_regno_offset (REGNO (SUBREG_REG (dst)),
5123 GET_MODE (SUBREG_REG (dst)),
5126 dst = SUBREG_REG (dst);
5128 dst = gen_rtx_REG (HImode, REGNO (dst) + offset);
5130 if (GET_CODE (dst) == REG && FP_ANY_REGISTER_P (REGNO (dst)))
5132 /* This must be an insn that clobbers r0. */
5133 rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
5134 XVECLEN (PATTERN (scan), 0)
5136 rtx clobber = *clobberp;
5138 gcc_assert (GET_CODE (clobber) == CLOBBER
5139 && rtx_equal_p (XEXP (clobber, 0), r0_rtx));
5142 && reg_set_between_p (r0_rtx, last_float_move, scan))
5146 && GET_MODE_SIZE (mode) != 4
5147 && GET_MODE_SIZE (GET_MODE (last_float)) == 4)
5149 lab = add_constant (src, mode, last_float);
5151 emit_insn_before (gen_mova (lab), scan);
5154 /* There will be a REG_UNUSED note for r0 on
5155 LAST_FLOAT_MOVE; we have to change it to REG_INC,
5156 lest reorg:mark_target_live_regs will not
5157 consider r0 to be used, and we end up with a delay
5158 slot insn in front of SCAN that clobbers r0. */
5160 = find_regno_note (last_float_move, REG_UNUSED, 0);
5162 /* If we are not optimizing, then there may not be a note. */
5165 PUT_MODE (note, REG_INC);
5167 *last_float_addr = r0_inc_rtx;
5169 last_float_move = scan;
5171 newsrc = gen_const_mem (mode,
5172 (((TARGET_SH4 && ! TARGET_FMOVD)
5173 || REGNO (dst) == FPUL_REG)
5176 last_float_addr = &XEXP (newsrc, 0);
5178 /* Remove the clobber of r0. */
5179 *clobberp = gen_rtx_CLOBBER (GET_MODE (clobber),
5180 gen_rtx_SCRATCH (Pmode));
5182 /* This is a mova needing a label. Create it. */
5183 else if (GET_CODE (src) == UNSPEC
5184 && XINT (src, 1) == UNSPEC_MOVA
5185 && GET_CODE (XVECEXP (src, 0, 0)) == CONST)
5187 lab = add_constant (XVECEXP (src, 0, 0), mode, 0);
5188 newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
5189 newsrc = gen_rtx_UNSPEC (SImode,
5190 gen_rtvec (1, newsrc),
5195 lab = add_constant (src, mode, 0);
5196 newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
5197 newsrc = gen_const_mem (mode, newsrc);
5199 *patp = gen_rtx_SET (VOIDmode, dst, newsrc);
5200 INSN_CODE (scan) = -1;
5203 dump_table (need_aligned_label ? insn : 0, barrier);
5207 free_alloc_pool (label_ref_list_pool);
5208 for (insn = first; insn; insn = NEXT_INSN (insn))
5209 PUT_MODE (insn, VOIDmode);
5211 mdep_reorg_phase = SH_SHORTEN_BRANCHES1;
5212 INSN_ADDRESSES_FREE ();
5213 split_branches (first);
5215 /* The INSN_REFERENCES_ARE_DELAYED in sh.h is problematic because it
5216 also has an effect on the register that holds the address of the sfunc.
5217 Insert an extra dummy insn in front of each sfunc that pretends to
5218 use this register. */
5219 if (flag_delayed_branch)
5221 for (insn = first; insn; insn = NEXT_INSN (insn))
5223 rtx reg = sfunc_uses_reg (insn);
5227 emit_insn_before (gen_use_sfunc_addr (reg), insn);
5231 /* fpscr is not actually a user variable, but we pretend it is for the
5232 sake of the previous optimization passes, since we want it handled like
5233 one. However, we don't have any debugging information for it, so turn
5234 it into a non-user variable now. */
5236 REG_USERVAR_P (get_fpscr_rtx ()) = 0;
5238 mdep_reorg_phase = SH_AFTER_MDEP_REORG;
5242 static int get_dest_uid (rtx label, int max_uid)
5244 rtx dest = next_real_insn (label);
5247 /* This can happen for an undefined label. */
5249 dest_uid = INSN_UID (dest);
5250 /* If this is a newly created branch redirection blocking instruction,
5251 we cannot index the branch_uid or insn_addresses arrays with its
5252 uid. But then, we won't need to, because the actual destination is
5253 the following branch. */
5254 while (dest_uid >= max_uid)
5256 dest = NEXT_INSN (dest);
5257 dest_uid = INSN_UID (dest);
5259 if (GET_CODE (dest) == JUMP_INSN && GET_CODE (PATTERN (dest)) == RETURN)
5264 /* Split condbranches that are out of range. Also add clobbers for
5265 scratch registers that are needed in far jumps.
5266 We do this before delay slot scheduling, so that it can take our
5267 newly created instructions into account. It also allows us to
5268 find branches with common targets more easily. */
5271 static void split_branches (rtx first)
5274 struct far_branch **uid_branch, *far_branch_list = 0;
5275 int max_uid = get_max_uid ();
5278 /* Find out which branches are out of range. */
5279 shorten_branches (first);
5281 uid_branch = (struct far_branch **) alloca (max_uid * sizeof *uid_branch);
5282 memset ((char *) uid_branch, 0, max_uid * sizeof *uid_branch);
5284 for (insn = first; insn; insn = NEXT_INSN (insn))
5285 if (! INSN_P (insn))
5287 else if (INSN_DELETED_P (insn))
5289 /* Shorten_branches would split this instruction again,
5290 so transform it into a note. */
5291 SET_INSN_DELETED (insn);
5293 else if (GET_CODE (insn) == JUMP_INSN
5294 /* Don't mess with ADDR_DIFF_VEC */
5295 && (GET_CODE (PATTERN (insn)) == SET
5296 || GET_CODE (PATTERN (insn)) == RETURN))
5298 enum attr_type type = get_attr_type (insn);
5299 if (type == TYPE_CBRANCH)
5303 if (get_attr_length (insn) > 4)
5305 rtx src = SET_SRC (PATTERN (insn));
5306 rtx olabel = XEXP (XEXP (src, 1), 0);
5307 int addr = INSN_ADDRESSES (INSN_UID (insn));
5309 int dest_uid = get_dest_uid (olabel, max_uid);
5310 struct far_branch *bp = uid_branch[dest_uid];
5312 /* redirect_jump needs a valid JUMP_LABEL, and it might delete
5313 the label if the LABEL_NUSES count drops to zero. There is
5314 always a jump_optimize pass that sets these values, but it
5315 proceeds to delete unreferenced code, and then if not
5316 optimizing, to un-delete the deleted instructions, thus
5317 leaving labels with use counts that are too low. */
5320 JUMP_LABEL (insn) = olabel;
5321 LABEL_NUSES (olabel)++;
5325 bp = (struct far_branch *) alloca (sizeof *bp);
5326 uid_branch[dest_uid] = bp;
5327 bp->prev = far_branch_list;
5328 far_branch_list = bp;
5330 = XEXP (XEXP (SET_SRC (PATTERN (insn)), 1), 0);
5331 LABEL_NUSES (bp->far_label)++;
5335 label = bp->near_label;
5336 if (! label && bp->address - addr >= CONDJUMP_MIN)
5338 rtx block = bp->insert_place;
5340 if (GET_CODE (PATTERN (block)) == RETURN)
5341 block = PREV_INSN (block);
5343 block = gen_block_redirect (block,
5345 label = emit_label_after (gen_label_rtx (),
5347 bp->near_label = label;
5349 else if (label && ! NEXT_INSN (label))
5351 if (addr + 2 - bp->address <= CONDJUMP_MAX)
5352 bp->insert_place = insn;
5354 gen_far_branch (bp);
5358 || (NEXT_INSN (label) && bp->address - addr < CONDJUMP_MIN))
5360 bp->near_label = label = gen_label_rtx ();
5361 bp->insert_place = insn;
5364 ok = redirect_jump (insn, label, 0);
5369 /* get_attr_length (insn) == 2 */
5370 /* Check if we have a pattern where reorg wants to redirect
5371 the branch to a label from an unconditional branch that
5373 /* We can't use JUMP_LABEL here because it might be undefined
5374 when not optimizing. */
5375 /* A syntax error might cause beyond to be NULL_RTX. */
5377 = next_active_insn (XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
5381 && (GET_CODE (beyond) == JUMP_INSN
5382 || ((beyond = next_active_insn (beyond))
5383 && GET_CODE (beyond) == JUMP_INSN))
5384 && GET_CODE (PATTERN (beyond)) == SET
5385 && recog_memoized (beyond) == CODE_FOR_jump_compact
5387 (INSN_UID (XEXP (SET_SRC (PATTERN (beyond)), 0)))
5388 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
5390 gen_block_redirect (beyond,
5391 INSN_ADDRESSES (INSN_UID (beyond)), 1);
5394 next = next_active_insn (insn);
5396 if ((GET_CODE (next) == JUMP_INSN
5397 || ((next = next_active_insn (next))
5398 && GET_CODE (next) == JUMP_INSN))
5399 && GET_CODE (PATTERN (next)) == SET
5400 && recog_memoized (next) == CODE_FOR_jump_compact
5402 (INSN_UID (XEXP (SET_SRC (PATTERN (next)), 0)))
5403 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
5405 gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), 1);
5407 else if (type == TYPE_JUMP || type == TYPE_RETURN)
5409 int addr = INSN_ADDRESSES (INSN_UID (insn));
5412 struct far_branch *bp;
5414 if (type == TYPE_JUMP)
5416 far_label = XEXP (SET_SRC (PATTERN (insn)), 0);
5417 dest_uid = get_dest_uid (far_label, max_uid);
5420 /* Parse errors can lead to labels outside the insn stream. */
5422 if (! NEXT_INSN (far_label))
5427 JUMP_LABEL (insn) = far_label;
5428 LABEL_NUSES (far_label)++;
5430 redirect_jump (insn, NULL_RTX, 1);
5434 bp = uid_branch[dest_uid];
5437 bp = (struct far_branch *) alloca (sizeof *bp);
5438 uid_branch[dest_uid] = bp;
5439 bp->prev = far_branch_list;
5440 far_branch_list = bp;
5442 bp->far_label = far_label;
5444 LABEL_NUSES (far_label)++;
5446 else if (bp->near_label && ! NEXT_INSN (bp->near_label))
5447 if (addr - bp->address <= CONDJUMP_MAX)
5448 emit_label_after (bp->near_label, PREV_INSN (insn));
5451 gen_far_branch (bp);
5457 bp->insert_place = insn;
5459 emit_insn_before (gen_block_branch_redirect (const0_rtx), insn);
5461 gen_block_redirect (insn, addr, bp->near_label ? 2 : 0);
5464 /* Generate all pending far branches,
5465 and free our references to the far labels. */
5466 while (far_branch_list)
5468 if (far_branch_list->near_label
5469 && ! NEXT_INSN (far_branch_list->near_label))
5470 gen_far_branch (far_branch_list);
5472 && far_branch_list->far_label
5473 && ! --LABEL_NUSES (far_branch_list->far_label))
5474 delete_insn (far_branch_list->far_label);
5475 far_branch_list = far_branch_list->prev;
5478 /* Instruction length information is no longer valid due to the new
5479 instructions that have been generated. */
5480 init_insn_lengths ();
5483 /* Dump out instruction addresses, which is useful for debugging the
5484 constant pool table code.
5486 If relaxing, output the label and pseudo-ops used to link together
5487 calls and the instruction which set the registers. */
5489 /* ??? The addresses printed by this routine for insns are nonsense for
5490 insns which are inside of a sequence where none of the inner insns have
5491 variable length. This is because the second pass of shorten_branches
5492 does not bother to update them. */
5495 void final_prescan_insn (rtx insn, rtx *opvec ATTRIBUTE_UNUSED,
5496 int noperands ATTRIBUTE_UNUSED)
5498 if (TARGET_DUMPISIZE)
5499 fprintf (asm_out_file, "\n! at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));
5505 note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
5510 pattern = PATTERN (insn);
5511 if (GET_CODE (pattern) == PARALLEL)
5512 pattern = XVECEXP (pattern, 0, 0);
5513 switch (GET_CODE (pattern))
5516 if (GET_CODE (SET_SRC (pattern)) != CALL
5517 && get_attr_type (insn) != TYPE_SFUNC)
5519 targetm.asm_out.internal_label
5520 (asm_out_file, "L", CODE_LABEL_NUMBER (XEXP (note, 0)));
5523 /* else FALLTHROUGH */
5525 asm_fprintf (asm_out_file, "\t.uses %LL%d\n",
5526 CODE_LABEL_NUMBER (XEXP (note, 0)));
5536 /* Dump out any constants accumulated in the final pass. These will only be labels. */
5540 const char * output_jump_label_table (void)
5546 fprintf (asm_out_file, "\t.align 2\n");
5547 for (i = 0; i < pool_size; i++)
5549 pool_node *p = &pool_vector[i];
5551 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5552 CODE_LABEL_NUMBER (p->label));
5553 output_asm_insn (".long %O0", &p->value);
5561 /* A full frame looks like: the incoming arguments on top, then,
5565 [ if current_function_anonymous_args ] the pretend-arg registers pushed by the prologue,
5570 then the saved registers, and finally the locals, down to
5578 local-0 <- fp points here. */
5580 /* Number of bytes pushed for anonymous args, used to pass information
5581 between expand_prologue and expand_epilogue. */
5583 /* Adjust the stack by SIZE bytes. REG holds the rtl of the register to be
5584 adjusted. If epilogue_p is zero, this is for a prologue; otherwise, it's
5585 for an epilogue and a negative value means that it's for a sibcall
5586 epilogue. If LIVE_REGS_MASK is nonzero, it points to a HARD_REG_SET of
5587 all the registers that are about to be restored, and hence dead. */
5590 static void output_stack_adjust (int size, rtx reg, int epilogue_p,
5591 HARD_REG_SET *live_regs_mask)
5593 rtx (*emit_fn) (rtx) = epilogue_p ? &emit_insn : &frame_insn;
5596 HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
5598 /* This test is bogus, as output_stack_adjust is used to re-align the stack. */
5601 gcc_assert (!(size % align));
5604 if (CONST_OK_FOR_ADD (size))
5605 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size)));
5606 /* Try to do it with two partial adjustments; however, we must make
5607 sure that the stack is properly aligned at all times, in case
5608 an interrupt occurs between the two partial adjustments. */
5609 else if (CONST_OK_FOR_ADD (size / 2 & -align)
5610 && CONST_OK_FOR_ADD (size - (size / 2 & -align)))
5612 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size / 2 & -align)));
5613 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size - (size / 2 & -align))));
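/* Worked example, added for illustration (assuming the non-SHmedia case
   where CONST_OK_FOR_ADD accepts signed 8-bit constants, -128..127):
   size == 136 does not fit in one add, but size / 2 & -align == 68 does,
   so we emit add #68 twice.  68 is a multiple of the alignment, hence
   the stack stays properly aligned between the two partial adjustments.  */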
5619 int temp = epilogue_p ? 7 : (TARGET_SH5 ? 0 : 1);
5622 /* If TEMP is invalid, we could temporarily save a general
5623 register to MACL. However, there is currently no need
5624 to handle this case, so just die when we see it. */
5626 || current_function_interrupt
5627 || ! call_really_used_regs[temp] || fixed_regs[temp])
5629 if (temp < 0 && ! current_function_interrupt
5630 && (TARGET_SHMEDIA || epilogue_p >= 0))
5633 COPY_HARD_REG_SET (temps, call_used_reg_set);
5634 AND_COMPL_HARD_REG_SET (temps, call_fixed_reg_set);
5638 if (crtl->return_rtx)
5640 enum machine_mode mode;
5641 mode = GET_MODE (crtl->return_rtx);
5642 if (BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG)
5643 nreg = HARD_REGNO_NREGS (FIRST_RET_REG, mode);
5645 for (i = 0; i < nreg; i++)
5646 CLEAR_HARD_REG_BIT (temps, FIRST_RET_REG + i);
5647 if (crtl->calls_eh_return)
5649 CLEAR_HARD_REG_BIT (temps, EH_RETURN_STACKADJ_REGNO);
5650 for (i = 0; i <= 3; i++)
5651 CLEAR_HARD_REG_BIT (temps, EH_RETURN_DATA_REGNO (i));
5654 if (TARGET_SHMEDIA && epilogue_p < 0)
5655 for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
5656 CLEAR_HARD_REG_BIT (temps, i);
5657 if (epilogue_p <= 0)
5659 for (i = FIRST_PARM_REG;
5660 i < FIRST_PARM_REG + NPARM_REGS (SImode); i++)
5661 CLEAR_HARD_REG_BIT (temps, i);
5662 if (cfun->static_chain_decl != NULL)
5663 CLEAR_HARD_REG_BIT (temps, STATIC_CHAIN_REGNUM);
5665 temp = scavenge_reg (&temps);
5667 if (temp < 0 && live_regs_mask)
5671 COPY_HARD_REG_SET (temps, *live_regs_mask);
5672 CLEAR_HARD_REG_BIT (temps, REGNO (reg));
5673 temp = scavenge_reg (&temps);
5677 rtx adj_reg, tmp_reg, mem;
5679 /* If we reached here, the most likely case is the (sibcall)
5680 epilogue for non-SHmedia. Emit a special push/pop sequence
5681 for such a case as the last resort. This looks lengthy but
5682 would not be a problem because it is very rare. */
5685 gcc_assert (!TARGET_SHMEDIA && epilogue_p);
5688 /* ??? There is still the slight possibility that r4 or
5689 r5 have been reserved as fixed registers or assigned
5690 as global registers, and they change during an
5691 interrupt. There are possible ways to handle this:
5693 - If we are adjusting the frame pointer (r14), we can do
5694 with a single temp register and an ordinary push / pop
5696 - Grab any call-used or call-saved registers (i.e. not
5697 fixed or globals) for the temps we need. We might
5698 also grab r14 if we are adjusting the stack pointer.
5699 If we can't find enough available registers, issue
5700 a diagnostic and die - the user must have reserved
5701 way too many registers.
5702 But since all this is rather unlikely to happen and
5703 would require extra testing, we just die if r4 / r5
5704 are not available. */
5705 gcc_assert (!fixed_regs[4] && !fixed_regs[5]
5706 && !global_regs[4] && !global_regs[5]);
5708 adj_reg = gen_rtx_REG (GET_MODE (reg), 4);
5709 tmp_reg = gen_rtx_REG (GET_MODE (reg), 5);
5710 emit_move_insn (gen_tmp_stack_mem (Pmode, reg), adj_reg);
5711 emit_insn (GEN_MOV (adj_reg, GEN_INT (size)));
5712 emit_insn (GEN_ADD3 (adj_reg, adj_reg, reg));
5713 mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
5714 emit_move_insn (mem, tmp_reg);
5715 emit_move_insn (tmp_reg, gen_tmp_stack_mem (Pmode, reg));
5716 mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
5717 emit_move_insn (mem, tmp_reg);
5718 emit_move_insn (reg, adj_reg);
5719 mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
5720 emit_move_insn (adj_reg, mem);
5721 mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
5722 emit_move_insn (tmp_reg, mem);
5723 /* Tell flow the insns that pop r4/r5 aren't dead. */
5728 const_reg = gen_rtx_REG (GET_MODE (reg), temp);
5730 /* If SIZE is negative, subtract the positive value.
5731 This sometimes allows a constant pool entry to be shared
5732 between prologue and epilogue code. */
5735 emit_insn (GEN_MOV (const_reg, GEN_INT (-size)));
5736 insn = emit_fn (GEN_SUB3 (reg, reg, const_reg));
5740 emit_insn (GEN_MOV (const_reg, GEN_INT (size)));
5741 insn = emit_fn (GEN_ADD3 (reg, reg, const_reg));
5745 = (gen_rtx_EXPR_LIST
5746 (REG_FRAME_RELATED_EXPR,
5747 gen_rtx_SET (VOIDmode, reg,
5748 gen_rtx_PLUS (SImode, reg, GEN_INT (size))),
5758 RTX_FRAME_RELATED_P (x) = 1;
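/* Note, added for clarity: when output_stack_adjust has to go through a
   temporary register, the emitted sequence no longer looks like a simple
   sp := sp + size, so the REG_FRAME_RELATED_EXPR note built above hands
   the dwarf CFI machinery the net effect directly, and
   RTX_FRAME_RELATED_P marks the insn as frame-related.  */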
5762 /* Output RTL to push register RN onto the stack. */
5769 x = gen_push_fpul ();
5770 else if (rn == FPSCR_REG)
5771 x = gen_push_fpscr ();
5772 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
5773 && FP_OR_XD_REGISTER_P (rn))
5775 if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
5777 x = gen_push_4 (gen_rtx_REG (DFmode, rn));
5779 else if (TARGET_SH2E && FP_REGISTER_P (rn))
5780 x = gen_push_e (gen_rtx_REG (SFmode, rn));
5782 x = gen_push (gen_rtx_REG (SImode, rn));
5786 = gen_rtx_EXPR_LIST (REG_INC,
5787 gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
5791 /* Output RTL to pop register RN from the stack. */
5798 x = gen_pop_fpul ();
5799 else if (rn == FPSCR_REG)
5800 x = gen_pop_fpscr ();
5801 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
5802 && FP_OR_XD_REGISTER_P (rn))
5804 if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
5806 x = gen_pop_4 (gen_rtx_REG (DFmode, rn));
5808 else if (TARGET_SH2E && FP_REGISTER_P (rn))
5809 x = gen_pop_e (gen_rtx_REG (SFmode, rn));
5811 x = gen_pop (gen_rtx_REG (SImode, rn));
5815 = gen_rtx_EXPR_LIST (REG_INC,
5816 gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
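/* Note, added for clarity: the REG_INC notes attached by push and pop
   above record that the auto-increment / auto-decrement address modifies
   the stack pointer, so the dataflow passes see r15 change even though
   it is not the destination of a SET.  */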
5819 /* Generate code to push the regs specified in the mask. */
5822 static void push_regs (HARD_REG_SET *mask, int interrupt_handler)
5824 int i = interrupt_handler ? LAST_BANKED_REG + 1 : 0;
5827 /* Push PR last; this gives better latencies after the prologue, and
5828 candidates for the return delay slot when there are no general
5829 registers pushed. */
5830 for (; i < FIRST_PSEUDO_REGISTER; i++)
5832 /* If this is an interrupt handler, and the SZ bit varies,
5833 and we have to push any floating point register, we need
5834 to switch to the correct precision first. */
5835 if (i == FIRST_FP_REG && interrupt_handler && TARGET_FMOVD
5836 && hard_reg_set_intersect_p (*mask, reg_class_contents[DF_REGS]))
5838 HARD_REG_SET unsaved;
5841 COMPL_HARD_REG_SET (unsaved, *mask);
5842 fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
5846 && (i != FPSCR_REG || ! skip_fpscr)
5847 && TEST_HARD_REG_BIT (*mask, i))
5849 /* If the ISR has RESBANK attribute assigned, don't push any of
5850 the following registers - R0-R14, MACH, MACL and GBR. */
5851 if (! (sh_cfun_resbank_handler_p ()
5852 && ((i >= FIRST_GENERAL_REG && i < LAST_GENERAL_REG)
5860 /* Push banked registers last to improve delay slot opportunities. */
5861 if (interrupt_handler)
5862 for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
5863 if (TEST_HARD_REG_BIT (*mask, i))
5866 /* Don't push PR register for an ISR with RESBANK attribute assigned. */
5867 if (TEST_HARD_REG_BIT (*mask, PR_REG) && !sh_cfun_resbank_handler_p ())
5871 /* Calculate how much extra space is needed to save all callee-saved target registers.
5873 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
5876 static int shmedia_target_regs_stack_space (HARD_REG_SET *live_regs_mask)
5879 int stack_space = 0;
5880 int interrupt_handler = sh_cfun_interrupt_handler_p ();
5882 for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
5883 if ((! call_really_used_regs[reg] || interrupt_handler)
5884 && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
5885 /* Leave space to save this target register on the stack,
5886 in case target register allocation wants to use it. */
5887 stack_space += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
5891 /* Decide whether we should reserve space for callee-save target registers,
5892 in case target register allocation wants to use them. REGS_SAVED is
5893 the space, in bytes, that is already required for register saves.
5894 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
5897 static bool shmedia_reserve_space_for_target_registers_p (int regs_saved,
5898 HARD_REG_SET *live_regs_mask)
5902 return shmedia_target_regs_stack_space (live_regs_mask) <= regs_saved;
5905 /* Decide how much space to reserve for callee-save target registers
5906 in case target register allocation wants to use them.
5907 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
5910 static int shmedia_target_regs_stack_adjust (HARD_REG_SET *live_regs_mask)
5912 if (shmedia_space_reserved_for_target_registers)
5913 return shmedia_target_regs_stack_space (live_regs_mask);
5918 /* Work out the registers which need to be saved, both as a mask and a
5919 count of saved words. Return the count.
5921 If doing a pragma interrupt function, then push all regs used by the
5922 function, and if we call another function (we can tell by looking at PR),
5923 make sure that all the regs it clobbers are safe too. */
5926 static int calc_live_regs (HARD_REG_SET *live_regs_mask)
5931 bool interrupt_or_trapa_handler, trapa_handler, interrupt_handler;
5932 bool nosave_low_regs;
5933 int pr_live, has_call;
5935 attrs = DECL_ATTRIBUTES (current_function_decl);
5936 interrupt_or_trapa_handler = sh_cfun_interrupt_handler_p ();
5937 trapa_handler = lookup_attribute ("trapa_handler", attrs) != NULL_TREE;
5938 interrupt_handler = interrupt_or_trapa_handler && ! trapa_handler;
5939 nosave_low_regs = lookup_attribute ("nosave_low_regs", attrs) != NULL_TREE;
5941 CLEAR_HARD_REG_SET (*live_regs_mask);
5942 if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && interrupt_handler
5943 && df_regs_ever_live_p (FPSCR_REG))
5944 target_flags &= ~MASK_FPU_SINGLE;
5945 /* If we can save a lot of saves by switching to double mode, do that. */
5946 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && TARGET_FPU_SINGLE)
5947 for (count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
5948 if (df_regs_ever_live_p (reg) && df_regs_ever_live_p (reg+1)
5949 && (! call_really_used_regs[reg]
5950 || interrupt_handler)
5953 target_flags &= ~MASK_FPU_SINGLE;
5956 /* PR_MEDIA_REG is a general purpose register, thus global_alloc already
5957 knows how to use it. That means the pseudo originally allocated for
5958 the initial value can become the PR_MEDIA_REG hard register, as seen for
5959 execute/20010122-1.c:test9. */
5961 /* ??? This function is called from initial_elimination_offset, hence we
5962 can't use the result of sh_media_register_for_return here. */
5963 pr_live = sh_pr_n_sets ();
5966 rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
5967 pr_live = (pr_initial
5968 ? (GET_CODE (pr_initial) != REG
5969 || REGNO (pr_initial) != (PR_REG))
5970 : df_regs_ever_live_p (PR_REG));
5971 /* For SHcompact, if not optimizing, we end up with a memory reference
5972 using the return address pointer for __builtin_return_address even
5973 though there is no actual need to put the PR register on the stack. */
5974 pr_live |= df_regs_ever_live_p (RETURN_ADDRESS_POINTER_REGNUM);
5976 /* Force PR to be live if the prologue has to call the SHmedia
5977 argument decoder or register saver. */
5978 if (TARGET_SHCOMPACT
5979 && ((crtl->args.info.call_cookie
5980 & ~ CALL_COOKIE_RET_TRAMP (1))
5981 || crtl->saves_all_registers))
5983 has_call = TARGET_SHMEDIA ? ! leaf_function_p () : pr_live;
5984 for (count = 0, reg = FIRST_PSEUDO_REGISTER; reg-- != 0; )
5986 if (reg == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG)
5989 ? (/* Need to save all the regs ever live. */
5990 (df_regs_ever_live_p (reg)
5991 || (call_really_used_regs[reg]
5992 && (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG
5993 || reg == PIC_OFFSET_TABLE_REGNUM)
5995 || (TARGET_SHMEDIA && has_call
5996 && REGISTER_NATURAL_MODE (reg) == SImode
5997 && (GENERAL_REGISTER_P (reg) || TARGET_REGISTER_P (reg))))
5998 && reg != STACK_POINTER_REGNUM && reg != ARG_POINTER_REGNUM
5999 && reg != RETURN_ADDRESS_POINTER_REGNUM
6000 && reg != T_REG && reg != GBR_REG
6001 /* Push fpscr only on targets which have an FPU. */
6002 && (reg != FPSCR_REG || TARGET_FPU_ANY))
6003 : (/* Only push those regs which are used and need to be saved. */
6006 && crtl->args.info.call_cookie
6007 && reg == PIC_OFFSET_TABLE_REGNUM)
6008 || (df_regs_ever_live_p (reg)
6009 && (!call_really_used_regs[reg]
6010 || (trapa_handler && reg == FPSCR_REG && TARGET_FPU_ANY)))
6011 || (crtl->calls_eh_return
6012 && (reg == EH_RETURN_DATA_REGNO (0)
6013 || reg == EH_RETURN_DATA_REGNO (1)
6014 || reg == EH_RETURN_DATA_REGNO (2)
6015 || reg == EH_RETURN_DATA_REGNO (3)))
6016 || ((reg == MACL_REG || reg == MACH_REG)
6017 && df_regs_ever_live_p (reg)
6018 && sh_cfun_attr_renesas_p ())
6021 SET_HARD_REG_BIT (*live_regs_mask, reg);
6022 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
6024 if ((TARGET_SH4 || TARGET_SH2A_DOUBLE || TARGET_SH5) && TARGET_FMOVD
6025 && GET_MODE_CLASS (REGISTER_NATURAL_MODE (reg)) == MODE_FLOAT)
6027 if (FP_REGISTER_P (reg))
6029 if (! TARGET_FPU_SINGLE && ! df_regs_ever_live_p (reg ^ 1))
6031 SET_HARD_REG_BIT (*live_regs_mask, (reg ^ 1));
6032 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg ^ 1));
6035 else if (XD_REGISTER_P (reg))
6037 /* Must switch to double mode to access these registers. */
6038 target_flags &= ~MASK_FPU_SINGLE;
6042 if (nosave_low_regs && reg == R8_REG)
6045 /* If we have a target register optimization pass after prologue / epilogue
6046 threading, we need to assume all target registers will be live even if they aren't now. */
6048 if (flag_branch_target_load_optimize2
6049 && TARGET_SAVE_ALL_TARGET_REGS
6050 && shmedia_space_reserved_for_target_registers)
6051 for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
6052 if ((! call_really_used_regs[reg] || interrupt_handler)
6053 && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
6055 SET_HARD_REG_BIT (*live_regs_mask, reg);
6056 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
6058 /* If this is an interrupt handler, we don't have any call-clobbered
6059 registers we can conveniently use for target register save/restore.
6060 Make sure we save at least one general purpose register when we need
6061 to save target registers. */
6062 if (interrupt_handler
6063 && hard_reg_set_intersect_p (*live_regs_mask,
6064 reg_class_contents[TARGET_REGS])
6065 && ! hard_reg_set_intersect_p (*live_regs_mask,
6066 reg_class_contents[GENERAL_REGS]))
6068 SET_HARD_REG_BIT (*live_regs_mask, R0_REG);
6069 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (R0_REG));
6075 /* Code to generate prologue and epilogue sequences */
6077 /* PUSHED is the number of bytes that are being pushed on the
6078 stack for register saves. Return the frame size, padded
6079 appropriately so that the stack stays properly aligned. */
6080 static HOST_WIDE_INT
6081 rounded_frame_size (int pushed)
6083 HOST_WIDE_INT size = get_frame_size ();
6084 HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
6086 return ((size + pushed + align - 1) & -align) - pushed;
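/* Worked example, added for illustration: with a 32-bit STACK_BOUNDARY
   (align == 4), get_frame_size () == 23 and pushed == 12, we get
   ((23 + 12 + 3) & -4) - 12 == 36 - 12 == 24; the local frame is padded
   from 23 to 24 bytes so that pushed + frame size stays a multiple of
   the stack alignment.  */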
6089 /* Choose a call-clobbered target-branch register that remains
6090 unchanged along the whole function. We set it up as the return
6091 value in the prologue. */
6093 int sh_media_register_for_return (void)
6098 if (! current_function_is_leaf)
6100 if (lookup_attribute ("interrupt_handler",
6101 DECL_ATTRIBUTES (current_function_decl)))
6103 if (sh_cfun_interrupt_handler_p ())
6106 tr0_used = flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
6108 for (regno = FIRST_TARGET_REG + tr0_used; regno <= LAST_TARGET_REG; regno++)
6109 if (call_really_used_regs[regno] && ! df_regs_ever_live_p (regno))
6115 /* The maximum registers we need to save are:
6116 - 62 general purpose registers (r15 is stack pointer, r63 is zero)
6117 - 32 floating point registers (for each pair, we save none,
6118 one single precision value, or a double precision value).
6119 - 8 target registers
6120 - add 1 entry for a delimiter. */
6121 #define MAX_SAVED_REGS (62+32+8)
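/* Note, added for clarity: 62 + 32 + 8 == 102 register slots; the
   start/end delimiter entries mentioned above are provided for by the
   "+ 2" in the save_schedule entries array below, not by this macro.  */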
6123 typedef struct save_entry_s
6132 /* There will be a delimiter entry with VOIDmode both at the start and the
6133 end of a filled in schedule. The end delimiter has the offset of the
6134 save with the smallest (i.e. most negative) offset. */
6135 typedef struct save_schedule_s
6137 save_entry entries[MAX_SAVED_REGS + 2];
6138 int temps[MAX_TEMPS+1];
6141 /* Fill in SCHEDULE according to LIVE_REGS_MASK. If RESTORE is nonzero,
6142 use reverse order. Returns the last entry written to (not counting
6143 the delimiter). OFFSET_BASE is a number to be added to all offset
6147 static save_entry * sh5_schedule_saves (HARD_REG_SET *live_regs_mask, save_schedule *schedule,
6151 save_entry *entry = schedule->entries;
6155 if (! current_function_interrupt)
6156 for (i = FIRST_GENERAL_REG; tmpx < MAX_TEMPS && i <= LAST_GENERAL_REG; i++)
6157 if (call_really_used_regs[i] && ! fixed_regs[i] && i != PR_MEDIA_REG
6158 && ! FUNCTION_ARG_REGNO_P (i)
6159 && i != FIRST_RET_REG
6160 && ! (cfun->static_chain_decl != NULL && i == STATIC_CHAIN_REGNUM)
6161 && ! (crtl->calls_eh_return
6162 && (i == EH_RETURN_STACKADJ_REGNO
6163 || ((unsigned) i >= EH_RETURN_DATA_REGNO (0)
6164 && (unsigned) i <= EH_RETURN_DATA_REGNO (3)))))
6165 schedule->temps[tmpx++] = i;
6167 entry->mode = VOIDmode;
6168 entry->offset = offset_base;
6170 /* We loop twice: first, we save 8-byte aligned registers at the
6171 higher addresses, which are known to be aligned. Then, we
6172 proceed to saving 32-bit registers that don't need 8-byte
6173 alignment.
6174 If this is an interrupt function, all registers that need saving
6175 need to be saved in full. Moreover, we need to postpone saving
6176 target registers till we have saved some general purpose registers
6177 we can then use as scratch registers. */
6178 offset = offset_base;
6179 for (align = 1; align >= 0; align--)
6181 for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
6182 if (TEST_HARD_REG_BIT (*live_regs_mask, i))
6184 enum machine_mode mode = REGISTER_NATURAL_MODE (i);
6187 if (current_function_interrupt)
6189 if (TARGET_REGISTER_P (i))
6191 if (GENERAL_REGISTER_P (i))
6194 if (mode == SFmode && (i % 2) == 1
6195 && ! TARGET_FPU_SINGLE && FP_REGISTER_P (i)
6196 && (TEST_HARD_REG_BIT (*live_regs_mask, (i ^ 1))))
6203 /* If we're doing the aligned pass and this is not aligned,
6204 or we're doing the unaligned pass and this is aligned,
6206 if ((GET_MODE_SIZE (mode) % (STACK_BOUNDARY / BITS_PER_UNIT) == 0)
6210 if (current_function_interrupt
6211 && GENERAL_REGISTER_P (i)
6212 && tmpx < MAX_TEMPS)
6213 schedule->temps[tmpx++] = i;
6215 offset -= GET_MODE_SIZE (mode);
6218 entry->offset = offset;
6221 if (align && current_function_interrupt)
6222 for (i = LAST_TARGET_REG; i >= FIRST_TARGET_REG; i--)
6223 if (TEST_HARD_REG_BIT (*live_regs_mask, i))
6225 offset -= GET_MODE_SIZE (DImode);
6227 entry->mode = DImode;
6228 entry->offset = offset;
6233 entry->mode = VOIDmode;
6234 entry->offset = offset;
6235 schedule->temps[tmpx] = -1;
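/* Illustrative sketch, not part of the original sources: how a filled-in
   schedule is walked.  entries[0] is the leading VOIDmode delimiter, so
   consumers start at entries[1] and stop at the trailing VOIDmode entry,
   exactly as sh_expand_prologue does below; temps[] is -1-terminated.  */
static void ATTRIBUTE_UNUSED
schedule_walk_example (save_schedule *schedule)
{
  save_entry *entry;

  for (entry = &schedule->entries[1]; entry->mode != VOIDmode; entry++)
    ;	/* entry->reg is saved at offset entry->offset in mode entry->mode.  */
}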
6240 void sh_expand_prologue (void)
6242 HARD_REG_SET live_regs_mask;
6245 int save_flags = target_flags;
6248 = lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl));
6250 current_function_interrupt = sh_cfun_interrupt_handler_p ();
6252 /* We have pretend args if we had an object sent partially in registers
6253 and partially on the stack, e.g. a large structure. */
6254 pretend_args = crtl->args.pretend_args_size;
6255 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl)
6256 && (NPARM_REGS(SImode)
6257 > crtl->args.info.arg_count[(int) SH_ARG_INT]))
6259 output_stack_adjust (-pretend_args
6260 - crtl->args.info.stack_regs * 8,
6261 stack_pointer_rtx, 0, NULL);
6263 if (TARGET_SHCOMPACT && flag_pic && crtl->args.info.call_cookie)
6264 /* We're going to use the PIC register to load the address of the
6265 incoming-argument decoder and/or of the return trampoline from
6266 the GOT, so make sure the PIC register is preserved and
6268 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
6270 if (TARGET_SHCOMPACT
6271 && (crtl->args.info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
6275 /* First, make all registers with incoming arguments that will
6276 be pushed onto the stack live, so that register renaming
6277 doesn't overwrite them. */
6278 for (reg = 0; reg < NPARM_REGS (SImode); reg++)
6279 if (CALL_COOKIE_STACKSEQ_GET (crtl->args.info.call_cookie)
6280 >= NPARM_REGS (SImode) - reg)
6281 for (; reg < NPARM_REGS (SImode); reg++)
6282 emit_insn (gen_shcompact_preserve_incoming_args
6283 (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
6284 else if (CALL_COOKIE_INT_REG_GET
6285 (crtl->args.info.call_cookie, reg) == 1)
6286 emit_insn (gen_shcompact_preserve_incoming_args
6287 (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
6289 emit_move_insn (gen_rtx_REG (Pmode, MACL_REG),
6291 emit_move_insn (gen_rtx_REG (SImode, R0_REG),
6292 GEN_INT (crtl->args.info.call_cookie));
6293 emit_move_insn (gen_rtx_REG (SImode, MACH_REG),
6294 gen_rtx_REG (SImode, R0_REG));
6296 else if (TARGET_SHMEDIA)
6298 int tr = sh_media_register_for_return ();
6301 emit_move_insn (gen_rtx_REG (DImode, tr),
6302 gen_rtx_REG (DImode, PR_MEDIA_REG));
6305 /* Emit the code for SETUP_VARARGS. */
6308 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
6310 /* Push arg regs as if they'd been provided by caller in stack. */
6311 for (i = 0; i < NPARM_REGS(SImode); i++)
6313 int rn = NPARM_REGS(SImode) + FIRST_PARM_REG - i - 1;
6316 if (i >= (NPARM_REGS(SImode)
6317 - crtl->args.info.arg_count[(int) SH_ARG_INT]
6325 /* If we're supposed to switch stacks at function entry, do so now. */
6328 /* The argument specifies a variable holding the address of the
6329 stack the interrupt function should switch to/from at entry/exit. */
6331 = ggc_strdup (TREE_STRING_POINTER (TREE_VALUE (sp_switch_attr)));
6332 rtx sp_switch = gen_rtx_SYMBOL_REF (Pmode, s);
6334 emit_insn (gen_sp_switch_1 (sp_switch));
6337 d = calc_live_regs (&live_regs_mask);
6338 /* ??? Maybe we could save some switching if we can move a mode switch
6339 that already happens to be at the function start into the prologue. */
6340 if (target_flags != save_flags && ! current_function_interrupt)
6341 emit_insn (gen_toggle_sz ());
6345 int offset_base, offset;
6347 int offset_in_r0 = -1;
6349 int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
6350 int total_size, save_size;
6351 save_schedule schedule;
6355 if (call_really_used_regs[R0_REG] && ! fixed_regs[R0_REG]
6356 && ! current_function_interrupt)
6357 r0 = gen_rtx_REG (Pmode, R0_REG);
6359 /* D is the actual number of bytes that we need for saving registers;
6360 however, in initial_elimination_offset we have committed to using
6361 an additional TREGS_SPACE amount of bytes - in order to keep both
6362 addresses to arguments supplied by the caller and local variables
6363 valid, we must keep this gap. Place it between the incoming
6364 arguments and the actually saved registers in a bid to optimize
6365 locality of reference. */
6366 total_size = d + tregs_space;
6367 total_size += rounded_frame_size (total_size);
6368 save_size = total_size - rounded_frame_size (d);
6369 if (save_size % (STACK_BOUNDARY / BITS_PER_UNIT))
6370 d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
6371 - save_size % (STACK_BOUNDARY / BITS_PER_UNIT));
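/* Worked example, added for illustration: assuming an 8-byte stack
   boundary and save_size == 20, d_rounding becomes 8 - 20 % 8 == 4,
   padding the register-save area up to the next stack boundary.  */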
6373 /* If adjusting the stack in a single step costs nothing extra, do so.
6374 I.e. either if a single addi is enough, or we need a movi anyway,
6375 and we don't exceed the maximum offset range (the test for the
6376 latter is conservative for simplicity). */
6378 && (CONST_OK_FOR_I10 (-total_size)
6379 || (! CONST_OK_FOR_I10 (-(save_size + d_rounding))
6380 && total_size <= 2044)))
6381 d_rounding = total_size - save_size;
6383 offset_base = d + d_rounding;
6385 output_stack_adjust (-(save_size + d_rounding), stack_pointer_rtx,
6388 sh5_schedule_saves (&live_regs_mask, &schedule, offset_base);
6389 tmp_pnt = schedule.temps;
6390 for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
6392 enum machine_mode mode = entry->mode;
6393 unsigned int reg = entry->reg;
6394 rtx reg_rtx, mem_rtx, pre_dec = NULL_RTX;
6397 offset = entry->offset;
6399 reg_rtx = gen_rtx_REG (mode, reg);
6401 mem_rtx = gen_frame_mem (mode,
6402 gen_rtx_PLUS (Pmode,
6406 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_pre_dec);
6413 if (HAVE_PRE_DECREMENT
6414 && (offset_in_r0 - offset == GET_MODE_SIZE (mode)
6415 || mem_rtx == NULL_RTX
6416 || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
6418 pre_dec = gen_frame_mem (mode, gen_rtx_PRE_DEC (Pmode, r0));
6420 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (pre_dec, 0),
6429 offset += GET_MODE_SIZE (mode);
6433 if (mem_rtx != NULL_RTX)
6436 if (offset_in_r0 == -1)
6438 emit_move_insn (r0, GEN_INT (offset));
6439 offset_in_r0 = offset;
6441 else if (offset != offset_in_r0)
6446 GEN_INT (offset - offset_in_r0)));
6447 offset_in_r0 += offset - offset_in_r0;
6450 if (pre_dec != NULL_RTX)
6456 (Pmode, r0, stack_pointer_rtx));
6460 offset -= GET_MODE_SIZE (mode);
6461 offset_in_r0 -= GET_MODE_SIZE (mode);
6466 mem_rtx = gen_frame_mem (mode, r0);
6468 mem_rtx = gen_frame_mem (mode,
6469 gen_rtx_PLUS (Pmode,
6473 /* We must not use an r0-based address for target-branch
6474 registers or for special registers without pre-dec
6475 memory addresses, since we store their values in r0
6477 gcc_assert (!TARGET_REGISTER_P (reg)
6478 && ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
6479 || mem_rtx == pre_dec));
6482 orig_reg_rtx = reg_rtx;
6483 if (TARGET_REGISTER_P (reg)
6484 || ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
6485 && mem_rtx != pre_dec))
6487 rtx tmp_reg = gen_rtx_REG (GET_MODE (reg_rtx), *tmp_pnt);
6489 emit_move_insn (tmp_reg, reg_rtx);
6491 if (REGNO (tmp_reg) == R0_REG)
6495 gcc_assert (!refers_to_regno_p
6496 (R0_REG, R0_REG+1, mem_rtx, (rtx *) 0));
6499 if (*++tmp_pnt <= 0)
6500 tmp_pnt = schedule.temps;
6507 /* Mark as interesting for dwarf cfi generator */
6508 insn = emit_move_insn (mem_rtx, reg_rtx);
6509 RTX_FRAME_RELATED_P (insn) = 1;
6510 /* If we use an intermediate register for the save, we can't
6511 describe this exactly in cfi as a copy of the to-be-saved
6512 register into the temporary register and then the temporary
6513 register on the stack, because the temporary register can
6514 have a different natural size than the to-be-saved register.
6515 Thus, we gloss over the intermediate copy and pretend we do
6516 a direct save from the to-be-saved register. */
6517 if (REGNO (reg_rtx) != reg)
6521 set = gen_rtx_SET (VOIDmode, mem_rtx, orig_reg_rtx);
6522 note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
6524 REG_NOTES (insn) = note_rtx;
6527 if (TARGET_SHCOMPACT && (offset_in_r0 != -1))
6529 rtx reg_rtx = gen_rtx_REG (mode, reg);
6531 rtx mem_rtx = gen_frame_mem (mode,
6532 gen_rtx_PLUS (Pmode,
6536 set = gen_rtx_SET (VOIDmode, mem_rtx, reg_rtx);
6537 note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
6539 REG_NOTES (insn) = note_rtx;
6544 gcc_assert (entry->offset == d_rounding);
6547 push_regs (&live_regs_mask, current_function_interrupt);
6549 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
6550 emit_insn (gen_GOTaddr2picreg ());
6552 if (SHMEDIA_REGS_STACK_ADJUST ())
6554 /* This must NOT go through the PLT, otherwise mach and macl
6555 may be clobbered. */
6556 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6558 ? "__GCC_push_shmedia_regs"
6559 : "__GCC_push_shmedia_regs_nofpu"), SFUNC_GOT);
6560 emit_insn (gen_shmedia_save_restore_regs_compact
6561 (GEN_INT (-SHMEDIA_REGS_STACK_ADJUST ())));
6564 if (target_flags != save_flags && ! current_function_interrupt)
6565 emit_insn (gen_toggle_sz ());
6567 target_flags = save_flags;
6569 output_stack_adjust (-rounded_frame_size (d) + d_rounding,
6570 stack_pointer_rtx, 0, NULL);
6572 if (frame_pointer_needed)
6573 frame_insn (GEN_MOV (hard_frame_pointer_rtx, stack_pointer_rtx));
6575 if (TARGET_SHCOMPACT
6576 && (crtl->args.info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
6578 /* This must NOT go through the PLT, otherwise mach and macl
6579 may be clobbered. */
6580 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6581 "__GCC_shcompact_incoming_args", SFUNC_GOT);
6582 emit_insn (gen_shcompact_incoming_args ());
6587 sh_expand_epilogue (bool sibcall_p)
6589 HARD_REG_SET live_regs_mask;
6593 int save_flags = target_flags;
6594 int frame_size, save_size;
6595 int fpscr_deferred = 0;
6596 int e = sibcall_p ? -1 : 1;
6598 d = calc_live_regs (&live_regs_mask);
6601 frame_size = rounded_frame_size (d);
6605 int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
6607 if (d % (STACK_BOUNDARY / BITS_PER_UNIT))
6608 d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
6609 - d % (STACK_BOUNDARY / BITS_PER_UNIT));
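/* Worked example: if STACK_BOUNDARY is 64 (an 8-byte boundary) and
   d == 20, then d % 8 == 4 and d_rounding == 4, padding the register
   save area up to 24 bytes. */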
6611 total_size = d + tregs_space;
6612 total_size += rounded_frame_size (total_size);
6613 save_size = total_size - frame_size;
6615 /* If adjusting the stack in a single step costs nothing extra, do so.
6616 I.e. either if a single addi is enough, or we need a movi anyway,
6617 and we don't exceed the maximum offset range (the test for the
6618 latter is conservative for simplicity). */
6620 && ! frame_pointer_needed
6621 && (CONST_OK_FOR_I10 (total_size)
6622 || (! CONST_OK_FOR_I10 (save_size + d_rounding)
6623 && total_size <= 2044)))
6624 d_rounding = frame_size;
6626 frame_size -= d_rounding;
6629 if (frame_pointer_needed)
6631 /* We must avoid scheduling the epilogue with previous basic blocks
6632 when exception handling is enabled. See PR/18032. */
6633 if (flag_exceptions)
6634 emit_insn (gen_blockage ());
6635 output_stack_adjust (frame_size, hard_frame_pointer_rtx, e,
6638 /* We must avoid moving the stack pointer adjustment past code
6639 which reads from the local frame, else an interrupt could
6640 occur after the SP adjustment and clobber data in the local
6641 frame. */
6642 emit_insn (gen_blockage ());
6643 emit_insn (GEN_MOV (stack_pointer_rtx, hard_frame_pointer_rtx));
6645 else if (frame_size)
6647 /* We must avoid moving the stack pointer adjustment past code
6648 which reads from the local frame, else an interrupt could
6649 occur after the SP adjustment and clobber data in the local
6650 frame. */
6651 emit_insn (gen_blockage ());
6652 output_stack_adjust (frame_size, stack_pointer_rtx, e, &live_regs_mask);
6655 if (SHMEDIA_REGS_STACK_ADJUST ())
6657 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6659 ? "__GCC_pop_shmedia_regs"
6660 : "__GCC_pop_shmedia_regs_nofpu"), SFUNC_GOT);
6661 /* This must NOT go through the PLT, otherwise mach and macl
6662 may be clobbered. */
6663 emit_insn (gen_shmedia_save_restore_regs_compact
6664 (GEN_INT (SHMEDIA_REGS_STACK_ADJUST ())));
6667 /* Pop all the registers. */
6669 if (target_flags != save_flags && ! current_function_interrupt)
6670 emit_insn (gen_toggle_sz ());
6673 int offset_base, offset;
6674 int offset_in_r0 = -1;
6676 rtx r0 = gen_rtx_REG (Pmode, R0_REG);
6677 save_schedule schedule;
6681 entry = sh5_schedule_saves (&live_regs_mask, &schedule, d_rounding);
6682 offset_base = -entry[1].offset + d_rounding;
6683 tmp_pnt = schedule.temps;
6684 for (; entry->mode != VOIDmode; entry--)
6686 enum machine_mode mode = entry->mode;
6687 int reg = entry->reg;
6688 rtx reg_rtx, mem_rtx, post_inc = NULL_RTX, insn;
6690 offset = offset_base + entry->offset;
6691 reg_rtx = gen_rtx_REG (mode, reg);
6693 mem_rtx = gen_frame_mem (mode,
6694 gen_rtx_PLUS (Pmode,
6698 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_post_inc);
6704 if (HAVE_POST_INCREMENT
6705 && (offset == offset_in_r0
6706 || (offset + GET_MODE_SIZE (mode) != d + d_rounding
6707 && mem_rtx == NULL_RTX)
6708 || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
6710 post_inc = gen_frame_mem (mode, gen_rtx_POST_INC (Pmode, r0));
6712 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (post_inc, 0),
6715 post_inc = NULL_RTX;
6724 if (mem_rtx != NULL_RTX)
6727 if (offset_in_r0 == -1)
6729 emit_move_insn (r0, GEN_INT (offset));
6730 offset_in_r0 = offset;
6732 else if (offset != offset_in_r0)
6737 GEN_INT (offset - offset_in_r0)));
6738 offset_in_r0 += offset - offset_in_r0;
6741 if (post_inc != NULL_RTX)
6747 (Pmode, r0, stack_pointer_rtx));
6753 offset_in_r0 += GET_MODE_SIZE (mode);
6756 mem_rtx = gen_frame_mem (mode, r0);
6758 mem_rtx = gen_frame_mem (mode,
6759 gen_rtx_PLUS (Pmode,
6763 gcc_assert ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
6764 || mem_rtx == post_inc);
6767 if ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
6768 && mem_rtx != post_inc)
6770 insn = emit_move_insn (r0, mem_rtx);
6773 else if (TARGET_REGISTER_P (reg))
6775 rtx tmp_reg = gen_rtx_REG (mode, *tmp_pnt);
6777 /* Give the scheduler a bit of freedom by using up to
6778 MAX_TEMPS registers in a round-robin fashion. */
6779 insn = emit_move_insn (tmp_reg, mem_rtx);
6782 tmp_pnt = schedule.temps;
6785 insn = emit_move_insn (reg_rtx, mem_rtx);
6788 gcc_assert (entry->offset + offset_base == d + d_rounding);
6790 else /* ! TARGET_SH5 */
6795 /* For an ISR with RESBANK attribute assigned, don't pop PR
6796 register. */
6797 if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG)
6798 && !sh_cfun_resbank_handler_p ())
6800 if (!frame_pointer_needed)
6801 emit_insn (gen_blockage ());
6805 /* Banked registers are popped first to avoid being scheduled in the
6806 delay slot. RTE switches banks before the delay-slot instruction. */
6807 if (current_function_interrupt)
6809 for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
6810 if (TEST_HARD_REG_BIT (live_regs_mask, i))
6811 pop (LAST_BANKED_REG - i);
6813 last_reg = FIRST_PSEUDO_REGISTER - LAST_BANKED_REG - 1;
6816 last_reg = FIRST_PSEUDO_REGISTER;
6818 for (i = 0; i < last_reg; i++)
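/* j runs from the highest hard register number down to 0, so registers
   are popped in the reverse of the order in which they were saved. */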
6820 int j = (FIRST_PSEUDO_REGISTER - 1) - i;
6822 if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
6823 && hard_reg_set_intersect_p (live_regs_mask,
6824 reg_class_contents[DF_REGS]))
6826 /* For an ISR with RESBANK attribute assigned, don't pop the
6827 following registers: R0-R14, MACH, MACL and GBR. */
6828 else if (j != PR_REG && TEST_HARD_REG_BIT (live_regs_mask, j)
6829 && ! (sh_cfun_resbank_handler_p ()
6830 && ((j >= FIRST_GENERAL_REG
6831 && j < LAST_GENERAL_REG)
6837 if (j == FIRST_FP_REG && fpscr_deferred)
6841 if (target_flags != save_flags && ! current_function_interrupt)
6842 emit_insn (gen_toggle_sz ());
6843 target_flags = save_flags;
6845 output_stack_adjust (crtl->args.pretend_args_size
6846 + save_size + d_rounding
6847 + crtl->args.info.stack_regs * 8,
6848 stack_pointer_rtx, e, NULL);
6850 if (crtl->calls_eh_return)
6851 emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
6852 EH_RETURN_STACKADJ_RTX));
6854 /* Switch back to the normal stack if necessary. */
6855 if (lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl)))
6856 emit_insn (gen_sp_switch_2 ());
6858 /* Tell flow the insn that pops PR isn't dead. */
6859 /* PR_REG will never be live in SHmedia mode, and we don't need to
6860 USE PR_MEDIA_REG, since it will be explicitly copied to TR0_REG
6861 by the return pattern. */
6862 if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
6863 emit_use (gen_rtx_REG (SImode, PR_REG));
6866 static int sh_need_epilogue_known = 0;
6869 sh_need_epilogue (void)
6871 if (! sh_need_epilogue_known)
6876 sh_expand_epilogue (0);
6877 epilogue = get_insns ();
6879 sh_need_epilogue_known = (epilogue == NULL ? -1 : 1);
6881 return sh_need_epilogue_known > 0;
6884 /* Emit code to change the current function's return address to RA.
6885 TEMP is available as a scratch register, if needed. */
6888 sh_set_return_address (rtx ra, rtx tmp)
6890 HARD_REG_SET live_regs_mask;
6892 int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
6895 d = calc_live_regs (&live_regs_mask);
6897 /* If pr_reg isn't live, we can set it (or the register given in
6898 sh_media_register_for_return) directly. */
6899 if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
6905 int rr_regno = sh_media_register_for_return ();
6910 rr = gen_rtx_REG (DImode, rr_regno);
6913 rr = gen_rtx_REG (SImode, pr_reg);
6915 emit_insn (GEN_MOV (rr, ra));
6916 /* Tell flow the register for return isn't dead. */
6924 save_schedule schedule;
6927 entry = sh5_schedule_saves (&live_regs_mask, &schedule, 0);
6928 offset = entry[1].offset;
6929 for (; entry->mode != VOIDmode; entry--)
6930 if (entry->reg == pr_reg)
6933 /* We can't find the PR register. */
6937 offset = entry->offset - offset;
6938 pr_offset = (rounded_frame_size (d) + offset
6939 + SHMEDIA_REGS_STACK_ADJUST ());
6942 pr_offset = rounded_frame_size (d);
6944 emit_insn (GEN_MOV (tmp, GEN_INT (pr_offset)));
6945 emit_insn (GEN_ADD3 (tmp, tmp, hard_frame_pointer_rtx));
6947 tmp = gen_frame_mem (Pmode, tmp);
6948 emit_insn (GEN_MOV (tmp, ra));
6951 /* Clear variables at function end. */
6954 sh_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6955 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6957 sh_need_epilogue_known = 0;
6961 sh_builtin_saveregs (void)
6963 /* First unnamed integer register. */
6964 int first_intreg = crtl->args.info.arg_count[(int) SH_ARG_INT];
6965 /* Number of integer registers we need to save. */
6966 int n_intregs = MAX (0, NPARM_REGS (SImode) - first_intreg);
6967 /* First unnamed SFmode float reg. */
6968 int first_floatreg = crtl->args.info.arg_count[(int) SH_ARG_FLOAT];
6969 /* Number of SFmode float regs to save. */
6970 int n_floatregs = MAX (0, NPARM_REGS (SFmode) - first_floatreg);
6973 alias_set_type alias_set;
6979 int pushregs = n_intregs;
6981 while (pushregs < NPARM_REGS (SImode) - 1
6982 && (CALL_COOKIE_INT_REG_GET
6983 (crtl->args.info.call_cookie,
6984 NPARM_REGS (SImode) - pushregs)
6987 crtl->args.info.call_cookie
6988 &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
6993 if (pushregs == NPARM_REGS (SImode))
6994 crtl->args.info.call_cookie
6995 |= (CALL_COOKIE_INT_REG (0, 1)
6996 | CALL_COOKIE_STACKSEQ (pushregs - 1));
6998 crtl->args.info.call_cookie
6999 |= CALL_COOKIE_STACKSEQ (pushregs);
7001 crtl->args.pretend_args_size += 8 * n_intregs;
7003 if (TARGET_SHCOMPACT)
7007 if (! TARGET_SH2E && ! TARGET_SH4 && ! TARGET_SH5)
7009 error ("__builtin_saveregs not supported by this subtarget");
7016 /* Allocate block of memory for the regs. */
7017 /* ??? If n_intregs + n_floatregs == 0, should we allocate at least 1 byte?
7018 Or can assign_stack_local accept a 0 SIZE argument? */
7019 bufsize = (n_intregs * UNITS_PER_WORD) + (n_floatregs * UNITS_PER_WORD);
7022 regbuf = gen_frame_mem (BLKmode, gen_rtx_REG (Pmode, ARG_POINTER_REGNUM));
7023 else if (n_floatregs & 1)
7027 regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
7028 addr = copy_to_mode_reg (Pmode, XEXP (regbuf, 0));
7029 emit_insn (gen_iorsi3 (addr, addr, GEN_INT (UNITS_PER_WORD)));
7030 regbuf = change_address (regbuf, BLKmode, addr);
7032 else if (STACK_BOUNDARY < 64 && TARGET_FPU_DOUBLE && n_floatregs)
7036 regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
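/* With 4-byte-aligned stack slots, (addr + 4) & -8 rounds addr up to
   an 8-byte boundary: an already 8-byte-aligned addr is unchanged, and
   an addr that is 4 mod 8 advances by 4. */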
7037 addr = copy_to_mode_reg (Pmode, plus_constant (XEXP (regbuf, 0), 4));
7038 mask = copy_to_mode_reg (Pmode, GEN_INT (-8));
7039 emit_insn (gen_andsi3 (addr, addr, mask));
7040 regbuf = change_address (regbuf, BLKmode, addr);
7043 regbuf = assign_stack_local (BLKmode, bufsize, TARGET_FPU_DOUBLE ? 64 : 0);
7044 alias_set = get_varargs_alias_set ();
7045 set_mem_alias_set (regbuf, alias_set);
7047 /* Save int args.
7048 This is optimized to only save the regs that are necessary. Explicitly
7049 named args need not be saved. */
7051 move_block_from_reg (BASE_ARG_REG (SImode) + first_intreg,
7052 adjust_address (regbuf, BLKmode,
7053 n_floatregs * UNITS_PER_WORD),
7057 /* Return the address of the regbuf. */
7058 return XEXP (regbuf, 0);
7060 /* Save float args.
7061 This is optimized to only save the regs that are necessary. Explicitly
7062 named args need not be saved.
7063 We explicitly build a pointer to the buffer because it halves the insn
7064 count when not optimizing (otherwise the pointer is built for each reg
7065 saved).
7066 We emit the moves in reverse order so that we can use predecrement. */
7068 fpregs = copy_to_mode_reg (Pmode,
7069 plus_constant (XEXP (regbuf, 0),
7070 n_floatregs * UNITS_PER_WORD));
7071 if (TARGET_SH4 || TARGET_SH2A_DOUBLE)
7074 for (regno = NPARM_REGS (DFmode) - 2; regno >= first_floatreg; regno -= 2)
7076 emit_insn (gen_addsi3 (fpregs, fpregs,
7077 GEN_INT (-2 * UNITS_PER_WORD)));
7078 mem = change_address (regbuf, DFmode, fpregs);
7079 emit_move_insn (mem,
7080 gen_rtx_REG (DFmode, BASE_ARG_REG (DFmode) + regno));
7082 regno = first_floatreg;
7085 emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
7086 mem = change_address (regbuf, SFmode, fpregs);
7087 emit_move_insn (mem,
7088 gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno
7089 - (TARGET_LITTLE_ENDIAN != 0)));
7093 for (regno = NPARM_REGS (SFmode) - 1; regno >= first_floatreg; regno--)
7097 emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
7098 mem = change_address (regbuf, SFmode, fpregs);
7099 emit_move_insn (mem,
7100 gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno));
7103 /* Return the address of the regbuf. */
7104 return XEXP (regbuf, 0);
7107 /* Define the `__builtin_va_list' type for the ABI. */
7110 sh_build_builtin_va_list (void)
7112 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7115 if (TARGET_SH5 || (! TARGET_SH2E && ! TARGET_SH4)
7116 || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
7117 return ptr_type_node;
7119 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
7121 f_next_o = build_decl (FIELD_DECL, get_identifier ("__va_next_o"),
7123 f_next_o_limit = build_decl (FIELD_DECL,
7124 get_identifier ("__va_next_o_limit"),
7126 f_next_fp = build_decl (FIELD_DECL, get_identifier ("__va_next_fp"),
7128 f_next_fp_limit = build_decl (FIELD_DECL,
7129 get_identifier ("__va_next_fp_limit"),
7131 f_next_stack = build_decl (FIELD_DECL, get_identifier ("__va_next_stack"),
7134 DECL_FIELD_CONTEXT (f_next_o) = record;
7135 DECL_FIELD_CONTEXT (f_next_o_limit) = record;
7136 DECL_FIELD_CONTEXT (f_next_fp) = record;
7137 DECL_FIELD_CONTEXT (f_next_fp_limit) = record;
7138 DECL_FIELD_CONTEXT (f_next_stack) = record;
7140 TYPE_FIELDS (record) = f_next_o;
7141 TREE_CHAIN (f_next_o) = f_next_o_limit;
7142 TREE_CHAIN (f_next_o_limit) = f_next_fp;
7143 TREE_CHAIN (f_next_fp) = f_next_fp_limit;
7144 TREE_CHAIN (f_next_fp_limit) = f_next_stack;
7146 layout_type (record);
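/* Illustrative sketch only: as user-level C, the record built above
   corresponds roughly to

     struct __builtin_va_list
     {
       void *__va_next_o;         // next integer register slot
       void *__va_next_o_limit;   // end of the integer register save area
       void *__va_next_fp;        // next FP register slot
       void *__va_next_fp_limit;  // end of the FP register save area
       void *__va_next_stack;     // arguments passed on the stack
     };

   The pointer-sized field types are an assumption here, and the
   per-field semantics are inferred from sh_va_start below. */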
7151 /* Always return va_list_type_node. */
7154 sh_canonical_va_list_type (tree type ATTRIBUTE_UNUSED)
7156 return va_list_type_node;
7159 /* Implement `va_start' for varargs and stdarg. */
7162 sh_va_start (tree valist, rtx nextarg)
7164 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7165 tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
7171 expand_builtin_saveregs ();
7172 std_expand_builtin_va_start (valist, nextarg);
7176 if ((! TARGET_SH2E && ! TARGET_SH4)
7177 || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
7179 std_expand_builtin_va_start (valist, nextarg);
7183 f_next_o = TYPE_FIELDS (va_list_type_node);
7184 f_next_o_limit = TREE_CHAIN (f_next_o);
7185 f_next_fp = TREE_CHAIN (f_next_o_limit);
7186 f_next_fp_limit = TREE_CHAIN (f_next_fp);
7187 f_next_stack = TREE_CHAIN (f_next_fp_limit);
7189 next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
7191 next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
7192 valist, f_next_o_limit, NULL_TREE);
7193 next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp,
7195 next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
7196 valist, f_next_fp_limit, NULL_TREE);
7197 next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
7198 valist, f_next_stack, NULL_TREE);
7200 /* Call __builtin_saveregs. */
7201 u = make_tree (sizetype, expand_builtin_saveregs ());
7202 u = fold_convert (ptr_type_node, u);
7203 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp, u);
7204 TREE_SIDE_EFFECTS (t) = 1;
7205 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7207 nfp = crtl->args.info.arg_count[SH_ARG_FLOAT];
7212 u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
7213 size_int (UNITS_PER_WORD * nfp));
7214 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp_limit, u);
7215 TREE_SIDE_EFFECTS (t) = 1;
7216 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7218 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_o, u);
7219 TREE_SIDE_EFFECTS (t) = 1;
7220 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7222 nint = crtl->args.info.arg_count[SH_ARG_INT];
7227 u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
7228 size_int (UNITS_PER_WORD * nint));
7229 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_o_limit, u);
7230 TREE_SIDE_EFFECTS (t) = 1;
7231 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7233 u = make_tree (ptr_type_node, nextarg);
7234 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_stack, u);
7235 TREE_SIDE_EFFECTS (t) = 1;
7236 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7239 /* TYPE is a RECORD_TYPE. If there is only a single nonzero-sized
7240 member, return it. */
7242 find_sole_member (tree type)
7244 tree field, member = NULL_TREE;
7246 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
7248 if (TREE_CODE (field) != FIELD_DECL)
7250 if (!DECL_SIZE (field))
7252 if (integer_zerop (DECL_SIZE (field)))
7260 /* Implement `va_arg'. */
7263 sh_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
7264 tree *post_p ATTRIBUTE_UNUSED)
7266 HOST_WIDE_INT size, rsize;
7267 tree tmp, pptr_type_node;
7268 tree addr, lab_over = NULL, result = NULL;
7269 int pass_by_ref = targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
7273 type = build_pointer_type (type);
7275 size = int_size_in_bytes (type);
7276 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
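/* E.g. with 4-byte words, size == 5 gives rsize == 8, and size == 4
   gives rsize == 4. */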
7277 pptr_type_node = build_pointer_type (ptr_type_node);
7279 if (! TARGET_SH5 && (TARGET_SH2E || TARGET_SH4)
7280 && ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
7282 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7283 tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
7288 f_next_o = TYPE_FIELDS (va_list_type_node);
7289 f_next_o_limit = TREE_CHAIN (f_next_o);
7290 f_next_fp = TREE_CHAIN (f_next_o_limit);
7291 f_next_fp_limit = TREE_CHAIN (f_next_fp);
7292 f_next_stack = TREE_CHAIN (f_next_fp_limit);
7294 next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
7296 next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
7297 valist, f_next_o_limit, NULL_TREE);
7298 next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp),
7299 valist, f_next_fp, NULL_TREE);
7300 next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
7301 valist, f_next_fp_limit, NULL_TREE);
7302 next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
7303 valist, f_next_stack, NULL_TREE);
7305 /* Structures with a single member with a distinct mode are passed
7306 like their member. This is relevant if the latter has a REAL_TYPE
7307 or COMPLEX_TYPE type. */
7309 while (TREE_CODE (eff_type) == RECORD_TYPE
7310 && (member = find_sole_member (eff_type))
7311 && (TREE_CODE (TREE_TYPE (member)) == REAL_TYPE
7312 || TREE_CODE (TREE_TYPE (member)) == COMPLEX_TYPE
7313 || TREE_CODE (TREE_TYPE (member)) == RECORD_TYPE))
7315 tree field_type = TREE_TYPE (member);
7317 if (TYPE_MODE (eff_type) == TYPE_MODE (field_type))
7318 eff_type = field_type;
7321 gcc_assert ((TYPE_ALIGN (eff_type)
7322 < GET_MODE_ALIGNMENT (TYPE_MODE (field_type)))
7323 || (TYPE_ALIGN (eff_type)
7324 > GET_MODE_BITSIZE (TYPE_MODE (field_type))));
7329 if (TARGET_SH4 || TARGET_SH2A_DOUBLE)
7331 pass_as_float = ((TREE_CODE (eff_type) == REAL_TYPE && size <= 8)
7332 || (TREE_CODE (eff_type) == COMPLEX_TYPE
7333 && TREE_CODE (TREE_TYPE (eff_type)) == REAL_TYPE
7334 && size <= 16));
7338 pass_as_float = (TREE_CODE (eff_type) == REAL_TYPE && size == 4);
7341 addr = create_tmp_var (pptr_type_node, NULL);
7342 lab_false = create_artificial_label ();
7343 lab_over = create_artificial_label ();
7345 valist = build1 (INDIRECT_REF, ptr_type_node, addr);
7349 tree next_fp_tmp = create_tmp_var (TREE_TYPE (f_next_fp), NULL);
7351 bool is_double = size == 8 && TREE_CODE (eff_type) == REAL_TYPE;
7353 tmp = build1 (ADDR_EXPR, pptr_type_node, next_fp);
7354 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
7355 gimplify_and_add (tmp, pre_p);
7357 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp_tmp, valist);
7358 gimplify_and_add (tmp, pre_p);
7359 tmp = next_fp_limit;
7360 if (size > 4 && !is_double)
7361 tmp = build2 (POINTER_PLUS_EXPR, TREE_TYPE (tmp), tmp,
7362 size_int (4 - size));
7363 tmp = build2 (GE_EXPR, boolean_type_node, next_fp_tmp, tmp);
7364 cmp = build3 (COND_EXPR, void_type_node, tmp,
7365 build1 (GOTO_EXPR, void_type_node, lab_false),
7368 gimplify_and_add (cmp, pre_p);
7370 if (TYPE_ALIGN (eff_type) > BITS_PER_WORD
7371 || (is_double || size == 16))
7373 tmp = fold_convert (sizetype, next_fp_tmp);
7374 tmp = build2 (BIT_AND_EXPR, sizetype, tmp,
7375 size_int (UNITS_PER_WORD));
7376 tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node,
7378 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node,
7380 gimplify_and_add (tmp, pre_p);
7383 gimplify_and_add (cmp, pre_p);
7385 #ifdef FUNCTION_ARG_SCmode_WART
7386 if (TYPE_MODE (eff_type) == SCmode
7387 && TARGET_SH4 && TARGET_LITTLE_ENDIAN)
7389 tree subtype = TREE_TYPE (eff_type);
7393 = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
7394 imag = get_initialized_tmp_var (imag, pre_p, NULL);
7397 = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
7398 real = get_initialized_tmp_var (real, pre_p, NULL);
7400 result = build2 (COMPLEX_EXPR, type, real, imag);
7401 result = get_initialized_tmp_var (result, pre_p, NULL);
7403 #endif /* FUNCTION_ARG_SCmode_WART */
7405 tmp = build1 (GOTO_EXPR, void_type_node, lab_over);
7406 gimplify_and_add (tmp, pre_p);
7408 tmp = build1 (LABEL_EXPR, void_type_node, lab_false);
7409 gimplify_and_add (tmp, pre_p);
7411 tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
7412 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
7413 gimplify_and_add (tmp, pre_p);
7414 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp_tmp, valist);
7415 gimplify_and_add (tmp, pre_p);
7417 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, valist, next_fp_tmp);
7418 gimplify_and_add (tmp, post_p);
7419 valist = next_fp_tmp;
7423 tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node, next_o,
7425 tmp = build2 (GT_EXPR, boolean_type_node, tmp, next_o_limit);
7426 tmp = build3 (COND_EXPR, void_type_node, tmp,
7427 build1 (GOTO_EXPR, void_type_node, lab_false),
7429 gimplify_and_add (tmp, pre_p);
7431 tmp = build1 (ADDR_EXPR, pptr_type_node, next_o);
7432 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
7433 gimplify_and_add (tmp, pre_p);
7435 tmp = build1 (GOTO_EXPR, void_type_node, lab_over);
7436 gimplify_and_add (tmp, pre_p);
7438 tmp = build1 (LABEL_EXPR, void_type_node, lab_false);
7439 gimplify_and_add (tmp, pre_p);
7441 if (size > 4 && ! (TARGET_SH4 || TARGET_SH2A))
7443 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node,
7444 next_o, next_o_limit);
7445 gimplify_and_add (tmp, pre_p);
7448 tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
7449 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
7450 gimplify_and_add (tmp, pre_p);
7455 tmp = build1 (LABEL_EXPR, void_type_node, lab_over);
7456 gimplify_and_add (tmp, pre_p);
7460 /* ??? In va-sh.h, there had been code to make values larger than
7461 size 8 indirect. This does not match the FUNCTION_ARG macros. */
7463 tmp = std_gimplify_va_arg_expr (valist, type, pre_p, NULL);
7466 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, result, tmp);
7467 gimplify_and_add (tmp, pre_p);
7469 tmp = build1 (LABEL_EXPR, void_type_node, lab_over);
7470 gimplify_and_add (tmp, pre_p);
7476 result = build_va_arg_indirect_ref (result);
7482 sh_promote_prototypes (const_tree type)
7488 return ! sh_attr_renesas_p (type);
7491 /* Whether an argument must be passed by reference. On SHcompact, we
7492 pretend arguments wider than 32 bits that would have been passed in
7493 registers are passed by reference, so that an SHmedia trampoline
7494 loads them into the full 64-bit registers. */
7497 shcompact_byref (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
7498 const_tree type, bool named)
7500 unsigned HOST_WIDE_INT size;
7503 size = int_size_in_bytes (type);
7505 size = GET_MODE_SIZE (mode);
7507 if (cum->arg_count[SH_ARG_INT] < NPARM_REGS (SImode)
7509 || GET_SH_ARG_CLASS (mode) == SH_ARG_INT
7510 || (GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT
7511 && cum->arg_count[SH_ARG_FLOAT] >= NPARM_REGS (SFmode)))
7513 && !SHCOMPACT_FORCE_ON_STACK (mode, type)
7514 && !SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
7521 sh_pass_by_reference (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7522 const_tree type, bool named)
7524 if (targetm.calls.must_pass_in_stack (mode, type))
7527 /* ??? std_gimplify_va_arg_expr passes NULL for cum. That function
7528 wants to know about pass-by-reference semantics for incoming
7529 arguments. */
7533 if (TARGET_SHCOMPACT)
7535 cum->byref = shcompact_byref (cum, mode, type, named);
7536 return cum->byref != 0;
7543 sh_callee_copies (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7544 const_tree type, bool named ATTRIBUTE_UNUSED)
7546 /* ??? How can it possibly be correct to return true only on the
7547 caller side of the equation? Is there someplace else in the
7548 sh backend that's magically producing the copies? */
7549 return (cum->outgoing
7550 && ((mode == BLKmode ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode))
7551 % SH_MIN_ALIGN_FOR_CALLEE_COPY == 0));
7555 sh_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7556 tree type, bool named ATTRIBUTE_UNUSED)
7561 && PASS_IN_REG_P (*cum, mode, type)
7562 && !(TARGET_SH4 || TARGET_SH2A_DOUBLE)
7563 && (ROUND_REG (*cum, mode)
7565 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
7566 : ROUND_ADVANCE (int_size_in_bytes (type)))
7567 > NPARM_REGS (mode)))
7568 words = NPARM_REGS (mode) - ROUND_REG (*cum, mode);
7570 else if (!TARGET_SHCOMPACT
7571 && SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
7572 words = NPARM_REGS (SImode) - cum->arg_count[SH_ARG_INT];
7574 return words * UNITS_PER_WORD;
7578 /* Define where to put the arguments to a function.
7579 Value is zero to push the argument on the stack,
7580 or a hard register in which to store the argument.
7582 MODE is the argument's machine mode.
7583 TYPE is the data type of the argument (as a tree).
7584 This is null for libcalls where that information may not be
7585 available.
7586 CUM is a variable of type CUMULATIVE_ARGS which gives info about
7587 the preceding args and about the function being called.
7588 NAMED is nonzero if this argument is a named parameter
7589 (otherwise it is an extra parameter matching an ellipsis).
7591 On SH the first args are normally in registers
7592 and the rest are pushed. Any arg that starts within the first
7593 NPARM_REGS words is at least partially passed in a register unless
7594 its data type forbids. */
7598 sh_function_arg (CUMULATIVE_ARGS *ca, enum machine_mode mode,
7599 tree type, int named)
7601 if (! TARGET_SH5 && mode == VOIDmode)
7602 return GEN_INT (ca->renesas_abi ? 1 : 0);
7605 && PASS_IN_REG_P (*ca, mode, type)
7606 && (named || ! (TARGET_HITACHI || ca->renesas_abi)))
7610 if (mode == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN
7611 && (! FUNCTION_ARG_SCmode_WART || (ROUND_REG (*ca, mode) & 1)))
7613 rtx r1 = gen_rtx_EXPR_LIST (VOIDmode,
7614 gen_rtx_REG (SFmode,
7616 + (ROUND_REG (*ca, mode) ^ 1)),
7618 rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
7619 gen_rtx_REG (SFmode,
7621 + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
7623 return gen_rtx_PARALLEL (SCmode, gen_rtvec (2, r1, r2));
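/* Assumed reading of the regno ^ 1 swap above: on little-endian SH4
   each SFmode half of the complex value lives in the partner register
   of its pair, so flipping the low bit of the register number makes
   the register pair match the memory layout of the SCmode value. */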
7626 /* If the alignment of a DF value causes an SF register to be
7627 skipped, we will use that skipped register for the next SF
7628 value. */
7629 if ((TARGET_HITACHI || ca->renesas_abi)
7630 && ca->free_single_fp_reg
7632 return gen_rtx_REG (mode, ca->free_single_fp_reg);
7634 regno = (BASE_ARG_REG (mode) + ROUND_REG (*ca, mode))
7635 ^ (mode == SFmode && TARGET_SH4
7636 && TARGET_LITTLE_ENDIAN != 0
7637 && ! TARGET_HITACHI && ! ca->renesas_abi);
7638 return gen_rtx_REG (mode, regno);
7644 if (mode == VOIDmode && TARGET_SHCOMPACT)
7645 return GEN_INT (ca->call_cookie);
7647 /* The following test assumes unnamed arguments are promoted to
7648 DFmode. */
7649 if (mode == SFmode && ca->free_single_fp_reg)
7650 return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode, ca->free_single_fp_reg);
7652 if ((GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT)
7653 && (named || ! ca->prototype_p)
7654 && ca->arg_count[(int) SH_ARG_FLOAT] < NPARM_REGS (SFmode))
7656 if (! ca->prototype_p && TARGET_SHMEDIA)
7657 return SH5_PROTOTYPELESS_FLOAT_ARG (*ca, mode);
7659 return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode,
7661 + ca->arg_count[(int) SH_ARG_FLOAT]);
7664 if (ca->arg_count[(int) SH_ARG_INT] < NPARM_REGS (SImode)
7665 && (! TARGET_SHCOMPACT
7666 || (! SHCOMPACT_FORCE_ON_STACK (mode, type)
7667 && ! SH5_WOULD_BE_PARTIAL_NREGS (*ca, mode,
7670 return gen_rtx_REG (mode, (FIRST_PARM_REG
7671 + ca->arg_count[(int) SH_ARG_INT]));
7680 /* Update the data in CUM to advance over an argument
7681 of mode MODE and data type TYPE.
7682 (TYPE is null for libcalls where that information may not be
7683 available.) */
7686 sh_function_arg_advance (CUMULATIVE_ARGS *ca, enum machine_mode mode,
7687 tree type, int named)
7691 else if (TARGET_SH5)
7693 tree type2 = (ca->byref && type
7696 enum machine_mode mode2 = (ca->byref && type
7699 int dwords = ((ca->byref
7702 ? int_size_in_bytes (type2)
7703 : GET_MODE_SIZE (mode2)) + 7) / 8;
7704 int numregs = MIN (dwords, NPARM_REGS (SImode)
7705 - ca->arg_count[(int) SH_ARG_INT]);
7709 ca->arg_count[(int) SH_ARG_INT] += numregs;
7710 if (TARGET_SHCOMPACT
7711 && SHCOMPACT_FORCE_ON_STACK (mode2, type2))
7714 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7716 /* N.B. We want this also for outgoing. */
7717 ca->stack_regs += numregs;
7722 ca->stack_regs += numregs;
7723 ca->byref_regs += numregs;
7727 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7731 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7734 else if (dwords > numregs)
7736 int pushregs = numregs;
7738 if (TARGET_SHCOMPACT)
7739 ca->stack_regs += numregs;
7740 while (pushregs < NPARM_REGS (SImode) - 1
7741 && (CALL_COOKIE_INT_REG_GET
7743 NPARM_REGS (SImode) - pushregs)
7747 &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
7751 if (numregs == NPARM_REGS (SImode))
7753 |= CALL_COOKIE_INT_REG (0, 1)
7754 | CALL_COOKIE_STACKSEQ (numregs - 1);
7757 |= CALL_COOKIE_STACKSEQ (numregs);
7760 if (GET_SH_ARG_CLASS (mode2) == SH_ARG_FLOAT
7761 && (named || ! ca->prototype_p))
7763 if (mode2 == SFmode && ca->free_single_fp_reg)
7764 ca->free_single_fp_reg = 0;
7765 else if (ca->arg_count[(int) SH_ARG_FLOAT]
7766 < NPARM_REGS (SFmode))
7769 = MIN ((GET_MODE_SIZE (mode2) + 7) / 8 * 2,
7771 - ca->arg_count[(int) SH_ARG_FLOAT]);
7773 ca->arg_count[(int) SH_ARG_FLOAT] += numfpregs;
7775 if (TARGET_SHCOMPACT && ! ca->prototype_p)
7777 if (ca->outgoing && numregs > 0)
7781 |= (CALL_COOKIE_INT_REG
7782 (ca->arg_count[(int) SH_ARG_INT]
7783 - numregs + ((numfpregs - 2) / 2),
7784 4 + (ca->arg_count[(int) SH_ARG_FLOAT]
7787 while (numfpregs -= 2);
7789 else if (mode2 == SFmode && (named)
7790 && (ca->arg_count[(int) SH_ARG_FLOAT]
7791 < NPARM_REGS (SFmode)))
7792 ca->free_single_fp_reg
7793 = FIRST_FP_PARM_REG - numfpregs
7794 + ca->arg_count[(int) SH_ARG_FLOAT] + 1;
7800 if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
7802 /* Note that we've used the skipped register. */
7803 if (mode == SFmode && ca->free_single_fp_reg)
7805 ca->free_single_fp_reg = 0;
7808 /* When we have a DF after an SF, there's an SF register that gets
7809 skipped in order to align the DF value. We note this skipped
7810 register, because the next SF value will use it, and not the
7811 SF that follows the DF. */
7813 && ROUND_REG (*ca, DFmode) != ROUND_REG (*ca, SFmode))
7815 ca->free_single_fp_reg = (ROUND_REG (*ca, SFmode)
7816 + BASE_ARG_REG (mode));
7820 if (! ((TARGET_SH4 || TARGET_SH2A) || ca->renesas_abi)
7821 || PASS_IN_REG_P (*ca, mode, type))
7822 (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
7823 = (ROUND_REG (*ca, mode)
7825 ? ROUND_ADVANCE (int_size_in_bytes (type))
7826 : ROUND_ADVANCE (GET_MODE_SIZE (mode)))));
7829 /* The Renesas calling convention doesn't quite fit into this scheme since
7830 the address is passed like an invisible argument, but one that is always
7831 passed in memory. */
7833 sh_struct_value_rtx (tree fndecl, int incoming ATTRIBUTE_UNUSED)
7835 if (TARGET_HITACHI || sh_attr_renesas_p (fndecl))
7837 return gen_rtx_REG (Pmode, 2);
7840 /* Worker function for TARGET_RETURN_IN_MEMORY. */
7843 sh_return_in_memory (const_tree type, const_tree fndecl)
7847 if (TYPE_MODE (type) == BLKmode)
7848 return ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)) > 8;
7850 return GET_MODE_SIZE (TYPE_MODE (type)) > 8;
7854 return (TYPE_MODE (type) == BLKmode
7855 || ((TARGET_HITACHI || sh_attr_renesas_p (fndecl))
7856 && TREE_CODE (type) == RECORD_TYPE));
7860 /* We actually emit the code in sh_expand_prologue. We used to use
7861 a static variable to flag that we need to emit this code, but that
7862 doesn't work when inlining, when functions are deferred and then emitted
7863 later. Fortunately, we already have two flags that are part of struct
7864 function that tell if a function uses varargs or stdarg. */
7866 sh_setup_incoming_varargs (CUMULATIVE_ARGS *ca,
7867 enum machine_mode mode,
7869 int *pretend_arg_size,
7870 int second_time ATTRIBUTE_UNUSED)
7872 gcc_assert (cfun->stdarg);
7873 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
7875 int named_parm_regs, anon_parm_regs;
7877 named_parm_regs = (ROUND_REG (*ca, mode)
7879 ? ROUND_ADVANCE (int_size_in_bytes (type))
7880 : ROUND_ADVANCE (GET_MODE_SIZE (mode))));
7881 anon_parm_regs = NPARM_REGS (SImode) - named_parm_regs;
7882 if (anon_parm_regs > 0)
7883 *pretend_arg_size = anon_parm_regs * 4;
7888 sh_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
7894 sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *ca)
7896 return ! (TARGET_HITACHI || ca->renesas_abi) && ! TARGET_SH5;
7900 /* Define the offset between two registers, one to be eliminated, and
7901 the other its replacement, at the start of a routine. */
7904 initial_elimination_offset (int from, int to)
7907 int regs_saved_rounding = 0;
7908 int total_saved_regs_space;
7909 int total_auto_space;
7910 int save_flags = target_flags;
7912 HARD_REG_SET live_regs_mask;
7914 shmedia_space_reserved_for_target_registers = false;
7915 regs_saved = calc_live_regs (&live_regs_mask);
7916 regs_saved += SHMEDIA_REGS_STACK_ADJUST ();
7918 if (shmedia_reserve_space_for_target_registers_p (regs_saved, &live_regs_mask))
7920 shmedia_space_reserved_for_target_registers = true;
7921 regs_saved += shmedia_target_regs_stack_adjust (&live_regs_mask);
7924 if (TARGET_SH5 && regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT))
7925 regs_saved_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
7926 - regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT));
7928 total_auto_space = rounded_frame_size (regs_saved) - regs_saved_rounding;
7929 copy_flags = target_flags;
7930 target_flags = save_flags;
7932 total_saved_regs_space = regs_saved + regs_saved_rounding;
7934 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7935 return total_saved_regs_space + total_auto_space
7936 + crtl->args.info.byref_regs * 8;
7938 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7939 return total_saved_regs_space + total_auto_space
7940 + crtl->args.info.byref_regs * 8;
7942 /* Initial gap between fp and sp is 0. */
7943 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7946 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7947 return rounded_frame_size (0);
7949 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7950 return rounded_frame_size (0);
7952 gcc_assert (from == RETURN_ADDRESS_POINTER_REGNUM
7953 && (to == HARD_FRAME_POINTER_REGNUM
7954 || to == STACK_POINTER_REGNUM));
7957 int n = total_saved_regs_space;
7958 int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
7959 save_schedule schedule;
7962 n += total_auto_space;
7964 /* If it wasn't saved, there's not much we can do. */
7965 if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
7968 target_flags = copy_flags;
7970 sh5_schedule_saves (&live_regs_mask, &schedule, n);
7971 for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
7972 if (entry->reg == pr_reg)
7974 target_flags = save_flags;
7975 return entry->offset;
7980 return total_auto_space;
7983 /* Parse the -mfixed-range= option string. */
7985 sh_fix_range (const char *const_str)
7988 char *str, *dash, *comma;
7990 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
7991 REG2 are either register names or register numbers. The effect
7992 of this option is to mark the registers in the range from REG1 to
7993 REG2 as ``fixed'' so they won't be used by the compiler. */
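/* Example (register names illustrative): -mfixed-range=r10-r12,r14
   marks r10, r11, r12 and r14 as fixed, so the compiler will not
   allocate them. */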
7995 i = strlen (const_str);
7996 str = (char *) alloca (i + 1);
7997 memcpy (str, const_str, i + 1);
8001 dash = strchr (str, '-');
8004 warning (0, "value of -mfixed-range must have form REG1-REG2");
8008 comma = strchr (dash + 1, ',');
8012 first = decode_reg_name (str);
8015 warning (0, "unknown register name: %s", str);
8019 last = decode_reg_name (dash + 1);
8022 warning (0, "unknown register name: %s", dash + 1);
8030 warning (0, "%s-%s is an empty range", str, dash + 1);
8034 for (i = first; i <= last; ++i)
8035 fixed_regs[i] = call_used_regs[i] = 1;
8045 /* Insert any deferred function attributes from earlier pragmas. */
8047 sh_insert_attributes (tree node, tree *attributes)
8051 if (TREE_CODE (node) != FUNCTION_DECL)
8054 /* We are only interested in fields. */
8058 /* Append the attributes to the deferred attributes. */
8059 *sh_deferred_function_attributes_tail = *attributes;
8060 attrs = sh_deferred_function_attributes;
8064 /* Some attributes imply or require the interrupt attribute. */
8065 if (!lookup_attribute ("interrupt_handler", attrs)
8066 && !lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (node)))
8068 /* If we have a trapa_handler, but no interrupt_handler attribute,
8069 insert an interrupt_handler attribute. */
8070 if (lookup_attribute ("trapa_handler", attrs) != NULL_TREE)
8071 /* We can't use sh_pr_interrupt here because that's not in the
8072 java frontend. */
8073 attrs
8074 = tree_cons (get_identifier("interrupt_handler"), NULL_TREE, attrs);
8075 /* However, for sp_switch, trap_exit, nosave_low_regs and resbank,
8076 if the interrupt attribute is missing, we ignore the attribute
8077 and warn. */
8078 else if (lookup_attribute ("sp_switch", attrs)
8079 || lookup_attribute ("trap_exit", attrs)
8080 || lookup_attribute ("nosave_low_regs", attrs)
8081 || lookup_attribute ("resbank", attrs))
8085 for (tail = attributes; attrs; attrs = TREE_CHAIN (attrs))
8087 if (is_attribute_p ("sp_switch", TREE_PURPOSE (attrs))
8088 || is_attribute_p ("trap_exit", TREE_PURPOSE (attrs))
8089 || is_attribute_p ("nosave_low_regs", TREE_PURPOSE (attrs))
8090 || is_attribute_p ("resbank", TREE_PURPOSE (attrs)))
8091 warning (OPT_Wattributes,
8092 "%qs attribute only applies to interrupt functions",
8093 IDENTIFIER_POINTER (TREE_PURPOSE (attrs)));
8096 *tail = tree_cons (TREE_PURPOSE (attrs), NULL_TREE,
8098 tail = &TREE_CHAIN (*tail);
8101 attrs = *attributes;
8105 /* Install the processed list. */
8106 *attributes = attrs;
8108 /* Clear deferred attributes. */
8109 sh_deferred_function_attributes = NULL_TREE;
8110 sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
8115 /* Supported attributes:
8117 interrupt_handler -- specifies this function is an interrupt handler.
8119 trapa_handler -- like above, but don't save all registers.
8121 sp_switch -- specifies an alternate stack for an interrupt handler
8122 to run on.
8124 trap_exit -- use a trapa to exit an interrupt function instead of
8125 an rte instruction.
8127 nosave_low_regs -- don't save r0..r7 in an interrupt handler.
8128 This is useful on the SH3 and upwards,
8129 which have a separate set of low regs for User and Supervisor modes.
8130 This should only be used for the lowest level of interrupts. Higher levels
8131 of interrupts must save the registers in case they themselves are
8132 interrupted.
8134 renesas -- use Renesas calling/layout conventions (functions and
8135 structures).
8137 resbank -- In case of an ISR, use a register bank to save registers
8138 R0-R14, MACH, MACL, GBR and PR. This is useful only on SH2A targets. */
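/* Usage sketch in user code (function names illustrative; resbank and
   function_vector are SH2A-only):

     void isr (void) __attribute__ ((interrupt_handler));
     void bank_isr (void) __attribute__ ((interrupt_handler, resbank));
     void alt_isr (void) __attribute__ ((interrupt_handler, sp_switch ("alt_stack")));
     void trap_isr (void) __attribute__ ((interrupt_handler, trap_exit (11)));
     void vec_fn (void) __attribute__ ((function_vector (42)));  */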
8141 const struct attribute_spec sh_attribute_table[] =
8143 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
8144 { "interrupt_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
8145 { "sp_switch", 1, 1, true, false, false, sh_handle_sp_switch_attribute },
8146 { "trap_exit", 1, 1, true, false, false, sh_handle_trap_exit_attribute },
8147 { "renesas", 0, 0, false, true, false, sh_handle_renesas_attribute },
8148 { "trapa_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
8149 { "nosave_low_regs", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
8150 { "resbank", 0, 0, true, false, false, sh_handle_resbank_handler_attribute },
8151 { "function_vector", 1, 1, true, false, false, sh2a_handle_function_vector_handler_attribute },
8153 /* Symbian support adds three new attributes:
8154 dllexport - for exporting a function/variable that will live in a dll
8155 dllimport - for importing a function/variable from a dll
8157 Microsoft allows multiple declspecs in one __declspec, separating
8158 them with spaces. We do NOT support this. Instead, use __declspec
8159 multiple times.
8160 { "dllimport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
8161 { "dllexport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
8163 { NULL, 0, 0, false, false, false, NULL }
8166 /* Handle a 'resbank' attribute. */
8168 sh_handle_resbank_handler_attribute (tree * node, tree name,
8169 tree args ATTRIBUTE_UNUSED,
8170 int flags ATTRIBUTE_UNUSED,
8171 bool * no_add_attrs)
8175 warning (OPT_Wattributes, "%qs attribute is supported only for SH2A",
8176 IDENTIFIER_POINTER (name));
8177 *no_add_attrs = true;
8179 if (TREE_CODE (*node) != FUNCTION_DECL)
8181 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8182 IDENTIFIER_POINTER (name));
8183 *no_add_attrs = true;
8189 /* Handle an "interrupt_handler" attribute; arguments as in
8190 struct attribute_spec.handler. */
8192 sh_handle_interrupt_handler_attribute (tree *node, tree name,
8193 tree args ATTRIBUTE_UNUSED,
8194 int flags ATTRIBUTE_UNUSED,
8197 if (TREE_CODE (*node) != FUNCTION_DECL)
8199 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8200 IDENTIFIER_POINTER (name));
8201 *no_add_attrs = true;
8203 else if (TARGET_SHCOMPACT)
8205 error ("attribute interrupt_handler is not compatible with -m5-compact");
8206 *no_add_attrs = true;
8212 /* Handle a 'function_vector' attribute; arguments as in
8213 struct attribute_spec.handler. */
8215 sh2a_handle_function_vector_handler_attribute (tree * node, tree name,
8216 tree args ATTRIBUTE_UNUSED,
8217 int flags ATTRIBUTE_UNUSED,
8218 bool * no_add_attrs)
8222 warning (OPT_Wattributes, "%qs attribute only applies to SH2A",
8223 IDENTIFIER_POINTER (name));
8224 *no_add_attrs = true;
8226 else if (TREE_CODE (*node) != FUNCTION_DECL)
8228 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8229 IDENTIFIER_POINTER (name));
8230 *no_add_attrs = true;
8232 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
8234 /* The argument must be a constant integer. */
8235 warning (OPT_Wattributes,
8236 "`%s' attribute argument not an integer constant",
8237 IDENTIFIER_POINTER (name));
8238 *no_add_attrs = true;
8240 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
8242 /* The argument value must be between 0 and 255. */
8243 warning (OPT_Wattributes,
8244 "`%s' attribute argument should be between 0 and 255",
8245 IDENTIFIER_POINTER (name));
8246 *no_add_attrs = true;
8251 /* Returns 1 if the function that X refers to has been assigned the
8252 attribute 'function_vector'. */
8254 sh2a_is_function_vector_call (rtx x)
8256 if (GET_CODE (x) == SYMBOL_REF
8257 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
8259 tree tr = SYMBOL_REF_DECL (x);
8261 if (sh2a_function_vector_p (tr))
8268 /* Returns the function vector number, if the attribute
8269 'function_vector' is assigned, otherwise returns zero. */
8271 sh2a_get_function_vector_number (rtx x)
8276 if ((GET_CODE (x) == SYMBOL_REF)
8277 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
8279 t = SYMBOL_REF_DECL (x);
8281 if (TREE_CODE (t) != FUNCTION_DECL)
8284 list = SH_ATTRIBUTES (t);
8287 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
8289 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
8293 list = TREE_CHAIN (list);
8302 /* Handle an "sp_switch" attribute; arguments as in
8303 struct attribute_spec.handler. */
8305 sh_handle_sp_switch_attribute (tree *node, tree name, tree args,
8306 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
8308 if (TREE_CODE (*node) != FUNCTION_DECL)
8310 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8311 IDENTIFIER_POINTER (name));
8312 *no_add_attrs = true;
8314 else if (TREE_CODE (TREE_VALUE (args)) != STRING_CST)
8316 /* The argument must be a constant string. */
8317 warning (OPT_Wattributes, "%qs attribute argument not a string constant",
8318 IDENTIFIER_POINTER (name));
8319 *no_add_attrs = true;
8325 /* Handle a "trap_exit" attribute; arguments as in
8326 struct attribute_spec.handler. */
8328 sh_handle_trap_exit_attribute (tree *node, tree name, tree args,
8329 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
8331 if (TREE_CODE (*node) != FUNCTION_DECL)
8333 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8334 IDENTIFIER_POINTER (name));
8335 *no_add_attrs = true;
8337 /* The argument specifies a trap number to be used in a trapa instruction
8338 at function exit (instead of an rte instruction). */
8339 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
8341 /* The argument must be a constant integer. */
8342 warning (OPT_Wattributes, "%qs attribute argument not an "
8343 "integer constant", IDENTIFIER_POINTER (name));
8344 *no_add_attrs = true;
8351 sh_handle_renesas_attribute (tree *node ATTRIBUTE_UNUSED,
8352 tree name ATTRIBUTE_UNUSED,
8353 tree args ATTRIBUTE_UNUSED,
8354 int flags ATTRIBUTE_UNUSED,
8355 bool *no_add_attrs ATTRIBUTE_UNUSED)
8360 /* True if __attribute__((renesas)) or -mrenesas. */
8362 sh_attr_renesas_p (const_tree td)
8369 td = TREE_TYPE (td);
8370 if (td == error_mark_node)
8372 return (lookup_attribute ("renesas", TYPE_ATTRIBUTES (td))
8376 /* True if __attribute__((renesas)) or -mrenesas, for the current
8377 function. */
8379 sh_cfun_attr_renesas_p (void)
8381 return sh_attr_renesas_p (current_function_decl);
8385 sh_cfun_interrupt_handler_p (void)
8387 return (lookup_attribute ("interrupt_handler",
8388 DECL_ATTRIBUTES (current_function_decl))
8392 /* Returns 1 if FUNC has been assigned the attribute
8393 "function_vector". */
8395 sh2a_function_vector_p (tree func)
8398 if (TREE_CODE (func) != FUNCTION_DECL)
8401 list = SH_ATTRIBUTES (func);
8404 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
8407 list = TREE_CHAIN (list);
8412 /* Returns TRUE if the given tree has the "resbank" attribute. */
8415 sh_cfun_resbank_handler_p (void)
8417 return ((lookup_attribute ("resbank",
8418 DECL_ATTRIBUTES (current_function_decl))
8420 && (lookup_attribute ("interrupt_handler",
8421 DECL_ATTRIBUTES (current_function_decl))
8422 != NULL_TREE) && TARGET_SH2A);
8425 /* Implement TARGET_CHECK_PCH_TARGET_FLAGS. */
8428 sh_check_pch_target_flags (int old_flags)
8430 if ((old_flags ^ target_flags) & (MASK_SH1 | MASK_SH2 | MASK_SH3
8431 | MASK_SH_E | MASK_HARD_SH4
8432 | MASK_FPU_SINGLE | MASK_SH4))
8433 return _("created and used with different architectures / ABIs");
8434 if ((old_flags ^ target_flags) & MASK_HITACHI)
8435 return _("created and used with different ABIs");
8436 if ((old_flags ^ target_flags) & MASK_LITTLE_ENDIAN)
8437 return _("created and used with different endianness");
8441 /* Predicates used by the templates. */
8443 /* Returns 1 if OP is MACL, MACH or PR. The input must be a REG rtx.
8444 Used only in general_movsrc_operand. */
8447 system_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8459 /* Nonzero if OP is a floating point value with value 0.0. */
8462 fp_zero_operand (rtx op)
8466 if (GET_MODE (op) != SFmode)
8469 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
8470 return REAL_VALUES_EQUAL (r, dconst0) && ! REAL_VALUE_MINUS_ZERO (r);
8473 /* Nonzero if OP is a floating point value with value 1.0. */
8476 fp_one_operand (rtx op)
8480 if (GET_MODE (op) != SFmode)
8483 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
8484 return REAL_VALUES_EQUAL (r, dconst1);
8487 /* For -m4 and -m4-single-only, mode switching is used. If we are
8488 compiling without -mfmovd, movsf_ie isn't taken into account for
8489 mode switching. We could check in machine_dependent_reorg for
8490 cases where we know we are in single precision mode, but there is
8491 no interface to find that out during reload, so we must avoid
8492 choosing an fldi alternative during reload and thus failing to
8493 allocate a scratch register for the constant loading. */
8497 return ! TARGET_SH4 || TARGET_FMOVD || reload_completed;
8501 tertiary_reload_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8503 enum rtx_code code = GET_CODE (op);
8504 return code == MEM || (TARGET_SH4 && code == CONST_DOUBLE);
8507 /* Return the TLS type for TLS symbols, 0 otherwise. */
8509 tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8511 if (GET_CODE (op) != SYMBOL_REF)
8513 return SYMBOL_REF_TLS_MODEL (op);
8516 /* Return the destination address of a branch. */
8519 branch_dest (rtx branch)
8521 rtx dest = SET_SRC (PATTERN (branch));
8524 if (GET_CODE (dest) == IF_THEN_ELSE)
8525 dest = XEXP (dest, 1);
8526 dest = XEXP (dest, 0);
8527 dest_uid = INSN_UID (dest);
8528 return INSN_ADDRESSES (dest_uid);
8531 /* Return nonzero if REG is not used after INSN.
8532 We assume REG is a reload reg, and therefore does
8533 not live past labels. It may live past calls or jumps though. */
8535 reg_unused_after (rtx reg, rtx insn)
8540 /* If the reg is set by this instruction, then it is safe for our
8541 case. Disregard the case where this is a store to memory, since
8542 we are checking a register used in the store address. */
8543 set = single_set (insn);
8544 if (set && GET_CODE (SET_DEST (set)) != MEM
8545 && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8548 while ((insn = NEXT_INSN (insn)))
8554 code = GET_CODE (insn);
8557 /* If this is a label that existed before reload, then the register
8558 is dead here. However, if this is a label added by reorg, then
8559 the register may still be live here. We can't tell the difference,
8560 so we just ignore labels completely. */
8561 if (code == CODE_LABEL)
8566 if (code == JUMP_INSN)
8569 /* If this is a sequence, we must handle them all at once.
8570 We could have for instance a call that sets the target register,
8571 and an insn in a delay slot that uses the register. In this case,
8572 we must return 0. */
8573 else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
8578 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8580 rtx this_insn = XVECEXP (PATTERN (insn), 0, i);
8581 rtx set = single_set (this_insn);
8583 if (GET_CODE (this_insn) == CALL_INSN)
8585 else if (GET_CODE (this_insn) == JUMP_INSN)
8587 if (INSN_ANNULLED_BRANCH_P (this_insn))
8592 if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
8594 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8596 if (GET_CODE (SET_DEST (set)) != MEM)
8602 && reg_overlap_mentioned_p (reg, PATTERN (this_insn)))
8607 else if (code == JUMP_INSN)
8611 set = single_set (insn);
8612 if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
8614 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8615 return GET_CODE (SET_DEST (set)) != MEM;
8616 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
8619 if (code == CALL_INSN && call_really_used_regs[REGNO (reg)])
8627 static GTY(()) rtx fpscr_rtx;
8629 get_fpscr_rtx (void)
8633 fpscr_rtx = gen_rtx_REG (PSImode, FPSCR_REG);
8634 REG_USERVAR_P (fpscr_rtx) = 1;
8635 mark_user_reg (fpscr_rtx);
8637 if (! reload_completed || mdep_reorg_phase != SH_AFTER_MDEP_REORG)
8638 mark_user_reg (fpscr_rtx);
8642 static GTY(()) tree fpscr_values;
8645 emit_fpu_switch (rtx scratch, int index)
8649 if (fpscr_values == NULL)
8653 t = build_index_type (integer_one_node);
8654 t = build_array_type (integer_type_node, t);
8655 t = build_decl (VAR_DECL, get_identifier ("__fpscr_values"), t);
8656 DECL_ARTIFICIAL (t) = 1;
8657 DECL_IGNORED_P (t) = 1;
8658 DECL_EXTERNAL (t) = 1;
8659 TREE_STATIC (t) = 1;
8660 TREE_PUBLIC (t) = 1;
8666 src = DECL_RTL (fpscr_values);
8667 if (!can_create_pseudo_p ())
8669 emit_move_insn (scratch, XEXP (src, 0));
8671 emit_insn (gen_addsi3 (scratch, scratch, GEN_INT (index * 4)));
8672 src = adjust_automodify_address (src, PSImode, scratch, index * 4);
8675 src = adjust_address (src, PSImode, index * 4);
8677 dst = get_fpscr_rtx ();
8678 emit_move_insn (dst, src);
8682 emit_sf_insn (rtx pat)
8688 emit_df_insn (rtx pat)
8694 expand_sf_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
8696 emit_sf_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
8700 expand_sf_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
8702 emit_sf_insn ((*fun) (operands[0], operands[1], operands[2],
8707 expand_df_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
8709 emit_df_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
8713 expand_df_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
8715 emit_df_insn ((*fun) (operands[0], operands[1], operands[2],
8719 static rtx get_free_reg (HARD_REG_SET);
8721 /* This function returns a register to use to load the address to load
8722 the fpscr from. Currently it always returns r1 or r7, but when we are
8723 able to use pseudo registers after combine, or have a better mechanism
8724 for choosing a register, it should be done here. */
8725 /* REGS_LIVE is the liveness information for the point for which we
8726 need this allocation. In some bare-bones exit blocks, r1 is live at the
8727 start. We can even have all of r0..r3 being live:
8728 __complex__ long long f (double d) { if (d == 0) return 2; else return 3; }
8729 The INSN before which the new insns are placed will clobber the register
8730 we return. If a basic block consists only of setting the return value
8731 register to a pseudo and using that register, the return value is not
8732 live before or after this block, yet we'll insert our insns right in
8733 the middle. */
8735 static rtx
8736 get_free_reg (HARD_REG_SET regs_live)
8738 if (! TEST_HARD_REG_BIT (regs_live, 1))
8739 return gen_rtx_REG (Pmode, 1);
8741 /* Hard reg 1 is live; since this is a SMALL_REGISTER_CLASSES target,
8742 there shouldn't be anything but a jump before the function end. */
8743 gcc_assert (!TEST_HARD_REG_BIT (regs_live, 7));
8744 return gen_rtx_REG (Pmode, 7);
8747 /* This function will set the fpscr from memory.
8748 MODE is the mode we are setting it to. */
8750 fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
8752 enum attr_fp_mode fp_mode = mode;
8753 enum attr_fp_mode norm_mode = ACTUAL_NORMAL_MODE (FP_MODE);
8756 addr_reg = !can_create_pseudo_p () ? get_free_reg (regs_live) : NULL_RTX;
8757 emit_fpu_switch (addr_reg, fp_mode == norm_mode);
8760 /* Is the given character a logical line separator for the assembler? */
8761 #ifndef IS_ASM_LOGICAL_LINE_SEPARATOR
8762 #define IS_ASM_LOGICAL_LINE_SEPARATOR(C, STR) ((C) == ';')
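/* E.g. with this default definition, "mov #1,r0; rts" counts as two
   logical lines, since ';' separates assembler statements. */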
8766 sh_insn_length_adjustment (rtx insn)
8768 /* Instructions with unfilled delay slots take up an extra two bytes for
8769 the nop in the delay slot. */
8770 if (((GET_CODE (insn) == INSN
8771 && GET_CODE (PATTERN (insn)) != USE
8772 && GET_CODE (PATTERN (insn)) != CLOBBER)
8773 || GET_CODE (insn) == CALL_INSN
8774 || (GET_CODE (insn) == JUMP_INSN
8775 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
8776 && GET_CODE (PATTERN (insn)) != ADDR_VEC))
8777 && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE
8778 && get_attr_needs_delay_slot (insn) == NEEDS_DELAY_SLOT_YES)
8781 /* SH2e has a bug that prevents the use of annulled branches, so if
8782 the delay slot is not filled, we'll have to put a NOP in it. */
8783 if (sh_cpu == CPU_SH2E
8784 && GET_CODE (insn) == JUMP_INSN
8785 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
8786 && GET_CODE (PATTERN (insn)) != ADDR_VEC
8787 && get_attr_type (insn) == TYPE_CBRANCH
8788 && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE)
8791 /* sh-dsp parallel processing insns take four bytes instead of two. */
8793 if (GET_CODE (insn) == INSN)
8796 rtx body = PATTERN (insn);
8797 const char *template;
8799 int maybe_label = 1;
8801 if (GET_CODE (body) == ASM_INPUT)
8802 template = XSTR (body, 0);
8803 else if (asm_noperands (body) >= 0)
8805 = decode_asm_operands (body, NULL, NULL, NULL, NULL, NULL);
8814 while (c == ' ' || c == '\t');
8815 /* All sh-dsp parallel-processing insns start with p.
8816 The only non-ppi sh insn starting with p is pref.
8817 The only ppi starting with pr is prnd. */
8818 if ((c == 'p' || c == 'P') && strncasecmp ("re", template, 2))
8820 /* The repeat pseudo-insn expands to three insns, a total of
8821 six bytes in size. */
8822 else if ((c == 'r' || c == 'R')
8823 && ! strncasecmp ("epeat", template, 5))
8825 while (c && c != '\n'
8826 && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c, template))
8828 /* If this is a label, it is obviously not a ppi insn. */
8829 if (c == ':' && maybe_label)
8834 else if (c == '\'' || c == '"')
8839 maybe_label = c != ':';
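/* Illustrative templates for the scan above (hedged examples): an asm
   statement such as "padd  a0,m0,a0" is counted as a 4-byte ppi insn,
   "pref  @r0" keeps its 2 bytes thanks to the "re" check, and
   "repeat  start,end,count" accounts for the full 6 bytes.  */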
8847 /* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
8848 isn't protected by a PIC unspec. */
8850 nonpic_symbol_mentioned_p (rtx x)
8852 register const char *fmt;
8855 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF
8856 || GET_CODE (x) == PC)
8859 /* We don't want to look into the possible MEM location of a
8860 CONST_DOUBLE, since we're not going to use it, in general. */
8861 if (GET_CODE (x) == CONST_DOUBLE)
8864 if (GET_CODE (x) == UNSPEC
8865 && (XINT (x, 1) == UNSPEC_PIC
8866 || XINT (x, 1) == UNSPEC_GOT
8867 || XINT (x, 1) == UNSPEC_GOTOFF
8868 || XINT (x, 1) == UNSPEC_GOTPLT
8869 || XINT (x, 1) == UNSPEC_GOTTPOFF
8870 || XINT (x, 1) == UNSPEC_DTPOFF
8871 || XINT (x, 1) == UNSPEC_PLT))
8874 fmt = GET_RTX_FORMAT (GET_CODE (x));
8875 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8881 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8882 if (nonpic_symbol_mentioned_p (XVECEXP (x, i, j)))
8885 else if (fmt[i] == 'e' && nonpic_symbol_mentioned_p (XEXP (x, i)))
8892 /* Convert a non-PIC address in `orig' to a PIC address using @GOT or
8893 @GOTOFF in `reg'. */
8895 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
8898 if (tls_symbolic_operand (orig, Pmode))
8901 if (GET_CODE (orig) == LABEL_REF
8902 || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (orig)))
8905 reg = gen_reg_rtx (Pmode);
8907 emit_insn (gen_symGOTOFF2reg (reg, orig));
8910 else if (GET_CODE (orig) == SYMBOL_REF)
8913 reg = gen_reg_rtx (Pmode);
8915 emit_insn (gen_symGOT2reg (reg, orig));
8921 /* Mark the use of a constant in the literal table. If the constant
8922 has multiple labels, make it unique. */
8924 mark_constant_pool_use (rtx x)
8926 rtx insn, lab, pattern;
8931 switch (GET_CODE (x))
8941 /* Get the first label in the list of labels for the same constant
8942 and delete the other labels in the list. */
8944 for (insn = PREV_INSN (x); insn; insn = PREV_INSN (insn))
8946 if (GET_CODE (insn) != CODE_LABEL
8947 || LABEL_REFS (insn) != NEXT_INSN (insn))
8952 for (insn = LABEL_REFS (lab); insn; insn = LABEL_REFS (insn))
8953 INSN_DELETED_P (insn) = 1;
8955 /* Mark constants in a window. */
8956 for (insn = NEXT_INSN (x); insn; insn = NEXT_INSN (insn))
8958 if (GET_CODE (insn) != INSN)
8961 pattern = PATTERN (insn);
8962 if (GET_CODE (pattern) != UNSPEC_VOLATILE)
8965 switch (XINT (pattern, 1))
8967 case UNSPECV_CONST2:
8968 case UNSPECV_CONST4:
8969 case UNSPECV_CONST8:
8970 XVECEXP (pattern, 0, 1) = const1_rtx;
8972 case UNSPECV_WINDOW_END:
8973 if (XVECEXP (pattern, 0, 0) == x)
8976 case UNSPECV_CONST_END:
8986 /* Return true if it's possible to redirect BRANCH1 to the destination
8987 of an unconditional jump BRANCH2. We only want to do this if the
8988 resulting branch will have a short displacement. */
8990 sh_can_redirect_branch (rtx branch1, rtx branch2)
8992 if (flag_expensive_optimizations && simplejump_p (branch2))
8994 rtx dest = XEXP (SET_SRC (single_set (branch2)), 0);
8998 for (distance = 0, insn = PREV_INSN (branch1);
8999 insn && distance < 256;
9000 insn = PREV_INSN (insn))
9005 distance += get_attr_length (insn);
9007 for (distance = 0, insn = NEXT_INSN (branch1);
9008 insn && distance < 256;
9009 insn = NEXT_INSN (insn))
9014 distance += get_attr_length (insn);
9020 /* Return nonzero if register old_reg can be renamed to register new_reg. */
9022 sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
9023 unsigned int new_reg)
9025 /* Interrupt functions can only use registers that have already been
9026 saved by the prologue, even if they would normally be
9029 if (sh_cfun_interrupt_handler_p () && !df_regs_ever_live_p (new_reg))
9035 /* Function to update the integer COST
9036 based on the relationship between INSN that is dependent on
9037 DEP_INSN through the dependence LINK. The default is to make no
9038 adjustment to COST. This can be used for example to specify to
9039 the scheduler that an output- or anti-dependence does not incur
9040 the same cost as a data-dependence. The return value should be
9041 the new value for COST. */
9043 sh_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx dep_insn, int cost)
9049 /* On SHmedia, if the dependence is an anti-dependence or
9050 output-dependence, there is no cost. */
9051 if (REG_NOTE_KIND (link) != 0)
9053 /* However, dependencies between target register loads and
9054 uses of the register in a subsequent block that are separated
9055 by a conditional branch are not modelled - we have to make do with
9056 the anti-dependency between the target register load and the
9057 conditional branch that ends the current block. */
9058 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
9059 && GET_CODE (PATTERN (dep_insn)) == SET
9060 && (get_attr_type (dep_insn) == TYPE_PT_MEDIA
9061 || get_attr_type (dep_insn) == TYPE_PTABS_MEDIA)
9062 && get_attr_type (insn) == TYPE_CBRANCH_MEDIA)
9064 int orig_cost = cost;
9065 rtx note = find_reg_note (insn, REG_BR_PROB, 0);
9066 rtx target = ((! note
9067 || INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
9068 ? insn : JUMP_LABEL (insn));
9069 /* On the likely path, the branch costs 1, on the unlikely path,
9073 target = next_active_insn (target);
9074 while (target && ! flow_dependent_p (target, dep_insn)
9076 /* If two branches are executed in immediate succession, with the
9077 first branch properly predicted, this causes a stall at the
9078 second branch, hence we won't need the target for the
9079 second branch for two cycles after the launch of the first
9081 if (cost > orig_cost - 2)
9082 cost = orig_cost - 2;
9088 else if (get_attr_is_mac_media (insn)
9089 && get_attr_is_mac_media (dep_insn))
9092 else if (! reload_completed
9093 && GET_CODE (PATTERN (insn)) == SET
9094 && GET_CODE (SET_SRC (PATTERN (insn))) == FLOAT
9095 && GET_CODE (PATTERN (dep_insn)) == SET
9096 && fp_arith_reg_operand (SET_SRC (PATTERN (dep_insn)), VOIDmode)
9099 /* Schedule the ptabs for a casesi_jump_media in preference to stuff
9100 that is needed at the target. */
9101 else if (get_attr_type (insn) == TYPE_JUMP_MEDIA
9102 && ! flow_dependent_p (insn, dep_insn))
9105 else if (REG_NOTE_KIND (link) == 0)
9107 enum attr_type type;
9110 if (recog_memoized (insn) < 0
9111 || recog_memoized (dep_insn) < 0)
9114 dep_set = single_set (dep_insn);
9116 /* The latency that we specify in the scheduling description refers
9117 to the actual output, not to an auto-increment register; for that,
9118 the latency is one. */
9119 if (dep_set && MEM_P (SET_SRC (dep_set)) && cost > 1)
9121 rtx set = single_set (insn);
9124 && !reg_mentioned_p (SET_DEST (dep_set), SET_SRC (set))
9125 && (!MEM_P (SET_DEST (set))
9126 || !reg_mentioned_p (SET_DEST (dep_set),
9127 XEXP (SET_DEST (set), 0))))
9130 /* The only input for a call that is timing-critical is the
9131 function's address. */
9132 if (GET_CODE (insn) == CALL_INSN)
9134 rtx call = PATTERN (insn);
9136 if (GET_CODE (call) == PARALLEL)
9137 call = XVECEXP (call, 0, 0);
9138 if (GET_CODE (call) == SET)
9139 call = SET_SRC (call);
9140 if (GET_CODE (call) == CALL && GET_CODE (XEXP (call, 0)) == MEM
9141 /* sibcalli_thunk uses a symbol_ref in an unspec. */
9142 && (GET_CODE (XEXP (XEXP (call, 0), 0)) == UNSPEC
9143 || ! reg_set_p (XEXP (XEXP (call, 0), 0), dep_insn)))
9144 cost -= TARGET_SH4_300 ? 3 : 6;
9146 /* Likewise, the most timing critical input for an sfuncs call
9147 is the function address. However, sfuncs typically start
9148 using their arguments pretty quickly.
9149 Assume a four cycle delay for SH4 before they are needed.
9150 Cached ST40-300 calls are quicker, so assume only a one
9152 ??? Maybe we should encode the delays till input registers
9153 are needed by sfuncs into the sfunc call insn. */
9154 /* All sfunc calls are parallels with at least four components.
9155 Exploit this to avoid unnecessary calls to sfunc_uses_reg. */
9156 else if (GET_CODE (PATTERN (insn)) == PARALLEL
9157 && XVECLEN (PATTERN (insn), 0) >= 4
9158 && (reg = sfunc_uses_reg (insn)))
9160 if (! reg_set_p (reg, dep_insn))
9161 cost -= TARGET_SH4_300 ? 1 : 4;
9163 if (TARGET_HARD_SH4 && !TARGET_SH4_300)
9165 enum attr_type dep_type = get_attr_type (dep_insn);
9167 if (dep_type == TYPE_FLOAD || dep_type == TYPE_PCFLOAD)
9169 else if ((dep_type == TYPE_LOAD_SI || dep_type == TYPE_PCLOAD_SI)
9170 && (type = get_attr_type (insn)) != TYPE_CALL
9171 && type != TYPE_SFUNC)
9173 /* When the preceding instruction loads the shift amount of
9174 the following SHAD/SHLD, the latency of the load is increased
9176 if (get_attr_type (insn) == TYPE_DYN_SHIFT
9177 && get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
9178 && reg_overlap_mentioned_p (SET_DEST (dep_set),
9179 XEXP (SET_SRC (single_set (insn)),
9182 /* When an LS group instruction with a latency of less than
9183 3 cycles is followed by a double-precision floating-point
9184 instruction, FIPR, or FTRV, the latency of the first
9185 instruction is increased to 3 cycles. */
9187 && get_attr_insn_class (dep_insn) == INSN_CLASS_LS_GROUP
9188 && get_attr_dfp_comp (insn) == DFP_COMP_YES)
9190 /* The lsw register of a double-precision computation is ready one
9192 else if (reload_completed
9193 && get_attr_dfp_comp (dep_insn) == DFP_COMP_YES
9194 && (use_pat = single_set (insn))
9195 && ! regno_use_in (REGNO (SET_DEST (single_set (dep_insn))),
9199 if (get_attr_any_fp_comp (dep_insn) == ANY_FP_COMP_YES
9200 && get_attr_late_fp_use (insn) == LATE_FP_USE_YES)
9203 else if (TARGET_SH4_300)
9205 /* Stores need their input register two cycles later. */
9206 if (dep_set && cost >= 1
9207 && ((type = get_attr_type (insn)) == TYPE_STORE
9208 || type == TYPE_PSTORE
9209 || type == TYPE_FSTORE || type == TYPE_MAC_MEM))
9211 rtx set = single_set (insn);
9213 if (!reg_mentioned_p (SET_SRC (set), XEXP (SET_DEST (set), 0))
9214 && rtx_equal_p (SET_SRC (set), SET_DEST (dep_set)))
9217 /* But don't reduce the cost below 1 if the address depends
9218 on a side effect of dep_insn. */
9220 && modified_in_p (XEXP (SET_DEST (set), 0), dep_insn))
9226 /* An anti-dependence penalty of two applies if the first insn is a double
9227 precision fadd / fsub / fmul. */
9228 else if (!TARGET_SH4_300
9229 && REG_NOTE_KIND (link) == REG_DEP_ANTI
9230 && recog_memoized (dep_insn) >= 0
9231 && (get_attr_type (dep_insn) == TYPE_DFP_ARITH
9232 || get_attr_type (dep_insn) == TYPE_DFP_MUL)
9233 /* A lot of alleged anti-flow dependences are fake,
9234 so check this one is real. */
9235 && flow_dependent_p (dep_insn, insn))
9241 /* Check if INSN is flow-dependent on DEP_INSN. Can also be used to check
9242 if DEP_INSN is anti-flow dependent on INSN. */
9244 flow_dependent_p (rtx insn, rtx dep_insn)
9246 rtx tmp = PATTERN (insn);
9248 note_stores (PATTERN (dep_insn), flow_dependent_p_1, &tmp);
9249 return tmp == NULL_RTX;
9252 /* A helper function for flow_dependent_p called through note_stores. */
9254 flow_dependent_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
9256 rtx * pinsn = (rtx *) data;
9258 if (*pinsn && reg_referenced_p (x, *pinsn))
9262 /* For use by sh_allocate_initial_value. Note that sh.md contains some
9263 'special function' patterns (type sfunc) that clobber pr, but that
9264 do not look like function calls to leaf_function_p. Hence we must
9265 do this extra check. */
9269 return DF_REG_DEF_COUNT (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
9272 /* Return where to allocate pseudo for a given hard register initial
9275 sh_allocate_initial_value (rtx hard_reg)
9279 if (REGNO (hard_reg) == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG))
9281 if (current_function_is_leaf
9282 && ! sh_pr_n_sets ()
9283 && ! (TARGET_SHCOMPACT
9284 && ((crtl->args.info.call_cookie
9285 & ~ CALL_COOKIE_RET_TRAMP (1))
9286 || crtl->saves_all_registers)))
9289 x = gen_frame_mem (Pmode, return_address_pointer_rtx);
9297 /* This function returns "2" to indicate dual issue for the SH4
9298 processor. To be used by the DFA pipeline description. */
9300 sh_issue_rate (void)
9302 if (TARGET_SUPERSCALAR)
9308 /* Functions for ready queue reordering for sched1. */
9310 /* Get weight for mode for a set x. */
9312 find_set_regmode_weight (rtx x, enum machine_mode mode)
9314 if (GET_CODE (x) == CLOBBER && register_operand (SET_DEST (x), mode))
9316 if (GET_CODE (x) == SET && register_operand (SET_DEST (x), mode))
9318 if (GET_CODE (SET_DEST (x)) == REG)
9320 if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
9330 /* Get regmode weight for insn. */
9332 find_insn_regmode_weight (rtx insn, enum machine_mode mode)
9334 short reg_weight = 0;
9337 /* Increment weight for each register born here. */
9339 reg_weight += find_set_regmode_weight (x, mode);
9340 if (GET_CODE (x) == PARALLEL)
9343 for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
9345 x = XVECEXP (PATTERN (insn), 0, j);
9346 reg_weight += find_set_regmode_weight (x, mode);
9349 /* Decrement weight for each register that dies here. */
9350 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
9352 if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED)
9354 rtx note = XEXP (x, 0);
9355 if (GET_CODE (note) == REG && GET_MODE (note) == mode)
9362 /* Calculate regmode weights for all insns of a basic block. */
9364 find_regmode_weight (basic_block b, enum machine_mode mode)
9366 rtx insn, next_tail, head, tail;
9368 get_ebb_head_tail (b, b, &head, &tail);
9369 next_tail = NEXT_INSN (tail);
9371 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
9373 /* Handle register life information. */
9378 INSN_REGMODE_WEIGHT (insn, mode) =
9379 find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DFmode);
9380 else if (mode == SImode)
9381 INSN_REGMODE_WEIGHT (insn, mode) =
9382 find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DImode);
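/* Worked example (illustrative): a register born in DFmode or DImode
   occupies two hard registers, so it is counted twice towards the SFmode
   resp. SImode pressure; an insn whose only effect is setting one fresh
   DFmode pseudo thus gets an SFmode weight of 0 + 2 * 1 = 2 from the
   computation above.  */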
9386 /* Comparison function for ready queue sorting. */
9388 rank_for_reorder (const void *x, const void *y)
9390 rtx tmp = *(const rtx *) y;
9391 rtx tmp2 = *(const rtx *) x;
9393 /* The insn in a schedule group should be issued first. */
9394 if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
9395 return SCHED_GROUP_P (tmp2) ? 1 : -1;
9397 /* If insns are equally good, sort by INSN_LUID (original insn order); this
9398 minimizes instruction movement, thus minimizing sched's effect on
9399 register pressure. */
9400 return INSN_LUID (tmp) - INSN_LUID (tmp2);
9403 /* Resort the array A in which only the element at index N may be out of order. */
9405 swap_reorder (rtx *a, int n)
9407 rtx insn = a[n - 1];
9410 while (i >= 0 && rank_for_reorder (a + i, &insn) >= 0)
9418 #define SCHED_REORDER(READY, N_READY) \
9421 if ((N_READY) == 2) \
9422 swap_reorder (READY, N_READY); \
9423 else if ((N_READY) > 2) \
9424 qsort (READY, N_READY, sizeof (rtx), rank_for_reorder); \
9428 /* Sort the ready list READY by ascending priority, using the SCHED_REORDER
9431 ready_reorder (rtx *ready, int nready)
9433 SCHED_REORDER (ready, nready);
9436 /* Count life regions of r0 for a block. */
9438 find_r0_life_regions (basic_block b)
9447 if (REGNO_REG_SET_P (df_get_live_in (b), R0_REG))
9460 r0_reg = gen_rtx_REG (SImode, R0_REG);
9465 if (find_regno_note (insn, REG_DEAD, R0_REG))
9471 && (pset = single_set (insn))
9472 && reg_overlap_mentioned_p (r0_reg, SET_DEST (pset))
9473 && !find_regno_note (insn, REG_UNUSED, R0_REG))
9481 insn = NEXT_INSN (insn);
9486 /* Calculate regmode weights for all insns of all basic blocks. */
9488 sh_md_init_global (FILE *dump ATTRIBUTE_UNUSED,
9489 int verbose ATTRIBUTE_UNUSED,
9494 regmode_weight[0] = (short *) xcalloc (old_max_uid, sizeof (short));
9495 regmode_weight[1] = (short *) xcalloc (old_max_uid, sizeof (short));
9496 r0_life_regions = 0;
9498 FOR_EACH_BB_REVERSE (b)
9500 find_regmode_weight (b, SImode);
9501 find_regmode_weight (b, SFmode);
9502 if (!reload_completed)
9503 r0_life_regions += find_r0_life_regions (b);
9506 CURR_REGMODE_PRESSURE (SImode) = 0;
9507 CURR_REGMODE_PRESSURE (SFmode) = 0;
9513 sh_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
9514 int verbose ATTRIBUTE_UNUSED)
9516 if (regmode_weight[0])
9518 free (regmode_weight[0]);
9519 regmode_weight[0] = NULL;
9521 if (regmode_weight[1])
9523 free (regmode_weight[1]);
9524 regmode_weight[1] = NULL;
9528 /* The scalar modes supported differ from the default version in TImode
9529 for 32-bit SHMEDIA. */
9531 sh_scalar_mode_supported_p (enum machine_mode mode)
9533 if (TARGET_SHMEDIA32 && mode == TImode)
9536 return default_scalar_mode_supported_p (mode);
9539 /* Cache the can_issue_more so that we can return it from reorder2. Also,
9540 keep count of register pressures on SImode and SFmode. */
9542 sh_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
9543 int sched_verbose ATTRIBUTE_UNUSED,
9547 if (GET_CODE (PATTERN (insn)) != USE
9548 && GET_CODE (PATTERN (insn)) != CLOBBER)
9549 cached_can_issue_more = can_issue_more - 1;
9551 cached_can_issue_more = can_issue_more;
9553 if (reload_completed)
9554 return cached_can_issue_more;
9556 CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
9557 CURR_REGMODE_PRESSURE (SFmode) += INSN_REGMODE_WEIGHT (insn, SFmode);
9559 return cached_can_issue_more;
9563 sh_md_init (FILE *dump ATTRIBUTE_UNUSED,
9564 int verbose ATTRIBUTE_UNUSED,
9565 int veclen ATTRIBUTE_UNUSED)
9567 CURR_REGMODE_PRESSURE (SImode) = 0;
9568 CURR_REGMODE_PRESSURE (SFmode) = 0;
9571 /* Some magic numbers. */
9572 /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
9573 functions that already have high pressure on r0. */
9574 #define R0_MAX_LIFE_REGIONS 2
9575 /* Register pressure thresholds for SImode and SFmode registers. */
9576 #define SIMODE_MAX_WEIGHT 5
9577 #define SFMODE_MAX_WEIGHT 10
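/* Illustrative reading of these thresholds (a hedged interpretation):
   once the running totals kept by sh_variable_issue exceed 5 SImode or
   10 SFmode register births, high_pressure below returns true and
   sh_reorder falls back to original insn order (sorting by LUID),
   limiting how much sched1 can stretch register lifetimes.  */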
9579 /* Return true if the pressure is high for MODE. */
9581 high_pressure (enum machine_mode mode)
9583 /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
9584 functions that already have high pressure on r0. */
9585 if (r0_life_regions >= R0_MAX_LIFE_REGIONS)
9589 return (CURR_REGMODE_PRESSURE (SFmode) > SFMODE_MAX_WEIGHT);
9591 return (CURR_REGMODE_PRESSURE (SImode) > SIMODE_MAX_WEIGHT);
9594 /* Reorder ready queue if register pressure is high. */
9596 sh_reorder (FILE *dump ATTRIBUTE_UNUSED,
9597 int sched_verbose ATTRIBUTE_UNUSED,
9600 int clock_var ATTRIBUTE_UNUSED)
9602 if (reload_completed)
9603 return sh_issue_rate ();
9605 if (high_pressure (SFmode) || high_pressure (SImode))
9607 ready_reorder (ready, *n_readyp);
9610 return sh_issue_rate ();
9613 /* Skip cycles if the current register pressure is high. */
9615 sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
9616 int sched_verbose ATTRIBUTE_UNUSED,
9617 rtx *ready ATTRIBUTE_UNUSED,
9618 int *n_readyp ATTRIBUTE_UNUSED,
9619 int clock_var ATTRIBUTE_UNUSED)
9621 if (reload_completed)
9622 return cached_can_issue_more;
9624 if (high_pressure(SFmode) || high_pressure (SImode))
9627 return cached_can_issue_more;
9630 /* Skip cycles without sorting the ready queue. This will move insns from
9631 Q->R. If this is the last cycle we are skipping, allow sorting of the ready
9632 queue by sh_reorder. */
9634 /* Generally, skipping this many cycles is sufficient for all insns to move
9639 sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED,
9640 int sched_verbose ATTRIBUTE_UNUSED,
9641 rtx insn ATTRIBUTE_UNUSED,
9646 if (reload_completed)
9651 if ((clock_var - last_clock_var) < MAX_SKIPS)
9656 /* If this is the last cycle we are skipping, allow reordering of R. */
9657 if ((clock_var - last_clock_var) == MAX_SKIPS)
9669 /* SHmedia requires registers for branches, so we can't generate new
9670 branches past reload. */
9672 sh_cannot_modify_jumps_p (void)
9674 return (TARGET_SHMEDIA && (reload_in_progress || reload_completed));
9678 sh_target_reg_class (void)
9680 return TARGET_SHMEDIA ? TARGET_REGS : NO_REGS;
9684 sh_optimize_target_register_callee_saved (bool after_prologue_epilogue_gen)
9691 if (! shmedia_space_reserved_for_target_registers)
9693 if (after_prologue_epilogue_gen && ! TARGET_SAVE_ALL_TARGET_REGS)
9695 if (calc_live_regs (&dummy) >= 6 * 8)
9701 sh_ms_bitfield_layout_p (const_tree record_type ATTRIBUTE_UNUSED)
9703 return (TARGET_SH5 || TARGET_HITACHI || sh_attr_renesas_p (record_type));
9707 On the SH1..SH4, the trampoline looks like
9708 2 0002 D202 mov.l l2,r2
9709 1 0000 D301 mov.l l1,r3
9712 5 0008 00000000 l1: .long area
9713 6 000c 00000000 l2: .long function
9715 SH5 (compact) uses r1 instead of r3 for the static chain. */
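/* Illustrative decoding of the constants emitted below for this layout
   (hedged): 0xd202 is `mov.l l2,r2', 0xd301 is `mov.l l1,r3', 0x422b is
   `jmp @r2' and 0x0009 is `nop'.  Packed into SImode words they become
   0xd301d202 / 0x0009422b on little-endian targets and
   0xd202d301 / 0x422b0009 on big-endian ones, matching the
   emit_move_insn calls in sh_initialize_trampoline.  */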
9718 /* Emit RTL insns to initialize the variable parts of a trampoline.
9719 FNADDR is an RTX for the address of the function's pure code.
9720 CXT is an RTX for the static chain value for the function. */
9723 sh_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
9725 rtx tramp_mem = gen_frame_mem (BLKmode, tramp);
9727 if (TARGET_SHMEDIA64)
9732 rtx movi1 = GEN_INT (0xcc000010);
9733 rtx shori1 = GEN_INT (0xc8000010);
9736 /* The following trampoline works within a +- 128 KB range for cxt:
9737 ptb/u cxt,tr1; movi fnaddr >> 48,r0; shori fnaddr >> 32,r0;
9738 shori fnaddr >> 16,r0; shori fnaddr,r0; ptabs/l r0,tr0
9739 gettr tr1,r1; blink tr0,r63 */
9740 /* Address rounding makes it hard to compute the exact bounds of the
9741 offset for this trampoline, but we have a rather generous offset
9742 range, so frame_offset should do fine as an upper bound. */
9743 if (cxt == virtual_stack_vars_rtx && frame_offset < 0x20000)
9745 /* ??? could optimize this trampoline initialization
9746 by writing DImode words with two insns each. */
9747 rtx mask = force_reg (DImode, GEN_INT (0x3fffc00));
9748 rtx insn = gen_rtx_MINUS (DImode, cxt, tramp);
9749 insn = gen_rtx_ASHIFT (DImode, insn, GEN_INT (10 - 2));
9750 insn = gen_rtx_AND (DImode, insn, mask);
9751 /* Or in ptb/u .,tr1 pattern */
9752 insn = gen_rtx_IOR (DImode, insn, gen_int_mode (0xec000010, SImode));
9753 insn = force_operand (insn, NULL_RTX);
9754 insn = gen_lowpart (SImode, insn);
9755 emit_move_insn (change_address (tramp_mem, SImode, NULL_RTX), insn);
9756 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (38));
9757 insn = gen_rtx_AND (DImode, insn, mask);
9758 insn = force_operand (gen_rtx_IOR (DImode, movi1, insn), NULL_RTX);
9759 insn = gen_lowpart (SImode, insn);
9760 emit_move_insn (adjust_address (tramp_mem, SImode, 4), insn);
9761 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (22));
9762 insn = gen_rtx_AND (DImode, insn, mask);
9763 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9764 insn = gen_lowpart (SImode, insn);
9765 emit_move_insn (adjust_address (tramp_mem, SImode, 8), insn);
9766 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (6));
9767 insn = gen_rtx_AND (DImode, insn, mask);
9768 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9769 insn = gen_lowpart (SImode, insn);
9770 emit_move_insn (adjust_address (tramp_mem, SImode, 12), insn);
9771 insn = gen_rtx_ASHIFT (DImode, fnaddr, GEN_INT (10));
9772 insn = gen_rtx_AND (DImode, insn, mask);
9773 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9774 insn = gen_lowpart (SImode, insn);
9775 emit_move_insn (adjust_address (tramp_mem, SImode, 16), insn);
9776 emit_move_insn (adjust_address (tramp_mem, SImode, 20),
9777 GEN_INT (0x6bf10600));
9778 emit_move_insn (adjust_address (tramp_mem, SImode, 24),
9779 GEN_INT (0x4415fc10));
9780 emit_move_insn (adjust_address (tramp_mem, SImode, 28),
9781 GEN_INT (0x4401fff0));
9782 emit_insn (gen_ic_invalidate_line (tramp));
9785 tramp_templ = gen_rtx_SYMBOL_REF (Pmode, "__GCC_nested_trampoline");
9786 fixed_len = TRAMPOLINE_SIZE - 2 * GET_MODE_SIZE (Pmode);
9788 tramp_templ = gen_datalabel_ref (tramp_templ);
9790 src = gen_const_mem (BLKmode, tramp_templ);
9791 set_mem_align (dst, 256);
9792 set_mem_align (src, 64);
9793 emit_block_move (dst, src, GEN_INT (fixed_len), BLOCK_OP_NORMAL);
9795 emit_move_insn (adjust_address (tramp_mem, Pmode, fixed_len), fnaddr);
9796 emit_move_insn (adjust_address (tramp_mem, Pmode,
9797 fixed_len + GET_MODE_SIZE (Pmode)),
9799 emit_insn (gen_ic_invalidate_line (tramp));
9802 else if (TARGET_SHMEDIA)
9804 /* movi fnaddr >> 16,r1; shori fnaddr,r1; ptabs/l r1,tr0
9805 movi cxt >> 16,r1; shori cxt,r1; blink tr0,r63 */
9806 rtx quad0 = gen_reg_rtx (DImode), cxtload = gen_reg_rtx (DImode);
9807 rtx quad1 = gen_reg_rtx (DImode), quad2 = gen_reg_rtx (DImode);
9808 /* movi 0,r1: 0xcc000010 shori 0,r1: c8000010 concatenated,
9809 rotated right by 10, with the higher 16 bits of every 32 selected. */
9811 = force_reg (V2HImode, (simplify_gen_subreg
9812 (V2HImode, GEN_INT (0x4330432), SImode, 0)));
9813 rtx ptabs = force_reg (DImode, GEN_INT (0x6bf10600));
9814 rtx blink = force_reg (DImode, GEN_INT (0x4401fff0));
9816 tramp = force_reg (Pmode, tramp);
9817 fnaddr = force_reg (SImode, fnaddr);
9818 cxt = force_reg (SImode, cxt);
9819 emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, quad0, 0),
9820 gen_rtx_SUBREG (V2HImode, fnaddr, 0),
9822 emit_insn (gen_rotrdi3_mextr (quad0, quad0,
9823 GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
9824 emit_insn (gen_ashldi3_media (quad0, quad0, const2_rtx));
9825 emit_move_insn (change_address (tramp_mem, DImode, NULL_RTX), quad0);
9826 emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, cxtload, 0),
9827 gen_rtx_SUBREG (V2HImode, cxt, 0),
9829 emit_insn (gen_rotrdi3_mextr (cxtload, cxtload,
9830 GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
9831 emit_insn (gen_ashldi3_media (cxtload, cxtload, const2_rtx));
9832 if (TARGET_LITTLE_ENDIAN)
9834 emit_insn (gen_mshflo_l_di (quad1, ptabs, cxtload));
9835 emit_insn (gen_mextr4 (quad2, cxtload, blink));
9839 emit_insn (gen_mextr4 (quad1, cxtload, ptabs));
9840 emit_insn (gen_mshflo_l_di (quad2, blink, cxtload));
9842 emit_move_insn (adjust_address (tramp_mem, DImode, 8), quad1);
9843 emit_move_insn (adjust_address (tramp_mem, DImode, 16), quad2);
9844 emit_insn (gen_ic_invalidate_line (tramp));
9847 else if (TARGET_SHCOMPACT)
9849 emit_insn (gen_initialize_trampoline (tramp, cxt, fnaddr));
9852 emit_move_insn (change_address (tramp_mem, SImode, NULL_RTX),
9853 gen_int_mode (TARGET_LITTLE_ENDIAN ? 0xd301d202 : 0xd202d301,
9855 emit_move_insn (adjust_address (tramp_mem, SImode, 4),
9856 gen_int_mode (TARGET_LITTLE_ENDIAN ? 0x0009422b : 0x422b0009,
9858 emit_move_insn (adjust_address (tramp_mem, SImode, 8), cxt);
9859 emit_move_insn (adjust_address (tramp_mem, SImode, 12), fnaddr);
9862 if (!TARGET_INLINE_IC_INVALIDATE
9863 || (!(TARGET_SH4A_ARCH || TARGET_SH4_300) && TARGET_USERMODE))
9864 emit_library_call (function_symbol (NULL, "__ic_invalidate",
9866 0, VOIDmode, 1, tramp, SImode);
9868 emit_insn (gen_ic_invalidate_line (tramp));
9872 /* FIXME: This is overly conservative. A SHcompact function that
9873 receives arguments ``by reference'' will have them stored in its
9874 own stack frame, so it must not pass pointers or references to
9875 these arguments to other functions by means of sibling calls. */
9876 /* If PIC, we cannot make sibling calls to global functions
9877 because the PLT requires r12 to be live. */
9879 sh_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
9882 && (! TARGET_SHCOMPACT
9883 || crtl->args.info.stack_regs == 0)
9884 && ! sh_cfun_interrupt_handler_p ()
9886 || (decl && ! TREE_PUBLIC (decl))
9887 || (decl && DECL_VISIBILITY (decl) != VISIBILITY_DEFAULT)));
9890 /* Machine specific built-in functions. */
9892 struct builtin_description
9894 const enum insn_code icode;
9895 const char *const name;
9899 /* Describe the number and signedness of arguments; arg[0] == result
9900 (1: unsigned, 2: signed, 4: don't care, 8: pointer, 0: no argument). */
9901 /* 9: 64-bit pointer, 10: 32-bit pointer */
9902 static const char signature_args[][4] =
9904 #define SH_BLTIN_V2SI2 0
9906 #define SH_BLTIN_V4HI2 1
9908 #define SH_BLTIN_V2SI3 2
9910 #define SH_BLTIN_V4HI3 3
9912 #define SH_BLTIN_V8QI3 4
9914 #define SH_BLTIN_MAC_HISI 5
9916 #define SH_BLTIN_SH_HI 6
9918 #define SH_BLTIN_SH_SI 7
9920 #define SH_BLTIN_V4HI2V2SI 8
9922 #define SH_BLTIN_V4HI2V8QI 9
9924 #define SH_BLTIN_SISF 10
9926 #define SH_BLTIN_LDUA_L 11
9928 #define SH_BLTIN_LDUA_Q 12
9930 #define SH_BLTIN_STUA_L 13
9932 #define SH_BLTIN_STUA_Q 14
9934 #define SH_BLTIN_LDUA_L64 15
9936 #define SH_BLTIN_LDUA_Q64 16
9938 #define SH_BLTIN_STUA_L64 17
9940 #define SH_BLTIN_STUA_Q64 18
9942 #define SH_BLTIN_NUM_SHARED_SIGNATURES 19
9943 #define SH_BLTIN_2 19
9944 #define SH_BLTIN_SU 19
9946 #define SH_BLTIN_3 20
9947 #define SH_BLTIN_SUS 20
9949 #define SH_BLTIN_PSSV 21
9951 #define SH_BLTIN_XXUU 22
9952 #define SH_BLTIN_UUUU 22
9954 #define SH_BLTIN_PV 23
9957 /* mcmv: operands considered unsigned. */
9958 /* mmulsum_wq, msad_ubq: result considered unsigned long long. */
9959 /* mperm: control value considered unsigned int. */
9960 /* mshalds, mshard, mshards, mshlld, mshlrd: shift count is unsigned int. */
9961 /* mshards_q: returns signed short. */
9962 /* nsb: takes long long arg, returns unsigned char. */
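/* Illustrative example of the encoding (a sketch; the initializer rows
   themselves are elided here): a three-operand vector insn with
   signature SH_BLTIN_V4HI3 would be described by a row like
   { 4, 4, 4 }, i.e. result and both arguments take the insn's operand
   modes with signedness immaterial, whereas SH_BLTIN_SU would pair an
   unsigned result with a signed argument, e.g. { 1, 2 }.  */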
9963 static const struct builtin_description bdesc[] =
9965 { CODE_FOR_absv2si2, "__builtin_absv2si2", SH_BLTIN_V2SI2 },
9966 { CODE_FOR_absv4hi2, "__builtin_absv4hi2", SH_BLTIN_V4HI2 },
9967 { CODE_FOR_addv2si3, "__builtin_addv2si3", SH_BLTIN_V2SI3 },
9968 { CODE_FOR_addv4hi3, "__builtin_addv4hi3", SH_BLTIN_V4HI3 },
9969 { CODE_FOR_ssaddv2si3,"__builtin_ssaddv2si3", SH_BLTIN_V2SI3 },
9970 { CODE_FOR_usaddv8qi3,"__builtin_usaddv8qi3", SH_BLTIN_V8QI3 },
9971 { CODE_FOR_ssaddv4hi3,"__builtin_ssaddv4hi3", SH_BLTIN_V4HI3 },
9972 { CODE_FOR_alloco_i, "__builtin_sh_media_ALLOCO", SH_BLTIN_PV },
9973 { CODE_FOR_negcmpeqv8qi,"__builtin_sh_media_MCMPEQ_B", SH_BLTIN_V8QI3 },
9974 { CODE_FOR_negcmpeqv2si,"__builtin_sh_media_MCMPEQ_L", SH_BLTIN_V2SI3 },
9975 { CODE_FOR_negcmpeqv4hi,"__builtin_sh_media_MCMPEQ_W", SH_BLTIN_V4HI3 },
9976 { CODE_FOR_negcmpgtuv8qi,"__builtin_sh_media_MCMPGT_UB", SH_BLTIN_V8QI3 },
9977 { CODE_FOR_negcmpgtv2si,"__builtin_sh_media_MCMPGT_L", SH_BLTIN_V2SI3 },
9978 { CODE_FOR_negcmpgtv4hi,"__builtin_sh_media_MCMPGT_W", SH_BLTIN_V4HI3 },
9979 { CODE_FOR_mcmv, "__builtin_sh_media_MCMV", SH_BLTIN_UUUU },
9980 { CODE_FOR_mcnvs_lw, "__builtin_sh_media_MCNVS_LW", SH_BLTIN_3 },
9981 { CODE_FOR_mcnvs_wb, "__builtin_sh_media_MCNVS_WB", SH_BLTIN_V4HI2V8QI },
9982 { CODE_FOR_mcnvs_wub, "__builtin_sh_media_MCNVS_WUB", SH_BLTIN_V4HI2V8QI },
9983 { CODE_FOR_mextr1, "__builtin_sh_media_MEXTR1", SH_BLTIN_V8QI3 },
9984 { CODE_FOR_mextr2, "__builtin_sh_media_MEXTR2", SH_BLTIN_V8QI3 },
9985 { CODE_FOR_mextr3, "__builtin_sh_media_MEXTR3", SH_BLTIN_V8QI3 },
9986 { CODE_FOR_mextr4, "__builtin_sh_media_MEXTR4", SH_BLTIN_V8QI3 },
9987 { CODE_FOR_mextr5, "__builtin_sh_media_MEXTR5", SH_BLTIN_V8QI3 },
9988 { CODE_FOR_mextr6, "__builtin_sh_media_MEXTR6", SH_BLTIN_V8QI3 },
9989 { CODE_FOR_mextr7, "__builtin_sh_media_MEXTR7", SH_BLTIN_V8QI3 },
9990 { CODE_FOR_mmacfx_wl, "__builtin_sh_media_MMACFX_WL", SH_BLTIN_MAC_HISI },
9991 { CODE_FOR_mmacnfx_wl,"__builtin_sh_media_MMACNFX_WL", SH_BLTIN_MAC_HISI },
9992 { CODE_FOR_mulv2si3, "__builtin_mulv2si3", SH_BLTIN_V2SI3 },
9993 { CODE_FOR_mulv4hi3, "__builtin_mulv4hi3", SH_BLTIN_V4HI3 },
9994 { CODE_FOR_mmulfx_l, "__builtin_sh_media_MMULFX_L", SH_BLTIN_V2SI3 },
9995 { CODE_FOR_mmulfx_w, "__builtin_sh_media_MMULFX_W", SH_BLTIN_V4HI3 },
9996 { CODE_FOR_mmulfxrp_w,"__builtin_sh_media_MMULFXRP_W", SH_BLTIN_V4HI3 },
9997 { CODE_FOR_mmulhi_wl, "__builtin_sh_media_MMULHI_WL", SH_BLTIN_V4HI2V2SI },
9998 { CODE_FOR_mmullo_wl, "__builtin_sh_media_MMULLO_WL", SH_BLTIN_V4HI2V2SI },
9999 { CODE_FOR_mmulsum_wq,"__builtin_sh_media_MMULSUM_WQ", SH_BLTIN_XXUU },
10000 { CODE_FOR_mperm_w, "__builtin_sh_media_MPERM_W", SH_BLTIN_SH_HI },
10001 { CODE_FOR_msad_ubq, "__builtin_sh_media_MSAD_UBQ", SH_BLTIN_XXUU },
10002 { CODE_FOR_mshalds_l, "__builtin_sh_media_MSHALDS_L", SH_BLTIN_SH_SI },
10003 { CODE_FOR_mshalds_w, "__builtin_sh_media_MSHALDS_W", SH_BLTIN_SH_HI },
10004 { CODE_FOR_ashrv2si3, "__builtin_ashrv2si3", SH_BLTIN_SH_SI },
10005 { CODE_FOR_ashrv4hi3, "__builtin_ashrv4hi3", SH_BLTIN_SH_HI },
10006 { CODE_FOR_mshards_q, "__builtin_sh_media_MSHARDS_Q", SH_BLTIN_SUS },
10007 { CODE_FOR_mshfhi_b, "__builtin_sh_media_MSHFHI_B", SH_BLTIN_V8QI3 },
10008 { CODE_FOR_mshfhi_l, "__builtin_sh_media_MSHFHI_L", SH_BLTIN_V2SI3 },
10009 { CODE_FOR_mshfhi_w, "__builtin_sh_media_MSHFHI_W", SH_BLTIN_V4HI3 },
10010 { CODE_FOR_mshflo_b, "__builtin_sh_media_MSHFLO_B", SH_BLTIN_V8QI3 },
10011 { CODE_FOR_mshflo_l, "__builtin_sh_media_MSHFLO_L", SH_BLTIN_V2SI3 },
10012 { CODE_FOR_mshflo_w, "__builtin_sh_media_MSHFLO_W", SH_BLTIN_V4HI3 },
10013 { CODE_FOR_ashlv2si3, "__builtin_ashlv2si3", SH_BLTIN_SH_SI },
10014 { CODE_FOR_ashlv4hi3, "__builtin_ashlv4hi3", SH_BLTIN_SH_HI },
10015 { CODE_FOR_lshrv2si3, "__builtin_lshrv2si3", SH_BLTIN_SH_SI },
10016 { CODE_FOR_lshrv4hi3, "__builtin_lshrv4hi3", SH_BLTIN_SH_HI },
10017 { CODE_FOR_subv2si3, "__builtin_subv2si3", SH_BLTIN_V2SI3 },
10018 { CODE_FOR_subv4hi3, "__builtin_subv4hi3", SH_BLTIN_V4HI3 },
10019 { CODE_FOR_sssubv2si3,"__builtin_sssubv2si3", SH_BLTIN_V2SI3 },
10020 { CODE_FOR_ussubv8qi3,"__builtin_ussubv8qi3", SH_BLTIN_V8QI3 },
10021 { CODE_FOR_sssubv4hi3,"__builtin_sssubv4hi3", SH_BLTIN_V4HI3 },
10022 { CODE_FOR_fcosa_s, "__builtin_sh_media_FCOSA_S", SH_BLTIN_SISF },
10023 { CODE_FOR_fsina_s, "__builtin_sh_media_FSINA_S", SH_BLTIN_SISF },
10024 { CODE_FOR_fipr, "__builtin_sh_media_FIPR_S", SH_BLTIN_3 },
10025 { CODE_FOR_ftrv, "__builtin_sh_media_FTRV_S", SH_BLTIN_3 },
10026 { CODE_FOR_mac_media, "__builtin_sh_media_FMAC_S", SH_BLTIN_3 },
10027 { CODE_FOR_sqrtdf2, "__builtin_sh_media_FSQRT_D", SH_BLTIN_2 },
10028 { CODE_FOR_sqrtsf2, "__builtin_sh_media_FSQRT_S", SH_BLTIN_2 },
10029 { CODE_FOR_fsrra_s, "__builtin_sh_media_FSRRA_S", SH_BLTIN_2 },
10030 { CODE_FOR_ldhi_l, "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L },
10031 { CODE_FOR_ldhi_q, "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q },
10032 { CODE_FOR_ldlo_l, "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L },
10033 { CODE_FOR_ldlo_q, "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q },
10034 { CODE_FOR_sthi_l, "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L },
10035 { CODE_FOR_sthi_q, "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q },
10036 { CODE_FOR_stlo_l, "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L },
10037 { CODE_FOR_stlo_q, "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q },
10038 { CODE_FOR_ldhi_l64, "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L64 },
10039 { CODE_FOR_ldhi_q64, "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q64 },
10040 { CODE_FOR_ldlo_l64, "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L64 },
10041 { CODE_FOR_ldlo_q64, "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q64 },
10042 { CODE_FOR_sthi_l64, "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L64 },
10043 { CODE_FOR_sthi_q64, "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q64 },
10044 { CODE_FOR_stlo_l64, "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L64 },
10045 { CODE_FOR_stlo_q64, "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q64 },
10046 { CODE_FOR_nsb, "__builtin_sh_media_NSB", SH_BLTIN_SU },
10047 { CODE_FOR_byterev, "__builtin_sh_media_BYTEREV", SH_BLTIN_2 },
10048 { CODE_FOR_prefetch, "__builtin_sh_media_PREFO", SH_BLTIN_PSSV },
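/* Hedged user-level sketch (the vector typedef is an assumption made
   for illustration; the builtin name comes from the table above):

     typedef int v2si __attribute__ ((vector_size (8)));
     v2si vabs (v2si x) { return __builtin_absv2si2 (x); }

   compiled for SHmedia, this should expand through CODE_FOR_absv2si2.  */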
10052 sh_media_init_builtins (void)
10054 tree shared[SH_BLTIN_NUM_SHARED_SIGNATURES];
10055 const struct builtin_description *d;
10057 memset (shared, 0, sizeof shared);
10058 for (d = bdesc; d - bdesc < (int) ARRAY_SIZE (bdesc); d++)
10060 tree type, arg_type = 0;
10061 int signature = d->signature;
10064 if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES && shared[signature])
10065 type = shared[signature];
10068 int has_result = signature_args[signature][0] != 0;
10070 if ((signature_args[signature][1] & 8)
10071 && (((signature_args[signature][1] & 1) && TARGET_SHMEDIA32)
10072 || ((signature_args[signature][1] & 2) && TARGET_SHMEDIA64)))
10074 if (! TARGET_FPU_ANY
10075 && FLOAT_MODE_P (insn_data[d->icode].operand[0].mode))
10077 type = void_list_node;
10080 int arg = signature_args[signature][i];
10081 int opno = i - 1 + has_result;
10084 arg_type = ptr_type_node;
10086 arg_type = (*lang_hooks.types.type_for_mode)
10087 (insn_data[d->icode].operand[opno].mode,
10092 arg_type = void_type_node;
10095 type = tree_cons (NULL_TREE, arg_type, type);
10097 type = build_function_type (arg_type, type);
10098 if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES)
10099 shared[signature] = type;
10101 add_builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
10106 /* Implements target hook vector_mode_supported_p. */
10108 sh_vector_mode_supported_p (enum machine_mode mode)
10111 && ((mode == V2SFmode)
10112 || (mode == V4SFmode)
10113 || (mode == V16SFmode)))
10116 else if (TARGET_SHMEDIA
10117 && ((mode == V8QImode)
10118 || (mode == V2HImode)
10119 || (mode == V4HImode)
10120 || (mode == V2SImode)))
10126 /* Implements target hook dwarf_calling_convention. Return an enum
10127 of dwarf_calling_convention. */
10129 sh_dwarf_calling_convention (const_tree func)
10131 if (sh_attr_renesas_p (func))
10132 return DW_CC_GNU_renesas_sh;
10134 return DW_CC_normal;
10138 sh_init_builtins (void)
10140 if (TARGET_SHMEDIA)
10141 sh_media_init_builtins ();
10144 /* Expand an expression EXP that calls a built-in function,
10145 with result going to TARGET if that's convenient
10146 (and in mode MODE if that's convenient).
10147 SUBTARGET may be used as the target for computing one of EXP's operands.
10148 IGNORE is nonzero if the value is to be ignored. */
10151 sh_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10152 enum machine_mode mode ATTRIBUTE_UNUSED, int ignore)
10154 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10155 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10156 const struct builtin_description *d = &bdesc[fcode];
10157 enum insn_code icode = d->icode;
10158 int signature = d->signature;
10159 enum machine_mode tmode = VOIDmode;
10164 if (signature_args[signature][0])
10169 tmode = insn_data[icode].operand[0].mode;
10171 || GET_MODE (target) != tmode
10172 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10173 target = gen_reg_rtx (tmode);
10174 op[nop++] = target;
10179 for (i = 1; i <= 3; i++, nop++)
10182 enum machine_mode opmode, argmode;
10185 if (! signature_args[signature][i])
10187 arg = CALL_EXPR_ARG (exp, i - 1);
10188 if (arg == error_mark_node)
10190 if (signature_args[signature][i] & 8)
10193 optype = ptr_type_node;
10197 opmode = insn_data[icode].operand[nop].mode;
10198 optype = (*lang_hooks.types.type_for_mode) (opmode, 0);
10200 argmode = TYPE_MODE (TREE_TYPE (arg));
10201 if (argmode != opmode)
10202 arg = build1 (NOP_EXPR, optype, arg);
10203 op[nop] = expand_expr (arg, NULL_RTX, opmode, 0);
10204 if (! (*insn_data[icode].operand[nop].predicate) (op[nop], opmode))
10205 op[nop] = copy_to_mode_reg (opmode, op[nop]);
10211 pat = (*insn_data[d->icode].genfun) (op[0]);
10214 pat = (*insn_data[d->icode].genfun) (op[0], op[1]);
10217 pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2]);
10220 pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2], op[3]);
10223 gcc_unreachable ();
10232 sh_expand_unop_v2sf (enum rtx_code code, rtx op0, rtx op1)
10234 rtx sel0 = const0_rtx;
10235 rtx sel1 = const1_rtx;
10236 rtx (*fn) (rtx, rtx, rtx, rtx, rtx) = gen_unary_sf_op;
10237 rtx op = gen_rtx_fmt_e (code, SFmode, op1);
10239 emit_insn ((*fn) (op0, op1, op, sel0, sel0));
10240 emit_insn ((*fn) (op0, op1, op, sel1, sel1));
10244 sh_expand_binop_v2sf (enum rtx_code code, rtx op0, rtx op1, rtx op2)
10246 rtx op = gen_rtx_fmt_ee (code, SFmode, op1, op2);
10248 emit_insn (gen_binary_sf_op0 (op0, op1, op2, op));
10249 emit_insn (gen_binary_sf_op1 (op0, op1, op2, op));
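/* Illustrative expansion (hedged): a V2SFmode operation such as
   (set op0 (plus:V2SF op1 op2)) is decomposed by the two emits above
   into one SFmode add per vector lane, with the lane selection baked
   into the binary_sf_op0 / binary_sf_op1 patterns.  */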
10252 /* Return the class of registers for which a mode change from FROM to TO
10255 sh_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
10256 enum reg_class class)
10258 /* We want to enable the use of SUBREGs as a means to
10259 VEC_SELECT a single element of a vector. */
10260 if (to == SFmode && VECTOR_MODE_P (from) && GET_MODE_INNER (from) == SFmode)
10261 return (reg_classes_intersect_p (GENERAL_REGS, class));
10263 if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
10265 if (TARGET_LITTLE_ENDIAN)
10267 if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
10268 return reg_classes_intersect_p (DF_REGS, class);
10272 if (GET_MODE_SIZE (from) < 8)
10273 return reg_classes_intersect_p (DF_HI_REGS, class);
10280 /* If ADDRESS refers to a CODE_LABEL, add NUSES to the number of times
10281 that label is used. */
10284 sh_mark_label (rtx address, int nuses)
10286 if (GOTOFF_P (address))
10288 /* Extract the label or symbol. */
10289 address = XEXP (address, 0);
10290 if (GET_CODE (address) == PLUS)
10291 address = XEXP (address, 0);
10292 address = XVECEXP (address, 0, 0);
10294 if (GET_CODE (address) == LABEL_REF
10295 && GET_CODE (XEXP (address, 0)) == CODE_LABEL)
10296 LABEL_NUSES (XEXP (address, 0)) += nuses;
10299 /* Compute extra cost of moving data between one register class
10302 /* If SECONDARY*_RELOAD_CLASS says something about the src/dst pair, regclass
10303 uses this information. Hence, the general register <-> floating point
10304 register information here is not used for SFmode. */
10307 sh_register_move_cost (enum machine_mode mode,
10308 enum reg_class srcclass, enum reg_class dstclass)
10310 if (dstclass == T_REGS || dstclass == PR_REGS)
10313 if (dstclass == MAC_REGS && srcclass == MAC_REGS)
10316 if (mode == SImode && ! TARGET_SHMEDIA && TARGET_FMOVD
10317 && REGCLASS_HAS_FP_REG (srcclass)
10318 && REGCLASS_HAS_FP_REG (dstclass))
10321 if (REGCLASS_HAS_FP_REG (dstclass) && srcclass == T_REGS)
10322 return ((TARGET_HARD_SH4 && !optimize_size) ? 10 : 7);
10324 if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
10325 || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
10328 if ((REGCLASS_HAS_FP_REG (dstclass)
10329 && REGCLASS_HAS_GENERAL_REG (srcclass))
10330 || (REGCLASS_HAS_GENERAL_REG (dstclass)
10331 && REGCLASS_HAS_FP_REG (srcclass)))
10332 return ((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12)
10333 * ((GET_MODE_SIZE (mode) + 7) / 8U));
10335 if ((dstclass == FPUL_REGS
10336 && REGCLASS_HAS_GENERAL_REG (srcclass))
10337 || (srcclass == FPUL_REGS
10338 && REGCLASS_HAS_GENERAL_REG (dstclass)))
10341 if ((dstclass == FPUL_REGS
10342 && (srcclass == PR_REGS || srcclass == MAC_REGS || srcclass == T_REGS))
10343 || (srcclass == FPUL_REGS
10344 && (dstclass == PR_REGS || dstclass == MAC_REGS)))
10347 if ((srcclass == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
10348 || ((dstclass) == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
10351 /* ??? ptabs faults on (value & 0x3) == 0x3 */
10353 && ((srcclass) == TARGET_REGS || (srcclass) == SIBCALL_REGS))
10355 if (sh_gettrcost >= 0)
10356 return sh_gettrcost;
10357 else if (!TARGET_PT_FIXED)
10361 if ((srcclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
10362 || (dstclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
10367 && ! REGCLASS_HAS_GENERAL_REG (srcclass)
10368 && ! REGCLASS_HAS_GENERAL_REG (dstclass)))
10369 return 2 * ((GET_MODE_SIZE (mode) + 7) / 8U);
10371 return 2 * ((GET_MODE_SIZE (mode) + 3) / 4U);
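/* Worked example (illustrative): by the formula above, moving a DFmode
   value between the general and floating-point register files on SH4
   without -mfmovd costs 12 * ((8 + 7) / 8) = 12, strongly discouraging
   reload from bouncing doubles through general registers.  */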
10374 static rtx emit_load_ptr (rtx, rtx);
10377 emit_load_ptr (rtx reg, rtx addr)
10379 rtx mem = gen_const_mem (ptr_mode, addr);
10381 if (Pmode != ptr_mode)
10382 mem = gen_rtx_SIGN_EXTEND (Pmode, mem);
10383 return emit_move_insn (reg, mem);
10387 sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
10388 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10391 CUMULATIVE_ARGS cum;
10392 int structure_value_byref = 0;
10393 rtx this, this_value, sibcall, insns, funexp;
10394 tree funtype = TREE_TYPE (function);
10395 int simple_add = CONST_OK_FOR_ADD (delta);
10397 rtx scratch0, scratch1, scratch2;
10400 reload_completed = 1;
10401 epilogue_completed = 1;
10402 current_function_uses_only_leaf_regs = 1;
10404 emit_note (NOTE_INSN_PROLOGUE_END);
10406 /* Find the "this" pointer. We have such a wide range of ABIs for the
10407 SH that it's best to do this completely machine independently.
10408 "this" is passed as first argument, unless a structure return pointer
10409 comes first, in which case "this" comes second. */
10410 INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
10411 #ifndef PCC_STATIC_STRUCT_RETURN
10412 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
10413 structure_value_byref = 1;
10414 #endif /* not PCC_STATIC_STRUCT_RETURN */
10415 if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
10417 tree ptype = build_pointer_type (TREE_TYPE (funtype));
10419 FUNCTION_ARG_ADVANCE (cum, Pmode, ptype, 1);
10421 this = FUNCTION_ARG (cum, Pmode, ptr_type_node, 1);
10423 /* For SHcompact, we only have r0 for a scratch register: r1 is the
10424 static chain pointer (even if you can't have nested virtual functions
10425 right now, someone might implement them sometime), and the rest of the
10426 registers are used for argument passing, are callee-saved, or reserved. */
10427 /* We need to check call_used_regs / fixed_regs in case -fcall-saved-reg /
10428 -ffixed-reg has been used. */
10429 if (! call_used_regs[0] || fixed_regs[0])
10430 error ("r0 needs to be available as a call-clobbered register");
10431 scratch0 = scratch1 = scratch2 = gen_rtx_REG (Pmode, 0);
10434 if (call_used_regs[1] && ! fixed_regs[1])
10435 scratch1 = gen_rtx_REG (ptr_mode, 1);
10436 /* N.B., if not TARGET_HITACHI, register 2 is used to pass the pointer
10437 to where struct values are to be returned. */
10438 if (call_used_regs[3] && ! fixed_regs[3])
10439 scratch2 = gen_rtx_REG (Pmode, 3);
10441 else if (TARGET_SHMEDIA)
10443 for (i = FIRST_GENERAL_REG; i <= LAST_GENERAL_REG; i++)
10444 if (i != REGNO (scratch0)
10445 && call_used_regs[i] && ! fixed_regs[i] && ! FUNCTION_ARG_REGNO_P (i))
10447 scratch1 = gen_rtx_REG (ptr_mode, i);
10450 if (scratch1 == scratch0)
10451 error ("Need a second call-clobbered general purpose register");
10452 for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
10453 if (call_used_regs[i] && ! fixed_regs[i])
10455 scratch2 = gen_rtx_REG (Pmode, i);
10458 if (scratch2 == scratch0)
10459 error ("Need a call-clobbered target register");
10462 this_value = plus_constant (this, delta);
10464 && (simple_add || scratch0 != scratch1)
10465 && strict_memory_address_p (ptr_mode, this_value))
10467 emit_load_ptr (scratch0, this_value);
10472 ; /* Do nothing. */
10473 else if (simple_add)
10474 emit_move_insn (this, this_value);
10477 emit_move_insn (scratch1, GEN_INT (delta));
10478 emit_insn (gen_add2_insn (this, scratch1));
10486 emit_load_ptr (scratch0, this);
10488 offset_addr = plus_constant (scratch0, vcall_offset);
10489 if (strict_memory_address_p (ptr_mode, offset_addr))
10490 ; /* Do nothing. */
10491 else if (! TARGET_SH5 && scratch0 != scratch1)
10493 /* scratch0 != scratch1, and we have indexed loads. Get better
10494 schedule by loading the offset into r1 and using an indexed
10495 load - then the load of r1 can issue before the load from
10496 (this + delta) finishes. */
10497 emit_move_insn (scratch1, GEN_INT (vcall_offset));
10498 offset_addr = gen_rtx_PLUS (Pmode, scratch0, scratch1);
10500 else if (CONST_OK_FOR_ADD (vcall_offset))
10502 emit_insn (gen_add2_insn (scratch0, GEN_INT (vcall_offset)));
10503 offset_addr = scratch0;
10505 else if (scratch0 != scratch1)
10507 emit_move_insn (scratch1, GEN_INT (vcall_offset));
10508 emit_insn (gen_add2_insn (scratch0, scratch1));
10509 offset_addr = scratch0;
10512 gcc_unreachable (); /* FIXME */
10513 emit_load_ptr (scratch0, offset_addr);
10515 if (Pmode != ptr_mode)
10516 scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
10517 emit_insn (gen_add2_insn (this, scratch0));
10520 /* Generate a tail call to the target function. */
10521 if (! TREE_USED (function))
10523 assemble_external (function);
10524 TREE_USED (function) = 1;
10526 funexp = XEXP (DECL_RTL (function), 0);
10527 /* If the function is overridden, so is the thunk, hence we don't
10528 need GOT addressing even if this is a public symbol. */
10530 if (TARGET_SH1 && ! flag_weak)
10531 sibcall = gen_sibcalli_thunk (funexp, const0_rtx);
10534 if (TARGET_SH2 && flag_pic)
10536 sibcall = gen_sibcall_pcrel (funexp, const0_rtx);
10537 XEXP (XVECEXP (sibcall, 0, 2), 0) = scratch2;
10541 if (TARGET_SHMEDIA && flag_pic)
10543 funexp = gen_sym2PIC (funexp);
10544 PUT_MODE (funexp, Pmode);
10546 emit_move_insn (scratch2, funexp);
10547 funexp = gen_rtx_MEM (FUNCTION_MODE, scratch2);
10548 sibcall = gen_sibcall (funexp, const0_rtx, NULL_RTX);
10550 sibcall = emit_call_insn (sibcall);
10551 SIBLING_CALL_P (sibcall) = 1;
10552 use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this);
10555 /* Run just enough of rest_of_compilation to do scheduling and get
10556 the insns emitted. Note that use_thunk calls
10557 assemble_start_function and assemble_end_function. */
10559 insn_locators_alloc ();
10560 insns = get_insns ();
10565 /* Initialize the bitmap obstacks. */
10566 bitmap_obstack_initialize (NULL);
10567 bitmap_obstack_initialize (&reg_obstack);
10570 rtl_register_cfg_hooks ();
10571 init_rtl_bb_info (ENTRY_BLOCK_PTR);
10572 init_rtl_bb_info (EXIT_BLOCK_PTR);
10573 ENTRY_BLOCK_PTR->flags |= BB_RTL;
10574 EXIT_BLOCK_PTR->flags |= BB_RTL;
10575 find_basic_blocks (insns);
10577 if (flag_schedule_insns_after_reload)
10579 life_analysis (PROP_FINAL);
10581 split_all_insns (1);
10585 /* We must split jmp insn in PIC case. */
10587 split_all_insns_noflow ();
10594 split_all_insns_noflow ();
10600 if (optimize > 0 && flag_delayed_branch)
10601 dbr_schedule (insns);
10603 shorten_branches (insns);
10604 final_start_function (insns, file, 1);
10605 final (insns, file, 1);
10606 final_end_function ();
10607 free_after_compilation (cfun);
10609 reload_completed = 0;
10610 epilogue_completed = 0;
10614 function_symbol (rtx target, const char *name, enum sh_function_kind kind)
10618 /* If this is not an ordinary function, the name usually comes from a
10619 string literal or an sprintf buffer. Make sure we use the same
10620 string consistently, so that cse will be able to unify address loads. */
10621 if (kind != FUNCTION_ORDINARY)
10622 name = IDENTIFIER_POINTER (get_identifier (name));
10623 sym = gen_rtx_SYMBOL_REF (Pmode, name);
10624 SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_FUNCTION;
10628 case FUNCTION_ORDINARY:
10632 rtx reg = target ? target : gen_reg_rtx (Pmode);
10634 emit_insn (gen_symGOT2reg (reg, sym));
10640 /* ??? To allow cse to work, we use GOTOFF relocations.
10641 We could add combiner patterns to transform this into
10642 straight pc-relative calls with sym2PIC / bsrf when
10643 label load and function call are still 1:1 and in the
10644 same basic block during combine. */
10645 rtx reg = target ? target : gen_reg_rtx (Pmode);
10647 emit_insn (gen_symGOTOFF2reg (reg, sym));
10652 if (target && sym != target)
10654 emit_move_insn (target, sym);
10660 /* Find the number of a general purpose register in S. */
10662 scavenge_reg (HARD_REG_SET *s)
10665 for (r = FIRST_GENERAL_REG; r <= LAST_GENERAL_REG; r++)
10666 if (TEST_HARD_REG_BIT (*s, r))
10672 sh_get_pr_initial_val (void)
10676 /* ??? Unfortunately, get_hard_reg_initial_val doesn't always work for the
10677 PR register on SHcompact, because it might be clobbered by the prologue.
10678 We check first if that is known to be the case. */
10679 if (TARGET_SHCOMPACT
10680 && ((crtl->args.info.call_cookie
10681 & ~ CALL_COOKIE_RET_TRAMP (1))
10682 || crtl->saves_all_registers))
10683 return gen_frame_mem (SImode, return_address_pointer_rtx);
10685 /* If we haven't finished rtl generation, there might be a nonlocal label
10686 that we haven't seen yet.
10687 ??? get_hard_reg_initial_val fails if it is called after register
10688 allocation has started, unless it has been called before for the
10689 same register. And even then, we end up in trouble if we didn't use
10690 the register in the same basic block before. So call
10691 get_hard_reg_initial_val now and wrap it in an unspec if we might
10692 need to replace it. */
10693 /* ??? We also must do this for TARGET_SH1 in general, because otherwise
10694 combine can put the pseudo returned by get_hard_reg_initial_val into
10695 instructions that need a general purpose register, which will fail to
10696 be recognized when the pseudo becomes allocated to PR. */
10698 = get_hard_reg_initial_val (Pmode, TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
10700 return gen_rtx_UNSPEC (SImode, gen_rtvec (1, val), UNSPEC_RA);
10705 sh_expand_t_scc (enum rtx_code code, rtx target)
10707 rtx result = target;
10710 if (GET_CODE (sh_compare_op0) != REG || REGNO (sh_compare_op0) != T_REG
10711 || GET_CODE (sh_compare_op1) != CONST_INT)
10713 if (GET_CODE (result) != REG)
10714 result = gen_reg_rtx (SImode);
10715 val = INTVAL (sh_compare_op1);
10716 if ((code == EQ && val == 1) || (code == NE && val == 0))
10717 emit_insn (gen_movt (result));
10718 else if (TARGET_SH2A && ((code == EQ && val == 0)
10719 || (code == NE && val == 1)))
10720 emit_insn (gen_movrt (result));
10721 else if ((code == EQ && val == 0) || (code == NE && val == 1))
10723 emit_clobber (result);
10724 emit_insn (gen_subc (result, result, result));
10725 emit_insn (gen_addsi3 (result, result, const1_rtx));
10727 else if (code == EQ || code == NE)
10728 emit_insn (gen_move_insn (result, GEN_INT (code == NE)));
10731 if (result != target)
10732 emit_move_insn (target, result);
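/* Worked example (illustrative) of the subc sequence above: for
   code == EQ, val == 0 we want result = (T == 0).  subc computes
   result = result - result - T = -T, and the following addsi3 yields
   1 - T, i.e. 1 when the T bit is clear and 0 when it is set.  */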
10736 /* INSN is an sfunc; return the rtx that describes the address used. */
10738 extract_sfunc_addr (rtx insn)
10740 rtx pattern, part = NULL_RTX;
10743 pattern = PATTERN (insn);
10744 len = XVECLEN (pattern, 0);
10745 for (i = 0; i < len; i++)
10747 part = XVECEXP (pattern, 0, i);
10748 if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == Pmode
10749 && GENERAL_REGISTER_P (true_regnum (XEXP (part, 0))))
10750 return XEXP (part, 0);
10752 gcc_assert (GET_CODE (XVECEXP (pattern, 0, 0)) == UNSPEC_VOLATILE);
10753 return XVECEXP (XVECEXP (pattern, 0, 0), 0, 1);
10756 /* Verify that the register in use_sfunc_addr still agrees with the address
10757 used in the sfunc. This prevents fill_slots_from_thread from changing
10759 INSN is the use_sfunc_addr instruction, and REG is the register it
10762 check_use_sfunc_addr (rtx insn, rtx reg)
10764 /* Search for the sfunc. It should really come right after INSN. */
10765 while ((insn = NEXT_INSN (insn)))
10767 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
10769 if (! INSN_P (insn))
10772 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10773 insn = XVECEXP (PATTERN (insn), 0, 0);
10774 if (GET_CODE (PATTERN (insn)) != PARALLEL
10775 || get_attr_type (insn) != TYPE_SFUNC)
10777 return rtx_equal_p (extract_sfunc_addr (insn), reg);
10779 gcc_unreachable ();
10782 /* This function returns a constant rtx that represents 2**15 / pi
10783 (about 10430.38) in SFmode. It's used to scale SFmode angles, in
10784 radians, to a fixed-point signed 16.16-bit fraction of a full circle
10785 (i.e., 2*pi maps to 0x10000). */
10787 static GTY(()) rtx sh_fsca_sf2int_rtx;
10790 sh_fsca_sf2int (void)
10792 if (! sh_fsca_sf2int_rtx)
10794 REAL_VALUE_TYPE rv;
10796 real_from_string (&rv, "10430.378350470453");
10797 sh_fsca_sf2int_rtx = const_double_from_real_value (rv, SFmode);
10800 return sh_fsca_sf2int_rtx;
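/* A quick sanity check of the constant above (editorial): 2**15/pi is
   32768 / 3.14159265... = 10430.378..., so an angle of pi/4 radians
   scales to (pi/4) * (2**15/pi) = 2**13 = 0x2000, one eighth of the
   0x10000 full circle, as expected for 45 degrees.  */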
/* This function returns a constant rtx that represents 2**15 / pi in
   DFmode.  It's used to scale DFmode angles, in radians, to a
   fixed-point signed 16.16-bit fraction of a full circle (i.e., 2*pi
   maps to 0x10000).  */

static GTY(()) rtx sh_fsca_df2int_rtx;

rtx
sh_fsca_df2int (void)
{
  if (! sh_fsca_df2int_rtx)
    {
      REAL_VALUE_TYPE rv;

      real_from_string (&rv, "10430.378350470453");
      sh_fsca_df2int_rtx = const_double_from_real_value (rv, DFmode);
    }

  return sh_fsca_df2int_rtx;
}
/* This function returns a constant rtx that represents pi / 2**15 in
   SFmode.  It's used to scale a fixed-point signed 16.16-bit fraction
   of a full circle back to an SFmode value (i.e., 0x10000 maps to
   2*pi).  */

static GTY(()) rtx sh_fsca_int2sf_rtx;

rtx
sh_fsca_int2sf (void)
{
  if (! sh_fsca_int2sf_rtx)
    {
      REAL_VALUE_TYPE rv;

      real_from_string (&rv, "9.587379924285257e-5");
      sh_fsca_int2sf_rtx = const_double_from_real_value (rv, SFmode);
    }

  return sh_fsca_int2sf_rtx;
}
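/* Likewise for the inverse factor (editorial): pi/2**15 is
   9.5873799...e-5, so 0x2000 * (pi/2**15) = 0.78539... = pi/4 radians.
   Composing the sh_fsca_sf2int and sh_fsca_int2sf scalings therefore
   round-trips an angle, up to rounding error.  */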
/* Initialize the CUMULATIVE_ARGS structure.  */

void
sh_init_cumulative_args (CUMULATIVE_ARGS * pcum,
			 tree fntype,
			 rtx libname ATTRIBUTE_UNUSED,
			 tree fndecl,
			 signed int n_named_args,
			 enum machine_mode mode)
{
  pcum->arg_count [(int) SH_ARG_FLOAT] = 0;
  pcum->free_single_fp_reg = 0;
  pcum->stack_regs = 0;
  pcum->byref_regs = 0;
  pcum->byref = 0;
  pcum->outgoing = (n_named_args == -1) ? 0 : 1;

  /* XXX - Should we check TARGET_HITACHI here ???  */
  pcum->renesas_abi = sh_attr_renesas_p (fntype) ? 1 : 0;

  if (fntype)
    {
      pcum->force_mem = ((TARGET_HITACHI || pcum->renesas_abi)
			 && aggregate_value_p (TREE_TYPE (fntype), fndecl));
      pcum->prototype_p = TYPE_ARG_TYPES (fntype) ? TRUE : FALSE;
      pcum->arg_count [(int) SH_ARG_INT]
	= TARGET_SH5 && aggregate_value_p (TREE_TYPE (fntype), fndecl);

      pcum->call_cookie
	= CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
				 && pcum->arg_count [(int) SH_ARG_INT] == 0
				 && (TYPE_MODE (TREE_TYPE (fntype)) == BLKmode
				     ? int_size_in_bytes (TREE_TYPE (fntype))
				     : GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (fntype)))) > 4
				 && (BASE_RETURN_VALUE_REG (TYPE_MODE (TREE_TYPE (fntype)))
				     == FIRST_RET_REG));
    }
  else
    {
      pcum->arg_count [(int) SH_ARG_INT] = 0;
      pcum->prototype_p = FALSE;
      if (mode != VOIDmode)
	{
	  pcum->call_cookie =
	    CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
				   && GET_MODE_SIZE (mode) > 4
				   && BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG);

	  /* If the default ABI is the Renesas ABI then all library
	     calls must assume that the library will be using the
	     Renesas ABI.  So if the function would return its result
	     in memory then we must force the address of this memory
	     block onto the stack.  Ideally we would like to call
	     targetm.calls.return_in_memory() here but we do not have
	     the TYPE or the FNDECL available so we synthesize the
	     contents of that function as best we can.  */
	  pcum->force_mem =
	    ((TARGET_DEFAULT & MASK_HITACHI)
	     && (mode == BLKmode
		 || (GET_MODE_SIZE (mode) > 4
		     && !(mode == DFmode
			  && TARGET_FPU_DOUBLE))));
	}
      else
	{
	  pcum->call_cookie = 0;
	  pcum->force_mem = FALSE;
	}
    }
}
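/* Editorial note on the two entry forms, assuming the usual macro wiring
   where INIT_CUMULATIVE_ARGS passes the function type and
   INIT_CUMULATIVE_LIBCALL_ARGS passes a NULL FNTYPE plus the return
   MODE.  A hypothetical libcall returning DFmode would be set up as:

       CUMULATIVE_ARGS cum;
       sh_init_cumulative_args (&cum, NULL_TREE, libname, NULL_TREE,
				0, DFmode);

   which is why the else-branch above has to approximate
   targetm.calls.return_in_memory() from MODE alone.  */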
/* Replace any occurrence of FROM(n) in X with TO(n).  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.

   This is like replace_rtx, except that we operate on N_REPLACEMENTS
   replacements simultaneously - FROM(n) is replacements[n*2] and TO(n) is
   replacements[n*2+1] - and that we take mode changes into account.

   If a replacement is ambiguous, return NULL_RTX.

   If MODIFY is zero, don't modify any rtl in place,
   just return zero or nonzero for failure / success.  */
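/* Editorial example with hypothetical hard registers: to replace r4
   with r6 and r5 with r7 in one pass, lay the pairs out flat, with
   FROM(n) at index n*2 and TO(n) at index n*2+1:

       rtx repl[4];
       repl[0] = gen_rtx_REG (SImode, 4);
       repl[1] = gen_rtx_REG (SImode, 6);
       repl[2] = gen_rtx_REG (SImode, 5);
       repl[3] = gen_rtx_REG (SImode, 7);
       x = replace_n_hard_rtx (x, repl, 2, 1);

   With MODIFY == 0 the call only reports, via a null or nonnull return
   value, whether the replacement would succeed.  */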
rtx
replace_n_hard_rtx (rtx x, rtx *replacements, int n_replacements, int modify)
{
  int i, j;
  const char *fmt;

  /* The following prevents a loop when we change a MEM inside a
     CONST_DOUBLE into the same CONST_DOUBLE.  */
  if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
    return x;

  for (i = n_replacements - 1; i >= 0 ; i--)
    if (x == replacements[i*2] && GET_MODE (x) == GET_MODE (replacements[i*2+1]))
      return replacements[i*2+1];

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new = replace_n_hard_rtx (SUBREG_REG (x), replacements,
				    n_replacements, modify);

      if (GET_CODE (new) == CONST_INT)
	{
	  x = simplify_subreg (GET_MODE (x), new,
			       GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
	  gcc_assert (x);
	}
      else if (modify)
	SUBREG_REG (x) = new;

      return x;
    }
  else if (GET_CODE (x) == REG)
    {
      unsigned regno = REGNO (x);
      unsigned nregs = (regno < FIRST_PSEUDO_REGISTER
			? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
      rtx result = NULL_RTX;

      for (i = n_replacements - 1; i >= 0; i--)
	{
	  rtx from = replacements[i*2];
	  rtx to = replacements[i*2+1];
	  unsigned from_regno, from_nregs, to_regno, new_regno;

	  if (GET_CODE (from) != REG)
	    continue;
	  from_regno = REGNO (from);
	  from_nregs = (from_regno < FIRST_PSEUDO_REGISTER
			? HARD_REGNO_NREGS (from_regno, GET_MODE (from)) : 1);
	  if (regno < from_regno + from_nregs && regno + nregs > from_regno)
	    {
	      if (regno < from_regno
		  || regno + nregs > from_regno + nregs
		  || GET_CODE (to) != REG
		  || result)
		return NULL_RTX;
	      to_regno = REGNO (to);
	      if (to_regno < FIRST_PSEUDO_REGISTER)
		{
		  new_regno = regno + to_regno - from_regno;
		  if ((unsigned) HARD_REGNO_NREGS (new_regno, GET_MODE (x))
		      != nregs)
		    return NULL_RTX;
		  result = gen_rtx_REG (GET_MODE (x), new_regno);
		}
	      else if (GET_MODE (x) <= GET_MODE (to))
		result = gen_lowpart_common (GET_MODE (x), to);
	      else
		result = gen_lowpart_SUBREG (GET_MODE (x), to);
	    }
	}
      return result ? result : x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new = replace_n_hard_rtx (XEXP (x, 0), replacements,
				    n_replacements, modify);

      if (GET_CODE (new) == CONST_INT)
	{
	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					new, GET_MODE (XEXP (x, 0)));
	  gcc_assert (x);
	}
      else if (modify)
	XEXP (x, 0) = new;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      rtx new;

      if (fmt[i] == 'e')
	{
	  new = replace_n_hard_rtx (XEXP (x, i), replacements,
				    n_replacements, modify);
	  if (!new)
	    return NULL_RTX;
	  if (modify)
	    XEXP (x, i) = new;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  {
	    new = replace_n_hard_rtx (XVECEXP (x, i, j), replacements,
				      n_replacements, modify);
	    if (!new)
	      return NULL_RTX;
	    if (modify)
	      XVECEXP (x, i, j) = new;
	  }
    }

  return x;
}
rtx
sh_gen_truncate (enum machine_mode mode, rtx x, int need_sign_ext)
{
  enum rtx_code code = TRUNCATE;

  if (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
    {
      rtx inner = XEXP (x, 0);
      enum machine_mode inner_mode = GET_MODE (inner);

      if (inner_mode == mode)
	return inner;
      else if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
	x = inner;
      else if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode)
	       && (! need_sign_ext || GET_CODE (x) == SIGN_EXTEND))
	{
	  code = GET_CODE (x);
	  x = inner;
	}
    }
  return gen_rtx_fmt_e (code, mode, x);
}
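/* Editorial example: asked for an SImode truncate of
   (sign_extend:DI (reg:HI r)) with NEED_SIGN_EXT set, the function
   above drops the DImode extension and returns
   (sign_extend:SI (reg:HI r)) instead, since HImode is narrower than
   SImode and the existing extension already has the required sign.  */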
/* Called via for_each_rtx after reload, to clean up truncates of
   registers that span multiple actual hard registers.  */
int
shmedia_cleanup_truncate (rtx *p, void *n_changes)
{
  rtx x = *p, reg;

  if (GET_CODE (x) != TRUNCATE)
    return 0;
  reg = XEXP (x, 0);
  if (GET_MODE_SIZE (GET_MODE (reg)) > 8 && GET_CODE (reg) == REG)
    {
      enum machine_mode reg_mode = GET_MODE (reg);
      XEXP (x, 0) = simplify_subreg (DImode, reg, reg_mode,
				     subreg_lowpart_offset (DImode, reg_mode));
      *(int*) n_changes += 1;
      return -1;
    }
  return 0;
}
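/* Editorial sketch of the intended use, assuming the for_each_rtx
   callback contract (a return value of -1 stops traversal into the
   sub-rtx that was just rewritten):

       int n_changes = 0;
       for_each_rtx (&PATTERN (insn), shmedia_cleanup_truncate,
		     &n_changes);

   Afterwards N_CHANGES counts the truncates rewritten in place; this
   must run after reload, when hard register modes are final.  */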
/* Load and store depend on the highpart of the address.  However,
   set_attr_alternative does not give well-defined results before reload,
   so we must look at the rtl ourselves to see if any of the feeding
   registers is used in a memref.  */

/* Called by sh_contains_memref_p via for_each_rtx.  */
static int
sh_contains_memref_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  return (GET_CODE (*loc) == MEM);
}

/* Return nonzero iff INSN contains a MEM.  */
int
sh_contains_memref_p (rtx insn)
{
  return for_each_rtx (&PATTERN (insn), &sh_contains_memref_p_1, NULL);
}

/* Return nonzero iff INSN loads a banked register.  */
int
sh_loads_bankedreg_p (rtx insn)
{
  if (GET_CODE (PATTERN (insn)) == SET)
    {
      rtx op = SET_DEST (PATTERN (insn));
      if (REG_P (op) && BANKED_REGISTER_P (REGNO (op)))
	return 1;
    }

  return 0;
}
/* FNADDR is the MEM expression from a call expander.  Return an address
   to use in an SHmedia insn pattern.  */
rtx
shmedia_prepare_call_address (rtx fnaddr, int is_sibcall)
{
  int is_sym;

  fnaddr = XEXP (fnaddr, 0);
  is_sym = GET_CODE (fnaddr) == SYMBOL_REF;
  if (flag_pic && is_sym)
    {
      if (! SYMBOL_REF_LOCAL_P (fnaddr))
	{
	  rtx reg = gen_reg_rtx (Pmode);

	  /* We must not use GOTPLT for sibcalls, because PIC_REG
	     must be restored before the PLT code gets to run.  */
	  if (is_sibcall)
	    emit_insn (gen_symGOT2reg (reg, fnaddr));
	  else
	    emit_insn (gen_symGOTPLT2reg (reg, fnaddr));
	  fnaddr = reg;
	}
      else
	{
	  fnaddr = gen_sym2PIC (fnaddr);
	  PUT_MODE (fnaddr, Pmode);
	}
    }
  /* If ptabs might trap, make this visible to the rest of the compiler.
     We generally assume that symbols pertain to valid locations, but
     it is possible to generate invalid symbols with asm or linker tricks.
     In a list of functions where each returns its successor, an invalid
     symbol might denote an empty list.  */
  if (!TARGET_PT_FIXED
      && (!is_sym || TARGET_INVALID_SYMBOLS)
      && (!REG_P (fnaddr) || ! TARGET_REGISTER_P (REGNO (fnaddr))))
    {
      rtx tr = gen_reg_rtx (PDImode);

      emit_insn (gen_ptabs (tr, fnaddr));
      fnaddr = tr;
    }
  else if (! target_reg_operand (fnaddr, Pmode))
    fnaddr = copy_to_mode_reg (Pmode, fnaddr);
  return fnaddr;
}
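/* Editorial sketch of a caller, assuming a call expander whose
   operands[0] is the MEM naming the callee:

       rtx addr = shmedia_prepare_call_address (operands[0], 0);

   ADDR is then either a target register (possibly loaded via ptabs)
   or a plain register, suitable for an SHmedia call pattern.  */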
static enum reg_class
sh_secondary_reload (bool in_p, rtx x, enum reg_class class,
		     enum machine_mode mode, secondary_reload_info *sri)
{
  if (in_p)
    {
      if (REGCLASS_HAS_FP_REG (class)
	  && ! TARGET_SHMEDIA
	  && immediate_operand ((x), mode)
	  && ! ((fp_zero_operand (x) || fp_one_operand (x))
		&& mode == SFmode && fldi_ok ()))
	switch (mode)
	  {
	  case SFmode:
	    sri->icode = CODE_FOR_reload_insf__frn;
	    return NO_REGS;
	  case DFmode:
	    sri->icode = CODE_FOR_reload_indf__frn;
	    return NO_REGS;
	  case SImode:
	    /* ??? If we knew that we are in the appropriate mode -
	       single precision - we could use a reload pattern directly.  */
	    return FPUL_REGS;
	  default:
	    gcc_unreachable ();
	  }
      if (class == FPUL_REGS
	  && ((GET_CODE (x) == REG
	       && (REGNO (x) == MACL_REG || REGNO (x) == MACH_REG
		   || REGNO (x) == T_REG))
	      || GET_CODE (x) == PLUS))
	return GENERAL_REGS;
      if (class == FPUL_REGS && immediate_operand (x, mode))
	{
	  if (satisfies_constraint_I08 (x) || fp_zero_operand (x))
	    return GENERAL_REGS;
	  else if (mode == SFmode)
	    return FP_REGS;
	  sri->icode = CODE_FOR_reload_insi__i_fpul;
	  return NO_REGS;
	}
      if (class == FPSCR_REGS
	  && ((GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
	      || (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == PLUS)))
	return GENERAL_REGS;
      if (REGCLASS_HAS_FP_REG (class)
	  && TARGET_SHMEDIA
	  && immediate_operand (x, mode)
	  && x != CONST0_RTX (GET_MODE (x))
	  && GET_MODE (x) != V4SFmode)
	return GENERAL_REGS;
      if ((mode == QImode || mode == HImode)
	  && TARGET_SHMEDIA && inqhi_operand (x, mode))
	{
	  sri->icode = ((mode == QImode)
			? CODE_FOR_reload_inqi : CODE_FOR_reload_inhi);
	  return NO_REGS;
	}
      if (TARGET_SHMEDIA && class == GENERAL_REGS
	  && (GET_CODE (x) == LABEL_REF || PIC_DIRECT_ADDR_P (x)))
	return TARGET_REGS;
    } /* end of input-only processing.  */

  if (((REGCLASS_HAS_FP_REG (class)
	&& (GET_CODE (x) == REG
	    && (GENERAL_OR_AP_REGISTER_P (REGNO (x))
		|| (FP_REGISTER_P (REGNO (x)) && mode == SImode
		    && TARGET_FMOVD))))
       || (REGCLASS_HAS_GENERAL_REG (class)
	   && GET_CODE (x) == REG
	   && FP_REGISTER_P (REGNO (x))))
      && ! TARGET_SHMEDIA
      && (mode == SFmode || mode == SImode))
    return FPUL_REGS;
  if ((class == FPUL_REGS
       || (REGCLASS_HAS_FP_REG (class)
	   && ! TARGET_SHMEDIA && mode == SImode))
      && (GET_CODE (x) == MEM
	  || (GET_CODE (x) == REG
	      && (REGNO (x) >= FIRST_PSEUDO_REGISTER
		  || REGNO (x) == T_REG
		  || system_reg_operand (x, VOIDmode)))))
    {
      if (class == FPUL_REGS)
	return GENERAL_REGS;
      return FPUL_REGS;
    }
  if ((class == TARGET_REGS
       || (TARGET_SHMEDIA && class == SIBCALL_REGS))
      && !satisfies_constraint_Csy (x)
      && (GET_CODE (x) != REG || ! GENERAL_REGISTER_P (REGNO (x))))
    return GENERAL_REGS;
  if ((class == MAC_REGS || class == PR_REGS)
      && GET_CODE (x) == REG && ! GENERAL_REGISTER_P (REGNO (x))
      && class != REGNO_REG_CLASS (REGNO (x)))
    return GENERAL_REGS;
  if (class != GENERAL_REGS && GET_CODE (x) == REG
      && TARGET_REGISTER_P (REGNO (x)))
    return GENERAL_REGS;
  return NO_REGS;
}
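/* Editorial example: with CLASS == FPUL_REGS and X a small SImode
   constant, the hook above returns GENERAL_REGS, so reload stages the
   value through a general register, e.g.

       mov	#42,r1		! constant into a general register
       lds	r1,fpul		! then into FPUL

   since FPUL cannot be loaded from an immediate directly.  */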
enum sh_divide_strategy_e sh_div_strategy = SH_DIV_STRATEGY_DEFAULT;