/* Output routines for GCC for Renesas / SuperH SH.
   Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
   2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Steve Chamberlain (sac@cygnus.com).
   Improved by Jim Wilson (wilson@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-config.h"
#include "hard-reg-set.h"
#include "insn-attr.h"
#include "integrate.h"
#include "target-def.h"
#include "langhooks.h"
#include "basic-block.h"
#include "cfglayout.h"
#include "sched-int.h"
#include "alloc-pool.h"
#include "tm-constrs.h"
int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;

#define MSW (TARGET_LITTLE_ENDIAN ? 1 : 0)
#define LSW (TARGET_LITTLE_ENDIAN ? 0 : 1)
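
/* Illustrative sketch (not part of the original file): MSW / LSW give
   the word index of the most / least significant SImode half of a
   DImode value, so multiword accesses can be written without testing
   TARGET_LITTLE_ENDIAN explicitly.  For example, using the standard
   RTL helper operand_subword:

     rtx lsw_half = operand_subword (op, LSW, 0, DImode);
     rtx msw_half = operand_subword (op, MSW, 0, DImode);

   On a little-endian target LSW is word 0; on big endian it is word 1.  */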
/* These are some macros to abstract register modes.  */
#define CONST_OK_FOR_ADD(size) \
  (TARGET_SHMEDIA ? CONST_OK_FOR_I10 (size) : CONST_OK_FOR_I08 (size))
#define GEN_MOV (*(TARGET_SHMEDIA64 ? gen_movdi : gen_movsi))
#define GEN_ADD3 (*(TARGET_SHMEDIA64 ? gen_adddi3 : gen_addsi3))
#define GEN_SUB3 (*(TARGET_SHMEDIA64 ? gen_subdi3 : gen_subsi3))
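
/* Illustrative sketch (assumed usage, not from the original file):
   these macros let prologue/epilogue code emit pointer-sized moves and
   arithmetic without caring whether SHmedia64 makes Pmode DImode, e.g.

     emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
                          GEN_INT (-8)));

   expands through gen_adddi3 on SHmedia64 and gen_addsi3 elsewhere.  */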
/* Used to simplify the logic below.  Find the attributes wherever
   they may be.  */
#define SH_ATTRIBUTES(decl) \
  (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
  : DECL_ATTRIBUTES (decl) \
  ? (DECL_ATTRIBUTES (decl)) \
  : TYPE_ATTRIBUTES (TREE_TYPE (decl))
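
/* Illustrative sketch (not from the original file): the macro yields an
   attribute list for either a type or a decl, falling back to the
   decl's type when the decl itself carries no attributes, e.g.

     tree attrs = SH_ATTRIBUTES (decl);
     if (lookup_attribute ("renesas", attrs))
       ...;  */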
/* Set to 1 by expand_prologue () when the function is an interrupt
   handler.  */
int current_function_interrupt;

tree sh_deferred_function_attributes;
tree *sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;

/* Global variables for machine-dependent things.  */

/* Which CPU we are scheduling for.  */
enum processor_type sh_cpu;

/* Definitions used in ready queue reordering for the first scheduling
   pass.  */

/* Reg weights arrays for modes SFmode and SImode, indexed by insn LUID.  */
static short *regmode_weight[2];

/* Total SFmode and SImode weights of scheduled insns.  */
static int curr_regmode_pressure[2];

/* Number of r0 life regions.  */
static int r0_life_regions;

/* If true, skip cycles for Q -> R movement.  */
static int skip_cycles = 0;

/* Cached value of can_issue_more.  This is cached in the
   sh_variable_issue hook and returned from sh_reorder2.  */
static short cached_can_issue_more;
/* Provides the class number of the smallest class containing
   reg number.  */
enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  R0_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  FP0_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  NO_REGS, GENERAL_REGS, PR_REGS, T_REGS,
  MAC_REGS, MAC_REGS, FPUL_REGS, FPSCR_REGS,
  GENERAL_REGS, GENERAL_REGS,
};
char sh_register_names[FIRST_PSEUDO_REGISTER] \
  [MAX_REGISTER_NAME_LENGTH + 1] = SH_REGISTER_NAMES_INITIALIZER;

char sh_additional_register_names[ADDREGNAMES_SIZE] \
  [MAX_ADDITIONAL_REGISTER_NAME_LENGTH + 1]
  = SH_ADDITIONAL_REGISTER_NAMES_INITIALIZER;

int assembler_dialect;

static bool shmedia_space_reserved_for_target_registers;
static bool sh_handle_option (size_t, const char *, int);
static void split_branches (rtx);
static int branch_dest (rtx);
static void force_into (rtx, rtx);
static void print_slot (rtx);
static rtx add_constant (rtx, enum machine_mode, rtx);
static void dump_table (rtx, rtx);
static int hi_const (rtx);
static int broken_move (rtx);
static int mova_p (rtx);
static rtx find_barrier (int, rtx, rtx);
static int noncall_uses_reg (rtx, rtx, rtx *);
static rtx gen_block_redirect (rtx, int, int);
static void sh_reorg (void);
static void output_stack_adjust (int, rtx, int, HARD_REG_SET *);
static rtx frame_insn (rtx);
static rtx push (int);
static void pop (int);
static void push_regs (HARD_REG_SET *, int);
static int calc_live_regs (HARD_REG_SET *);
static HOST_WIDE_INT rounded_frame_size (int);
static rtx mark_constant_pool_use (rtx);
static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree,
						   int, bool *);
static tree sh_handle_resbank_handler_attribute (tree *, tree,
						 tree, int, bool *);
static tree sh2a_handle_function_vector_handler_attribute (tree *, tree,
							   tree, int, bool *);
static tree sh_handle_sp_switch_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_trap_exit_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_renesas_attribute (tree *, tree, tree, int, bool *);
static void sh_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void sh_insert_attributes (tree, tree *);
static const char *sh_check_pch_target_flags (int);
static int sh_adjust_cost (rtx, rtx, rtx, int);
static int sh_issue_rate (void);
static int sh_dfa_new_cycle (FILE *, int, rtx, int, int, int *sort_p);
static short find_set_regmode_weight (rtx, enum machine_mode);
static short find_insn_regmode_weight (rtx, enum machine_mode);
static void find_regmode_weight (basic_block, enum machine_mode);
static int find_r0_life_regions (basic_block);
static void sh_md_init_global (FILE *, int, int);
static void sh_md_finish_global (FILE *, int);
static int rank_for_reorder (const void *, const void *);
static void swap_reorder (rtx *, int);
static void ready_reorder (rtx *, int);
static short high_pressure (enum machine_mode);
static int sh_reorder (FILE *, int, rtx *, int *, int);
static int sh_reorder2 (FILE *, int, rtx *, int *, int);
static void sh_md_init (FILE *, int, int);
static int sh_variable_issue (FILE *, int, rtx, int);

static bool sh_function_ok_for_sibcall (tree, tree);

static bool sh_cannot_modify_jumps_p (void);
static enum reg_class sh_target_reg_class (void);
static bool sh_optimize_target_register_callee_saved (bool);
static bool sh_ms_bitfield_layout_p (const_tree);

static void sh_init_builtins (void);
static void sh_media_init_builtins (void);
static rtx sh_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
				tree);
static void sh_file_start (void);
static int flow_dependent_p (rtx, rtx);
static void flow_dependent_p_1 (rtx, const_rtx, void *);
static int shiftcosts (rtx);
static int andcosts (rtx);
static int addsubcosts (rtx);
static int multcosts (rtx);
static bool unspec_caller_rtx_p (rtx);
static bool sh_cannot_copy_insn_p (rtx);
static bool sh_rtx_costs (rtx, int, int, int *, bool);
static int sh_address_cost (rtx, bool);
static int sh_pr_n_sets (void);
static rtx sh_allocate_initial_value (rtx);
static bool sh_legitimate_address_p (enum machine_mode, rtx, bool);
static rtx sh_legitimize_address (rtx, rtx, enum machine_mode);
static int shmedia_target_regs_stack_space (HARD_REG_SET *);
static int shmedia_reserve_space_for_target_registers_p (int, HARD_REG_SET *);
static int shmedia_target_regs_stack_adjust (HARD_REG_SET *);
static int scavenge_reg (HARD_REG_SET *s);
struct save_schedule_s;
static struct save_entry_s *sh5_schedule_saves (HARD_REG_SET *,
						struct save_schedule_s *, int);

static rtx sh_struct_value_rtx (tree, int);
static bool sh_return_in_memory (const_tree, const_tree);
static rtx sh_builtin_saveregs (void);
static void sh_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
				       tree, int *, int);
static bool sh_strict_argument_naming (CUMULATIVE_ARGS *);
static bool sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *);
static tree sh_build_builtin_va_list (void);
static void sh_va_start (tree, rtx);
static tree sh_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static enum machine_mode sh_promote_function_mode (const_tree type,
						   enum machine_mode,
						   int *, const_tree, int);
static bool sh_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				  const_tree, bool);
static bool sh_callee_copies (CUMULATIVE_ARGS *, enum machine_mode,
			      const_tree, bool);
static int sh_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				 tree, bool);
static bool sh_scalar_mode_supported_p (enum machine_mode);
static int sh_dwarf_calling_convention (const_tree);
static void sh_encode_section_info (tree, rtx, int);
static int sh2a_function_vector_p (tree);
static const struct attribute_spec sh_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "interrupt_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
  { "sp_switch", 1, 1, true, false, false, sh_handle_sp_switch_attribute },
  { "trap_exit", 1, 1, true, false, false, sh_handle_trap_exit_attribute },
  { "renesas", 0, 0, false, true, false, sh_handle_renesas_attribute },
  { "trapa_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
  { "nosave_low_regs", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
  { "resbank", 0, 0, true, false, false, sh_handle_resbank_handler_attribute },
  { "function_vector", 1, 1, true, false, false, sh2a_handle_function_vector_handler_attribute },
  /* Symbian support adds three new attributes:
     dllexport - for exporting a function/variable that will live in a dll
     dllimport - for importing a function/variable from a dll

     Microsoft allows multiple declspecs in one __declspec, separating
     them with spaces.  We do NOT support this.  Instead, use __declspec
     multiple times.  */
  { "dllimport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
  { "dllexport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
  { NULL, 0, 0, false, false, false, NULL }
};
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sh_attribute_table

/* The next two are used for debug info when compiling with -gdwarf.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.ualong\t"

/* These are NULLed out on non-SH5 in OVERRIDE_OPTIONS.  */
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaquad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sh_output_function_epilogue

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sh_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START sh_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sh_handle_option

#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES sh_insert_attributes

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sh_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sh_issue_rate
/* The next 5 hooks have been implemented for reenabling sched1.  With the
   help of these macros we are limiting the movement of insns in sched1 to
   reduce the register pressure.  The overall idea is to keep count of SImode
   and SFmode regs required by already scheduled insns.  When these counts
   cross some threshold values, give priority to insns that free registers.
   The insn that frees registers is most likely to be the insn with lowest
   LUID (original insn order); but such an insn might be in the stalled
   queue (Q) instead of the ready queue (R).  To solve this, we skip cycles
   up to a max of 8 cycles so that such insns may move from Q -> R.

   A description of each hook follows (see also the illustrative sketch
   after this comment):

   TARGET_SCHED_INIT_GLOBAL: Added a new target hook in the generic
   scheduler; it is called inside the sched_init function just after
   the find_insn_reg_weights function call.  It is used to calculate the
   SImode and SFmode weights of insns of basic blocks; much like what
   find_insn_reg_weights does.
   TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.

   TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
   indicated by TARGET_SCHED_REORDER2; doing this may move insns from
   Q -> R.

   TARGET_SCHED_REORDER: If the register pressure for SImode or SFmode is
   high, reorder the ready queue so that the insn with lowest LUID will be
   issued first.

   TARGET_SCHED_REORDER2: If the register pressure is high, indicate to
   TARGET_SCHED_DFA_NEW_CYCLE to skip cycles.

   TARGET_SCHED_VARIABLE_ISSUE: Cache the value of can_issue_more so that it
   can be returned from TARGET_SCHED_REORDER2.

   TARGET_SCHED_INIT: Reset the register pressure counting variables.  */
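
/* Illustrative sketch (assumed pseudo-code, not part of the original
   file) of how the pieces above cooperate; high_pressure, ready_reorder
   and CURR_REGMODE_PRESSURE are all defined further below:

     // in TARGET_SCHED_REORDER:
     if (high_pressure (SImode) || high_pressure (SFmode))
       ready_reorder (ready, n_ready);   // lowest LUID first

     // in TARGET_SCHED_REORDER2:
     if (high_pressure (SImode) || high_pressure (SFmode))
       skip_cycles = 1;                  // ask DFA_NEW_CYCLE to stall

   The real hook bodies appear later in this file.  */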
#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE sh_dfa_new_cycle

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL sh_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL sh_md_finish_global

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE sh_variable_issue

#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER sh_reorder

#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 sh_reorder2

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sh_md_init

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sh_legitimize_address

#undef TARGET_CANNOT_MODIFY_JUMPS_P
#define TARGET_CANNOT_MODIFY_JUMPS_P sh_cannot_modify_jumps_p
#undef TARGET_BRANCH_TARGET_REGISTER_CLASS
#define TARGET_BRANCH_TARGET_REGISTER_CLASS sh_target_reg_class
#undef TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED
#define TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED \
  sh_optimize_target_register_callee_saved

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P sh_ms_bitfield_layout_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sh_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sh_expand_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sh_function_ok_for_sibcall

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P sh_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sh_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST sh_address_cost
#undef TARGET_ALLOCATE_INITIAL_VALUE
#define TARGET_ALLOCATE_INITIAL_VALUE sh_allocate_initial_value

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG sh_reorg

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN sh_dwarf_register_span

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sh_promote_function_mode

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sh_return_in_memory

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sh_builtin_saveregs
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS sh_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sh_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES sh_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sh_arg_partial_bytes

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sh_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sh_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P sh_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sh_vector_mode_supported_p

#undef TARGET_CHECK_PCH_TARGET_FLAGS
#define TARGET_CHECK_PCH_TARGET_FLAGS sh_check_pch_target_flags

#undef TARGET_DWARF_CALLING_CONVENTION
#define TARGET_DWARF_CALLING_CONVENTION sh_dwarf_calling_convention

/* Return regmode weight for insn.  */
#define INSN_REGMODE_WEIGHT(INSN, MODE) \
  regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)]

/* Return current register pressure for regmode.  */
#define CURR_REGMODE_PRESSURE(MODE) \
  curr_regmode_pressure[((MODE) == SImode) ? 0 : 1]
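
/* Illustrative usage (assumed, not from the original file): during
   scheduling the pressure counters are bumped by each scheduled insn's
   cached weight, e.g.

     CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);

   and compared against a threshold by the high_pressure predicate.  */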
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO sh_encode_section_info

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO sh_symbian_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING sh_symbian_strip_name_encoding
#undef TARGET_CXX_IMPORT_EXPORT_CLASS
#define TARGET_CXX_IMPORT_EXPORT_CLASS symbian_import_export_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sh_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sh_legitimate_address_p

/* Machine-specific symbol_ref flags.  */
#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)

struct gcc_target targetm = TARGET_INITIALIZER;
/* Implement TARGET_HANDLE_OPTION.  */

static bool
sh_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED,
		  int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_m1:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH1;
      return true;
    case OPT_m2:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2;
      return true;
    case OPT_m2a:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A;
      return true;
    case OPT_m2a_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_NOFPU;
      return true;
    case OPT_m2a_single:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE;
      return true;
    case OPT_m2a_single_only:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE_ONLY;
      return true;
    case OPT_m2e:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2E;
      return true;
    case OPT_m3:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3;
      return true;
    case OPT_m3e:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3E;
      return true;
    case OPT_m4:
    case OPT_m4_100:
    case OPT_m4_200:
    case OPT_m4_300:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4;
      return true;
    case OPT_m4_nofpu:
    case OPT_m4_100_nofpu:
    case OPT_m4_200_nofpu:
    case OPT_m4_300_nofpu:
    case OPT_m4_340:
    case OPT_m4_400:
    case OPT_m4_500:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_NOFPU;
      return true;
    case OPT_m4_single:
    case OPT_m4_100_single:
    case OPT_m4_200_single:
    case OPT_m4_300_single:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE;
      return true;
    case OPT_m4_single_only:
    case OPT_m4_100_single_only:
    case OPT_m4_200_single_only:
    case OPT_m4_300_single_only:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE_ONLY;
      return true;
    case OPT_m4a:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A;
      return true;
    case OPT_m4a_nofpu:
    case OPT_m4al:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_NOFPU;
      return true;
    case OPT_m4a_single:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE;
      return true;
    case OPT_m4a_single_only:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE_ONLY;
      return true;
    case OPT_m5_32media:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA;
      return true;
    case OPT_m5_32media_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA_NOFPU;
      return true;
    case OPT_m5_64media:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA;
      return true;
    case OPT_m5_64media_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA_NOFPU;
      return true;
    case OPT_m5_compact:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT;
      return true;
    case OPT_m5_compact_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT_NOFPU;
      return true;
    default:
      return true;
    }
}
/* Set default optimization options.  */
void
sh_optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
{
  if (level)
    {
      flag_omit_frame_pointer = 2;
      if (!size)
	sh_div_str = "inv:minlat";
    }
  if (size)
    {
      target_flags |= MASK_SMALLCODE;
      sh_div_str = SH_DIV_STR_FOR_SIZE;
    }
  else
    TARGET_CBRANCHDI4 = 1;
  /* We can't meaningfully test TARGET_SHMEDIA here, because -m options
     haven't been parsed yet, hence we'd read only the default.
     sh_target_reg_class will return NO_REGS if this is not SHMEDIA, so
     it's OK to always set flag_branch_target_load_optimize.  */
  if (level > 1)
    {
      flag_branch_target_load_optimize = 1;
      target_flags |= MASK_SAVE_ALL_TARGET_REGS;
    }

  /* Likewise, we can't meaningfully test TARGET_SH2E / TARGET_IEEE
     here, so leave it to OVERRIDE_OPTIONS to set
     flag_finite_math_only.  We set it to 2 here so we know if the user
     explicitly requested this to be on or off.  */
  flag_finite_math_only = 2;

  /* If flag_schedule_insns is 1, we set it to 2 here so we know if
     the user explicitly requested this to be on or off.  */
  if (flag_schedule_insns > 0)
    flag_schedule_insns = 2;

  set_param_value ("simultaneous-prefetches", 2);
}
/* Implement the OVERRIDE_OPTIONS macro.  Validate and override various
   options, and do some machine-dependent initialization.  */
void
sh_override_options (void)
{
  int regno;

  SUBTARGET_OVERRIDE_OPTIONS;
  if (flag_finite_math_only == 2)
    flag_finite_math_only
      = !flag_signaling_nans && TARGET_SH2E && ! TARGET_IEEE;
  if (TARGET_SH2E && !flag_finite_math_only)
    target_flags |= MASK_IEEE;
  sh_cpu = PROCESSOR_SH1;
  assembler_dialect = 0;
  if (TARGET_SH2)
    sh_cpu = PROCESSOR_SH2;
  if (TARGET_SH2E)
    sh_cpu = PROCESSOR_SH2E;
  if (TARGET_SH2A)
    sh_cpu = PROCESSOR_SH2A;
  if (TARGET_SH3)
    sh_cpu = PROCESSOR_SH3;
  if (TARGET_SH3E)
    sh_cpu = PROCESSOR_SH3E;
  if (TARGET_SH4)
    {
      assembler_dialect = 1;
      sh_cpu = PROCESSOR_SH4;
    }
  if (TARGET_SH4A_ARCH)
    {
      assembler_dialect = 1;
      sh_cpu = PROCESSOR_SH4A;
    }
  if (TARGET_SH5)
    {
      sh_cpu = PROCESSOR_SH5;
      target_flags |= MASK_ALIGN_DOUBLE;
      if (TARGET_SHMEDIA_FPU)
	target_flags |= MASK_FMOVD;
      if (TARGET_SHMEDIA)
	{
	  /* There are no delay slots on SHmedia.  */
	  flag_delayed_branch = 0;
	  /* Relaxation isn't yet supported for SHmedia.  */
	  target_flags &= ~MASK_RELAX;
	  /* After reload, if conversion does little good but can cause
	     ICEs:
	     - find_if_block doesn't do anything for SH because we don't
	       have conditional execution patterns.  (We use conditional
	       move patterns, which are handled differently, and only
	       before reload.)
	     - find_cond_trap doesn't do anything for the SH because we
	       don't have conditional traps.
	     - find_if_case_1 uses redirect_edge_and_branch_force in
	       the only path that does an optimization, and this causes
	       an ICE when branch targets are in registers.
	     - find_if_case_2 doesn't do anything for the SHmedia after
	       reload except when it can redirect a tablejump - and
	       that's rather rare.  */
	  flag_if_conversion2 = 0;
	  if (! strcmp (sh_div_str, "call"))
	    sh_div_strategy = SH_DIV_CALL;
	  else if (! strcmp (sh_div_str, "call2"))
	    sh_div_strategy = SH_DIV_CALL2;
	  if (! strcmp (sh_div_str, "fp") && TARGET_FPU_ANY)
	    sh_div_strategy = SH_DIV_FP;
	  else if (! strcmp (sh_div_str, "inv"))
	    sh_div_strategy = SH_DIV_INV;
	  else if (! strcmp (sh_div_str, "inv:minlat"))
	    sh_div_strategy = SH_DIV_INV_MINLAT;
	  else if (! strcmp (sh_div_str, "inv20u"))
	    sh_div_strategy = SH_DIV_INV20U;
	  else if (! strcmp (sh_div_str, "inv20l"))
	    sh_div_strategy = SH_DIV_INV20L;
	  else if (! strcmp (sh_div_str, "inv:call2"))
	    sh_div_strategy = SH_DIV_INV_CALL2;
	  else if (! strcmp (sh_div_str, "inv:call"))
	    sh_div_strategy = SH_DIV_INV_CALL;
	  else if (! strcmp (sh_div_str, "inv:fp"))
	    {
	      if (TARGET_FPU_ANY)
		sh_div_strategy = SH_DIV_INV_FP;
	      else
		sh_div_strategy = SH_DIV_INV;
	    }
	  TARGET_CBRANCHDI4 = 0;
	  /* Assembler CFI isn't yet fully supported for SHmedia.  */
	  flag_dwarf2_cfi_asm = 0;
	}
    }
  else
    {
      /* Only the sh64-elf assembler fully supports .quad properly.  */
      targetm.asm_out.aligned_op.di = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }
  if (TARGET_SH1)
    {
      if (! strcmp (sh_div_str, "call-div1"))
	sh_div_strategy = SH_DIV_CALL_DIV1;
      else if (! strcmp (sh_div_str, "call-fp")
	       && (TARGET_FPU_DOUBLE
		   || (TARGET_HARD_SH4 && TARGET_SH2E)
		   || (TARGET_SHCOMPACT && TARGET_FPU_ANY)))
	sh_div_strategy = SH_DIV_CALL_FP;
      else if (! strcmp (sh_div_str, "call-table") && TARGET_SH2)
	sh_div_strategy = SH_DIV_CALL_TABLE;
      else
	/* Pick one that makes most sense for the target in general.
	   It is not much good to use different functions depending
	   on -Os, since then we'll end up with two different functions
	   when some of the code is compiled for size, and some for
	   speed.  */

	/* SH4 tends to emphasize speed.  */
	if (TARGET_HARD_SH4)
	  sh_div_strategy = SH_DIV_CALL_TABLE;
	/* These have their own way of doing things.  */
	else if (TARGET_SH2A)
	  sh_div_strategy = SH_DIV_INTRINSIC;
	/* ??? Should we use the integer SHmedia function instead?  */
	else if (TARGET_SHCOMPACT && TARGET_FPU_ANY)
	  sh_div_strategy = SH_DIV_CALL_FP;
	/* SH1 .. SH3 cores often go into small-footprint systems, so
	   default to the smallest implementation available.  */
	else if (TARGET_SH2)	/* ??? EXPERIMENTAL */
	  sh_div_strategy = SH_DIV_CALL_TABLE;
	else
	  sh_div_strategy = SH_DIV_CALL_DIV1;
    }
  if (!TARGET_SH1)
    TARGET_PRETEND_CMOVE = 0;
  if (sh_divsi3_libfunc[0])
    ; /* User supplied - leave it alone.  */
  else if (TARGET_DIVIDE_CALL_FP)
    sh_divsi3_libfunc = "__sdivsi3_i4";
  else if (TARGET_DIVIDE_CALL_TABLE)
    sh_divsi3_libfunc = "__sdivsi3_i4i";
  else if (TARGET_SH5)
    sh_divsi3_libfunc = "__sdivsi3_1";
  else
    sh_divsi3_libfunc = "__sdivsi3";
  if (sh_branch_cost == -1)
    sh_branch_cost
      = TARGET_SH5 ? 1 : ! TARGET_SH2 || TARGET_HARD_SH4 ? 2 : 1;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (! VALID_REGISTER_P (regno))
      sh_register_names[regno][0] = '\0';

  for (regno = 0; regno < ADDREGNAMES_SIZE; regno++)
    if (! VALID_REGISTER_P (ADDREGNAMES_REGNO (regno)))
      sh_additional_register_names[regno][0] = '\0';

  if (flag_omit_frame_pointer == 2)
    {
      /* The debugging information is sufficient,
	 but gdb doesn't implement this yet.  */
      if (0)
	flag_omit_frame_pointer
	  = (PREFERRED_DEBUGGING_TYPE == DWARF2_DEBUG);
      else
	flag_omit_frame_pointer = 0;
    }
  if ((flag_pic && ! TARGET_PREFERGOT)
      || (TARGET_SHMEDIA && !TARGET_PT_FIXED))
    flag_no_function_cse = 1;

  if (SMALL_REGISTER_CLASSES)
    {
      /* Never run scheduling before reload, since that can
	 break global alloc, and generates slower code anyway due
	 to the pressure on R0.  */
      /* Enable sched1 for SH4 if the user explicitly requests.
	 When sched1 is enabled, the ready queue will be reordered by
	 the target hooks if pressure is high.  We can not do this for
	 PIC, SH3 and lower as they give spill failures for R0.  */
      if (!TARGET_HARD_SH4 || flag_pic)
	flag_schedule_insns = 0;
      /* ??? Current exception handling places basic block boundaries
	 after call_insns.  It causes the high pressure on R0 and gives
	 spill failures for R0 in reload.  See PR 22553 and the thread
	 on gcc-patches
	 <http://gcc.gnu.org/ml/gcc-patches/2005-10/msg00816.html>.  */
      else if (flag_exceptions)
	{
	  if (flag_schedule_insns == 1)
	    warning (0, "ignoring -fschedule-insns because of exception handling bug");
	  flag_schedule_insns = 0;
	}
      else if (flag_schedule_insns == 2)
	flag_schedule_insns = 0;
    }
  if (align_loops == 0)
    align_loops = 1 << (TARGET_SH5 ? 3 : 2);
  if (align_jumps == 0)
    align_jumps = 1 << CACHE_LOG;
  else if (align_jumps < (TARGET_SHMEDIA ? 4 : 2))
    align_jumps = TARGET_SHMEDIA ? 4 : 2;

  /* Allocation boundary (in *bytes*) for the code of a function.
     SH1: 32 bit alignment is faster, because instructions are always
     fetched as a pair from a longword boundary.
     SH2 .. SH5: align to cache line start.  */
  if (align_functions == 0)
    align_functions
      = TARGET_SMALLCODE ? FUNCTION_BOUNDARY/8 : (1 << CACHE_LOG);
  /* The linker relaxation code breaks when a function contains
     alignments that are larger than that at the start of a
     function.  */
  if (TARGET_RELAX)
    {
      int min_align
	= align_loops > align_jumps ? align_loops : align_jumps;

      /* Also take possible .long constants / mova tables into account.  */
      if (align_functions < 4)
	align_functions = 4;
      if (align_functions < min_align)
	align_functions = min_align;
    }

  if (sh_fixed_range_str)
    sh_fix_range (sh_fixed_range_str);
}
/* Print the operand address in x to the stream.  */
void
print_operand_address (FILE *stream, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
      fprintf (stream, "@%s", reg_names[true_regnum (x)]);
      break;

    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "@(%d,%s)", (int) INTVAL (index),
		     reg_names[true_regnum (base)]);
	    break;

	  case REG:
	  case SUBREG:
	    {
	      int base_num = true_regnum (base);
	      int index_num = true_regnum (index);

	      fprintf (stream, "@(r0,%s)",
		       reg_names[MAX (base_num, index_num)]);
	      break;
	    }

	  default:
	    gcc_unreachable ();
	  }
      }
      break;

    case PRE_DEC:
      fprintf (stream, "@-%s", reg_names[true_regnum (XEXP (x, 0))]);
      break;

    case POST_INC:
      fprintf (stream, "@%s+", reg_names[true_regnum (XEXP (x, 0))]);
      break;

    default:
      x = mark_constant_pool_use (x);
      output_addr_const (stream, x);
      break;
    }
}
/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   '.'  print a .s if insn needs delay slot
   ','  print LOCAL_LABEL_PREFIX
   '@'  print trap, rte or rts depending upon pragma interruptness
   '#'  output a nop if there is nothing to put in the delay slot
   '\''  print likelihood suffix (/u for unlikely).
   '>'  print branch target if -fverbose-asm
   'O'  print a constant without the #
   'R'  print the LSW of a dp value - changes if in little endian
   'S'  print the MSW of a dp value - changes if in little endian
   'T'  print the next word of a dp value - same as 'R' in big endian mode.
   'M'  SHMEDIA: print an `x' if `m' will print `base,index'.
	otherwise: print .b / .w / .l / .s / .d suffix if operand is a MEM.
   'N'  print 'r63' if the operand is (const_int 0).
   'd'  print a V2SF reg as dN instead of fpN.
   'm'  print a pair `base,offset' or `base,index', for LD and ST.
   'U'  Likewise for {LD,ST}{HI,LO}.
   'V'  print the position of a single bit set.
   'W'  print the position of a single bit cleared.
   't'  print a memory address which is a register.
   'u'  prints the lowest 16 bits of CONST_INT, as an unsigned value.
   'o'  output an operator.

   See the illustrative examples after this comment for how some of
   these appear in output templates.  */
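
/* Illustrative examples (assumed, not from the original file) of how
   some of the modifiers above appear in machine-description output
   templates:

     "mov.l	%R1,%R0"   LSW of a double-word operand (see 'R')
     "mov.l	%S1,%S0"   MSW of a double-word operand (see 'S')
     "jsr	@%0%#"     '#' emits a nop when the delay slot is empty

   print_operand below implements each of these modifier codes.  */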
void
print_operand (FILE *stream, rtx x, int code)
{
  int regno;
  enum machine_mode mode;

  switch (code)
    {
      tree trapa_attr;

    case '.':
      if (final_sequence
	  && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
	  && get_attr_length (XVECEXP (final_sequence, 0, 1)))
	fprintf (stream, ASSEMBLER_DIALECT ? "/s" : ".s");
      break;
    case ',':
      fprintf (stream, "%s", LOCAL_LABEL_PREFIX);
      break;
    case '@':
      trapa_attr = lookup_attribute ("trap_exit",
				     DECL_ATTRIBUTES (current_function_decl));
      if (trapa_attr)
	fprintf (stream, "trapa #%ld",
		 (long) TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (trapa_attr))));
      else if (sh_cfun_interrupt_handler_p ())
	{
	  if (sh_cfun_resbank_handler_p ())
	    fprintf (stream, "resbank\n");
	  fprintf (stream, "rte");
	}
      else
	fprintf (stream, "rts");
      break;
    case '#':
      /* Output a nop if there's nothing in the delay slot.  */
      if (dbr_sequence_length () == 0)
	fprintf (stream, "\n\tnop");
      break;
    case '\'':
      {
	rtx note = find_reg_note (current_output_insn, REG_BR_PROB, 0);

	if (note && INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
	  fputs ("/u", stream);
	break;
      }
    case '>':
      if (flag_verbose_asm && JUMP_LABEL (current_output_insn))
	{
	  fputs ("\t! target: ", stream);
	  output_addr_const (stream, JUMP_LABEL (current_output_insn));
	}
      break;
    case 'O':
      x = mark_constant_pool_use (x);
      output_addr_const (stream, x);
      break;
      /* N.B.: %R / %S / %T adjust memory addresses by four.
	 For SHMEDIA, that means they can be used to access the first and
	 second 32 bit part of a 64 bit (or larger) value that
	 might be held in floating point registers or memory.
	 While they can be used to access 64 bit parts of a larger value
	 held in general purpose registers, that won't work with memory -
	 neither for fp registers, since the frxx names are used.  */
    case 'R':
      if (REG_P (x) || GET_CODE (x) == SUBREG)
	{
	  regno = true_regnum (x);
	  regno += FP_REGISTER_P (regno) ? 1 : LSW;
	  fputs (reg_names[regno], (stream));
	}
      else if (MEM_P (x))
	{
	  x = adjust_address (x, SImode, 4 * LSW);
	  print_operand_address (stream, XEXP (x, 0));
	}
      else
	{
	  rtx sub = NULL_RTX;

	  mode = GET_MODE (x);
	  if (mode == VOIDmode)
	    mode = DImode;
	  if (GET_MODE_SIZE (mode) >= 8)
	    sub = simplify_subreg (SImode, x, mode, 4 * LSW);
	  if (sub)
	    print_operand (stream, sub, 0);
	  else
	    output_operand_lossage ("invalid operand to %%R");
	}
      break;
    case 'S':
      if (REG_P (x) || GET_CODE (x) == SUBREG)
	{
	  regno = true_regnum (x);
	  regno += FP_REGISTER_P (regno) ? 0 : MSW;
	  fputs (reg_names[regno], (stream));
	}
      else if (MEM_P (x))
	{
	  x = adjust_address (x, SImode, 4 * MSW);
	  print_operand_address (stream, XEXP (x, 0));
	}
      else
	{
	  rtx sub = NULL_RTX;

	  mode = GET_MODE (x);
	  if (mode == VOIDmode)
	    mode = DImode;
	  if (GET_MODE_SIZE (mode) >= 8)
	    sub = simplify_subreg (SImode, x, mode, 4 * MSW);
	  if (sub)
	    print_operand (stream, sub, 0);
	  else
	    output_operand_lossage ("invalid operand to %%S");
	}
      break;
    case 'T':
      /* Next word of a double.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;
	case MEM:
	  if (GET_CODE (XEXP (x, 0)) != PRE_DEC
	      && GET_CODE (XEXP (x, 0)) != POST_INC)
	    x = adjust_address (x, SImode, 4);
	  print_operand_address (stream, XEXP (x, 0));
	  break;
	default:
	  break;
	}
      break;

    case 't':
      gcc_assert (MEM_P (x));
      x = XEXP (x, 0);
      switch (GET_CODE (x))
	{
	case REG:
	case SUBREG:
	  print_operand (stream, x, 0);
	  break;
	default:
	  break;
	}
      break;

    case 'o':
      switch (GET_CODE (x))
	{
	case PLUS:  fputs ("add", stream); break;
	case MINUS: fputs ("sub", stream); break;
	case MULT:  fputs ("mul", stream); break;
	case DIV:   fputs ("div", stream); break;
	case EQ:    fputs ("eq",  stream); break;
	case NE:    fputs ("ne",  stream); break;
	case GT:  case LT:  fputs ("gt",  stream); break;
	case GE:  case LE:  fputs ("ge",  stream); break;
	case GTU: case LTU: fputs ("gtu", stream); break;
	case GEU: case LEU: fputs ("geu", stream); break;
	default:
	  break;
	}
      break;
    case 'M':
      if (TARGET_SHMEDIA)
	{
	  if (MEM_P (x)
	      && GET_CODE (XEXP (x, 0)) == PLUS
	      && (REG_P (XEXP (XEXP (x, 0), 1))
		  || GET_CODE (XEXP (XEXP (x, 0), 1)) == SUBREG))
	    fputc ('x', stream);
	}
      else
	{
	  if (MEM_P (x))
	    {
	      switch (GET_MODE (x))
		{
		case QImode: fputs (".b", stream); break;
		case HImode: fputs (".w", stream); break;
		case SImode: fputs (".l", stream); break;
		case SFmode: fputs (".s", stream); break;
		case DFmode: fputs (".d", stream); break;
		default: gcc_unreachable ();
		}
	    }
	}
      break;

    case 'm':
      gcc_assert (MEM_P (x));
      x = XEXP (x, 0);
      /* Fall through.  */
    case 'U':
      switch (GET_CODE (x))
	{
	case REG:
	case SUBREG:
	  print_operand (stream, x, 0);
	  fputs (", 0", stream);
	  break;

	case PLUS:
	  print_operand (stream, XEXP (x, 0), 0);
	  fputs (", ", stream);
	  print_operand (stream, XEXP (x, 1), 0);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    case 'V':
      {
	int num = exact_log2 (INTVAL (x));
	gcc_assert (num >= 0);
	fprintf (stream, "#%d", num);
	break;
      }

    case 'W':
      {
	int num = exact_log2 (~INTVAL (x));
	gcc_assert (num >= 0);
	fprintf (stream, "#%d", num);
	break;
      }

    case 'd':
      gcc_assert (REG_P (x) && GET_MODE (x) == V2SFmode);

      fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
      break;

    case 'N':
      if (x == CONST0_RTX (GET_MODE (x)))
	{
	  fprintf ((stream), "r63");
	  break;
	}
      goto default_output;
    case 'u':
      if (CONST_INT_P (x))
	{
	  fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1));
	  break;
	}
      /* Fall through.  */

    default_output:
      mode = GET_MODE (x);

      switch (GET_CODE (x))
	{
	case TRUNCATE:
	  {
	    rtx inner = XEXP (x, 0);
	    int offset = 0;
	    enum machine_mode inner_mode;

	    /* We might see SUBREGs with vector mode registers inside.  */
	    if (GET_CODE (inner) == SUBREG
		&& (GET_MODE_SIZE (GET_MODE (inner))
		    == GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
		&& subreg_lowpart_p (inner))
	      inner = SUBREG_REG (inner);
	    if (CONST_INT_P (inner))
	      {
		x = GEN_INT (trunc_int_for_mode (INTVAL (inner), GET_MODE (x)));
		goto default_output;
	      }
	    inner_mode = GET_MODE (inner);
	    if (GET_CODE (inner) == SUBREG
		&& (GET_MODE_SIZE (GET_MODE (inner))
		    < GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
		&& REG_P (SUBREG_REG (inner)))
	      {
		offset = subreg_regno_offset (REGNO (SUBREG_REG (inner)),
					      GET_MODE (SUBREG_REG (inner)),
					      SUBREG_BYTE (inner),
					      GET_MODE (inner));
		inner = SUBREG_REG (inner);
	      }
	    if (!REG_P (inner) || GET_MODE_SIZE (inner_mode) > 8)
	      gcc_unreachable ();
	    /* Floating point register pairs are always big endian;
	       general purpose registers are 64 bit wide.  */
	    regno = REGNO (inner);
	    regno = (HARD_REGNO_NREGS (regno, inner_mode)
		     - HARD_REGNO_NREGS (regno, mode))
		    + offset;
	    x = inner;
	    goto reg;
	  }
	case SIGN_EXTEND:
	  x = XEXP (x, 0);
	  goto reg;
	  /* FIXME: We need this on SHmedia32 because reload generates
	     some sign-extended HI or QI loads into DImode registers
	     but, because Pmode is SImode, the address ends up with a
	     subreg:SI of the DImode register.  Maybe reload should be
	     fixed so as to apply alter_subreg to such loads?  */
	case IF_THEN_ELSE:
	  gcc_assert (trapping_target_operand (x, VOIDmode));
	  x = XEXP (XEXP (x, 2), 0);
	  goto default_output;
	case SUBREG:
	  gcc_assert (SUBREG_BYTE (x) == 0
		      && REG_P (SUBREG_REG (x)));

	  x = SUBREG_REG (x);
	  /* Fall through.  */

	reg:
	  regno = REGNO (x);
	  if (FP_REGISTER_P (regno)
	      && mode == V16SFmode)
	    fprintf ((stream), "mtrx%s", reg_names[regno] + 2);
	  else if (FP_REGISTER_P (REGNO (x))
		   && mode == V4SFmode)
	    fprintf ((stream), "fv%s", reg_names[regno] + 2);
	  else if (REG_P (x)
		   && mode == V2SFmode)
	    fprintf ((stream), "fp%s", reg_names[regno] + 2);
	  else if (FP_REGISTER_P (REGNO (x))
		   && GET_MODE_SIZE (mode) > 4)
	    fprintf ((stream), "d%s", reg_names[regno] + 1);
	  else
	    fputs (reg_names[regno], (stream));
	  break;

	case MEM:
	  output_address (XEXP (x, 0));
	  break;

	default:
	  if (TARGET_SH1)
	    fputc ('#', stream);
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}
/* Encode symbol attributes of a SYMBOL_REF into its
   SYMBOL_REF_FLAGS.  */
static void
sh_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (TREE_CODE (decl) == FUNCTION_DECL
      && sh2a_function_vector_p (decl) && TARGET_SH2A)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FUNCVEC_FUNCTION;
}
/* Like force_operand, but guarantees that VALUE ends up in TARGET.  */
static void
force_into (rtx value, rtx target)
{
  value = force_operand (value, target);
  if (! rtx_equal_p (value, target))
    emit_insn (gen_move_insn (target, value));
}
/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

int
expand_block_move (rtx *operands)
{
  int align = INTVAL (operands[3]);
  int constp = (CONST_INT_P (operands[2]));
  int bytes = (constp ? INTVAL (operands[2]) : 0);

  if (! constp)
    return 0;

  /* If we could use mov.l to move words and dest is word-aligned, we
     can use movua.l for loads and still generate a relatively short
     and efficient sequence.  */
  if (TARGET_SH4A_ARCH && align < 4
      && MEM_ALIGN (operands[0]) >= 32
      && can_move_by_pieces (bytes, 32))
    {
      rtx dest = copy_rtx (operands[0]);
      rtx src = copy_rtx (operands[1]);
      /* We could use different pseudos for each copied word, but
	 since movua can only load into r0, it's kind of
	 pointless.  */
      rtx temp = gen_reg_rtx (SImode);
      rtx src_addr = copy_addr_to_reg (XEXP (src, 0));
      int copied = 0;

      while (copied + 4 <= bytes)
	{
	  rtx to = adjust_address (dest, SImode, copied);
	  rtx from = adjust_automodify_address (src, BLKmode,
						src_addr, copied);

	  set_mem_size (from, GEN_INT (4));
	  emit_insn (gen_movua (temp, from));
	  emit_move_insn (src_addr, plus_constant (src_addr, 4));
	  emit_move_insn (to, temp);
	  copied += 4;
	}

      if (copied < bytes)
	move_by_pieces (adjust_address (dest, BLKmode, copied),
			adjust_automodify_address (src, BLKmode,
						   src_addr, copied),
			bytes - copied, align, 0);

      return 1;
    }

  /* If it isn't a constant number of bytes, or if it doesn't have 4 byte
     alignment, or if it isn't a multiple of 4 bytes, then fail.  */
  if (align < 4 || (bytes % 4 != 0))
    return 0;

  if (TARGET_HARD_SH4)
    {
      if (bytes < 12)
	return 0;
      else if (bytes == 12)
	{
	  rtx func_addr_rtx = gen_reg_rtx (Pmode);
	  rtx r4 = gen_rtx_REG (SImode, 4);
	  rtx r5 = gen_rtx_REG (SImode, 5);

	  function_symbol (func_addr_rtx, "__movmemSI12_i4", SFUNC_STATIC);
	  force_into (XEXP (operands[0], 0), r4);
	  force_into (XEXP (operands[1], 0), r5);
	  emit_insn (gen_block_move_real_i4 (func_addr_rtx));
	  return 1;
	}
      else if (! TARGET_SMALLCODE)
	{
	  const char *entry_name;
	  rtx func_addr_rtx = gen_reg_rtx (Pmode);
	  int dwords;
	  rtx r4 = gen_rtx_REG (SImode, 4);
	  rtx r5 = gen_rtx_REG (SImode, 5);
	  rtx r6 = gen_rtx_REG (SImode, 6);

	  entry_name = (bytes & 4 ? "__movmem_i4_odd" : "__movmem_i4_even");
	  function_symbol (func_addr_rtx, entry_name, SFUNC_STATIC);
	  force_into (XEXP (operands[0], 0), r4);
	  force_into (XEXP (operands[1], 0), r5);

	  dwords = bytes >> 3;
	  emit_insn (gen_move_insn (r6, GEN_INT (dwords - 1)));
	  emit_insn (gen_block_lump_real_i4 (func_addr_rtx));
	  return 1;
	}
      else
	return 0;
    }
  if (bytes < 64)
    {
      char entry[30];
      rtx func_addr_rtx = gen_reg_rtx (Pmode);
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);

      sprintf (entry, "__movmemSI%d", bytes);
      function_symbol (func_addr_rtx, entry, SFUNC_STATIC);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);
      emit_insn (gen_block_move_real (func_addr_rtx));
      return 1;
    }

  /* This is the same number of bytes as a memcpy call, but to a different
     less common function name, so this will occasionally use more space.  */
  if (! TARGET_SMALLCODE)
    {
      rtx func_addr_rtx = gen_reg_rtx (Pmode);
      int final_switch, while_loop;
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);
      rtx r6 = gen_rtx_REG (SImode, 6);

      function_symbol (func_addr_rtx, "__movmem", SFUNC_STATIC);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);

      /* r6 controls the size of the move.  16 is decremented from it
	 for each 64 bytes moved.  Then the negative bit left over is used
	 as an index into a list of move instructions.  e.g., a 72 byte move
	 would be set up with size(r6) = 14, for one iteration through the
	 big while loop, and a switch of -2 for the last part.  */
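      /* Worked example (added for illustration): for the 72 byte move
	 above, bytes / 4 = 18 words, so
	   final_switch = 16 - (18 % 16) = 14
	   while_loop   = (18 / 16 - 1) * 16 = 0
	 and r6 = 0 + 14 = 14; subtracting 16 in the loop leaves -2,
	 which indexes the trailing two-word move sequence.  */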
      final_switch = 16 - ((bytes / 4) % 16);
      while_loop = ((bytes / 4) / 16 - 1) * 16;
      emit_insn (gen_move_insn (r6, GEN_INT (while_loop + final_switch)));
      emit_insn (gen_block_lump_real (func_addr_rtx));
      return 1;
    }

  return 0;
}
/* Prepare operands for a move define_expand; specifically, one of the
   operands must be in a register.  */

void
prepare_move_operands (rtx operands[], enum machine_mode mode)
{
  if ((mode == SImode || mode == DImode)
      && flag_pic
      && ! ((mode == Pmode || mode == ptr_mode)
	    && tls_symbolic_operand (operands[1], Pmode) != TLS_MODEL_NONE))
    {
      rtx temp;

      if (SYMBOLIC_CONST_P (operands[1]))
	{
	  if (MEM_P (operands[0]))
	    operands[1] = force_reg (Pmode, operands[1]);
	  else if (TARGET_SHMEDIA
		   && GET_CODE (operands[1]) == LABEL_REF
		   && target_reg_operand (operands[0], mode))
	    /* It's ok.  */;
	  else
	    {
	      temp = (!can_create_pseudo_p ()
		      ? operands[0]
		      : gen_reg_rtx (Pmode));
	      operands[1] = legitimize_pic_address (operands[1], mode, temp);
	    }
	}
      else if (GET_CODE (operands[1]) == CONST
	       && GET_CODE (XEXP (operands[1], 0)) == PLUS
	       && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))
	{
	  temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
	  temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
					 mode, temp);
	  operands[1] = expand_binop (mode, add_optab, temp,
				      XEXP (XEXP (operands[1], 0), 1),
				      (!can_create_pseudo_p ()
				       ? temp
				       : gen_reg_rtx (Pmode)),
				      0, OPTAB_LIB_WIDEN);
	}
    }

  if (! reload_in_progress && ! reload_completed)
    {
      /* Copy the source to a register if both operands aren't registers.  */
      if (! register_operand (operands[0], mode)
	  && ! sh_register_operand (operands[1], mode))
	operands[1] = copy_to_mode_reg (mode, operands[1]);

      if (MEM_P (operands[0]) && ! memory_operand (operands[0], mode))
	{
	  /* This is like change_address_1 (operands[0], mode, 0, 1) ,
	     except that we can't use that function because it is static.  */
	  rtx new_rtx = change_address (operands[0], mode, 0);
	  MEM_COPY_ATTRIBUTES (new_rtx, operands[0]);
	  operands[0] = new_rtx;
	}

      /* This case can happen while generating code to move the result
	 of a library call to the target.  Reject `st r0,@(rX,rY)' because
	 reload will fail to find a spill register for rX, since r0 is already
	 being used for the source.  */
      else if (TARGET_SH1
	       && refers_to_regno_p (R0_REG, R0_REG + 1, operands[1], (rtx *)0)
	       && MEM_P (operands[0])
	       && GET_CODE (XEXP (operands[0], 0)) == PLUS
	       && REG_P (XEXP (XEXP (operands[0], 0), 1)))
	operands[1] = copy_to_mode_reg (mode, operands[1]);
    }

  if (mode == Pmode || mode == ptr_mode)
    {
      rtx op0, op1, opc;
      enum tls_model tls_kind;

      op0 = operands[0];
      op1 = operands[1];
      if (GET_CODE (op1) == CONST
	  && GET_CODE (XEXP (op1, 0)) == PLUS
	  && (tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode)
	      != TLS_MODEL_NONE))
	{
	  opc = XEXP (XEXP (op1, 0), 1);
	  op1 = XEXP (XEXP (op1, 0), 0);
	}
      else
	opc = NULL_RTX;

      if ((tls_kind = tls_symbolic_operand (op1, Pmode)) != TLS_MODEL_NONE)
	{
	  rtx tga_op1, tga_ret, tmp, tmp2;

	  switch (tls_kind)
	    {
	    case TLS_MODEL_GLOBAL_DYNAMIC:
	      tga_ret = gen_rtx_REG (Pmode, R0_REG);
	      emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
	      op1 = tga_ret;
	      break;

	    case TLS_MODEL_LOCAL_DYNAMIC:
	      tga_ret = gen_rtx_REG (Pmode, R0_REG);
	      emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));

	      tmp = gen_reg_rtx (Pmode);
	      emit_move_insn (tmp, tga_ret);

	      if (register_operand (op0, Pmode))
		tmp2 = op0;
	      else
		tmp2 = gen_reg_rtx (Pmode);

	      emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp));
	      op1 = tmp2;
	      break;

	    case TLS_MODEL_INITIAL_EXEC:
	      if (! flag_pic)
		{
		  /* Don't schedule insns for getting GOT address when
		     the first scheduling is enabled, to avoid spill
		     failures for R0.  */
		  if (flag_schedule_insns)
		    emit_insn (gen_blockage ());
		  emit_insn (gen_GOTaddr2picreg ());
		  emit_use (gen_rtx_REG (SImode, PIC_REG));
		  if (flag_schedule_insns)
		    emit_insn (gen_blockage ());
		}
	      tga_op1 = !can_create_pseudo_p () ? op0 : gen_reg_rtx (Pmode);
	      tmp = gen_sym2GOTTPOFF (op1);
	      emit_insn (gen_tls_initial_exec (tga_op1, tmp));
	      op1 = tga_op1;
	      break;

	    case TLS_MODEL_LOCAL_EXEC:
	      tmp2 = gen_reg_rtx (Pmode);
	      emit_insn (gen_load_gbr (tmp2));
	      tmp = gen_reg_rtx (Pmode);
	      emit_insn (gen_symTPOFF2reg (tmp, op1));

	      if (register_operand (op0, Pmode))
		op1 = op0;
	      else
		op1 = gen_reg_rtx (Pmode);

	      emit_insn (gen_addsi3 (op1, tmp, tmp2));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  if (opc)
	    emit_insn (gen_addsi3 (op1, op1, force_reg (SImode, opc)));
	  operands[1] = op1;
	}
    }
}
enum rtx_code
prepare_cbranch_operands (rtx *operands, enum machine_mode mode,
			  enum rtx_code comparison)
{
  rtx op1;
  rtx scratch = NULL_RTX;

  if (comparison == LAST_AND_UNUSED_RTX_CODE)
    comparison = GET_CODE (operands[0]);
  else
    scratch = operands[4];
  if (CONST_INT_P (operands[1])
      && !CONST_INT_P (operands[2]))
    {
      rtx tmp = operands[1];

      operands[1] = operands[2];
      operands[2] = tmp;
      comparison = swap_condition (comparison);
    }
  if (CONST_INT_P (operands[2]))
    {
      HOST_WIDE_INT val = INTVAL (operands[2]);
      if ((val == -1 || val == -0x81)
	  && (comparison == GT || comparison == LE))
	{
	  comparison = (comparison == GT) ? GE : LT;
	  operands[2] = gen_int_mode (val + 1, mode);
	}
      else if ((val == 1 || val == 0x80)
	       && (comparison == GE || comparison == LT))
	{
	  comparison = (comparison == GE) ? GT : LE;
	  operands[2] = gen_int_mode (val - 1, mode);
	}
      else if (val == 1 && (comparison == GEU || comparison == LTU))
	{
	  comparison = (comparison == GEU) ? NE : EQ;
	  operands[2] = CONST0_RTX (mode);
	}
      else if (val == 0x80 && (comparison == GEU || comparison == LTU))
	{
	  comparison = (comparison == GEU) ? GTU : LEU;
	  operands[2] = gen_int_mode (val - 1, mode);
	}
      else if (val == 0 && (comparison == GTU || comparison == LEU))
	comparison = (comparison == GTU) ? NE : EQ;
      else if (mode == SImode
	       && ((val == 0x7fffffff
		    && (comparison == GTU || comparison == LEU))
		   || ((unsigned HOST_WIDE_INT) val
		       == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
		       && (comparison == GEU || comparison == LTU))))
	{
	  comparison = (comparison == GTU || comparison == GEU) ? LT : GE;
	  operands[2] = CONST0_RTX (mode);
	}
    }
  op1 = operands[1];
  if (can_create_pseudo_p ())
    operands[1] = force_reg (mode, op1);
  /* When we are handling DImode comparisons, we want to keep constants so
     that we can optimize the component comparisons; however, memory loads
     are better issued as a whole so that they can be scheduled well.
     SImode equality comparisons allow I08 constants, but only when they
     compare r0.  Hence, if operands[1] has to be loaded from somewhere else
     into a register, that register might as well be r0, and we allow the
     constant.  If it is already in a register, this is likely to be
     allocated to a different hard register, thus we load the constant into
     a register unless it is zero.  */
  if (!REG_P (operands[2])
      && (!CONST_INT_P (operands[2])
	  || (mode == SImode && operands[2] != CONST0_RTX (SImode)
	      && ((comparison != EQ && comparison != NE)
		  || (REG_P (op1) && REGNO (op1) != R0_REG)
		  || !satisfies_constraint_I08 (operands[2])))))
    {
      if (scratch && GET_MODE (scratch) == mode)
	{
	  emit_move_insn (scratch, operands[2]);
	  operands[2] = scratch;
	}
      else if (can_create_pseudo_p ())
	operands[2] = force_reg (mode, operands[2]);
    }
  return comparison;
}
void
expand_cbranchsi4 (rtx *operands, enum rtx_code comparison, int probability)
{
  rtx (*branch_expander) (rtx) = gen_branch_true;
  rtx jump;

  comparison = prepare_cbranch_operands (operands, SImode, comparison);
  switch (comparison)
    {
    case NE: case LT: case LE: case LTU: case LEU:
      comparison = reverse_condition (comparison);
      branch_expander = gen_branch_false;
    default: ;
    }
  emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, T_REG),
			  gen_rtx_fmt_ee (comparison, SImode,
					  operands[1], operands[2])));
  jump = emit_jump_insn (branch_expander (operands[3]));
  if (probability >= 0)
    add_reg_note (jump, REG_BR_PROB, GEN_INT (probability));
}
/* ??? How should we distribute probabilities when more than one branch
   is generated?  So far we only have some ad-hoc observations:
   - If the operands are random, they are likely to differ in both parts.
   - If comparing items in a hash chain, the operands are random or equal;
     operation should be EQ or NE.
   - If items are searched in an ordered tree from the root, we can expect
     the highpart to be unequal about half of the time; operation should be
     an inequality comparison, operands non-constant, and overall probability
     about 50%.  Likewise for quicksort.
   - Range checks will often be made against constants.  Even if we assume for
     simplicity an even distribution of the non-constant operand over a
     sub-range here, the same probability could be generated with differently
     wide sub-ranges - as long as the ratio of the part of the subrange that
     is before the threshold to the part that comes after the threshold stays
     the same.  Thus, we can't really tell anything here;
     assuming random distribution is at least simple.  */
bool
expand_cbranchdi4 (rtx *operands, enum rtx_code comparison)
{
  enum rtx_code msw_taken, msw_skip, lsw_taken;
  rtx skip_label = NULL_RTX;
  rtx op1h, op1l, op2h, op2l;
  int num_branches;
  int prob, rev_prob;
  int msw_taken_prob = -1, msw_skip_prob = -1, lsw_taken_prob = -1;
  rtx scratch = operands[4];

  comparison = prepare_cbranch_operands (operands, DImode, comparison);
  op1h = gen_highpart_mode (SImode, DImode, operands[1]);
  op2h = gen_highpart_mode (SImode, DImode, operands[2]);
  op1l = gen_lowpart (SImode, operands[1]);
  op2l = gen_lowpart (SImode, operands[2]);
  msw_taken = msw_skip = lsw_taken = LAST_AND_UNUSED_RTX_CODE;
  prob = split_branch_probability;
  rev_prob = REG_BR_PROB_BASE - prob;
  switch (comparison)
    {
      /* ??? Should we use the cmpeqdi_t pattern for equality comparisons?
	 That costs 1 cycle more when the first branch can be predicted taken,
	 but saves us mispredicts because only one branch needs prediction.
	 It also enables generating the cmpeqdi_t-1 pattern.  */
    case EQ:
      if (TARGET_CMPEQDI_T)
	{
	  emit_insn (gen_cmpeqdi_t (operands[1], operands[2]));
	  emit_jump_insn (gen_branch_true (operands[3]));
	  return true;
	}
      msw_skip = NE;
      lsw_taken = EQ;
      if (prob >= 0)
	{
	  /* If we had more precision, we'd use rev_prob - (rev_prob >> 32).  */
	  msw_skip_prob = rev_prob;
	  if (REG_BR_PROB_BASE <= 65535)
	    lsw_taken_prob = prob ? REG_BR_PROB_BASE : 0;
	  else
	    {
	      gcc_assert (HOST_BITS_PER_WIDEST_INT >= 64);
	      lsw_taken_prob
		= (prob
		   ? (REG_BR_PROB_BASE
		      - ((HOST_WIDEST_INT) REG_BR_PROB_BASE * rev_prob
			 / ((HOST_WIDEST_INT) prob << 32)))
		   : 0);
	    }
	}
      break;
    case NE:
      if (TARGET_CMPEQDI_T)
	{
	  emit_insn (gen_cmpeqdi_t (operands[1], operands[2]));
	  emit_jump_insn (gen_branch_false (operands[3]));
	  return true;
	}
      msw_taken = NE;
      msw_taken_prob = prob;
      lsw_taken = NE;
      lsw_taken_prob = 0;
      break;
    case GTU: case GT:
      msw_taken = comparison;
      if (CONST_INT_P (op2l) && INTVAL (op2l) == -1)
	break;
      if (comparison != GTU || op2h != CONST0_RTX (SImode))
	msw_skip = swap_condition (msw_taken);
      lsw_taken = GTU;
      break;
    case GEU: case GE:
      if (op2l == CONST0_RTX (SImode))
	msw_taken = comparison;
      else
	{
	  msw_taken = comparison == GE ? GT : GTU;
	  msw_skip = swap_condition (msw_taken);
	  lsw_taken = GEU;
	}
      break;
    case LTU: case LT:
      msw_taken = comparison;
      if (op2l == CONST0_RTX (SImode))
	break;
      msw_skip = swap_condition (msw_taken);
      lsw_taken = LTU;
      break;
    case LEU: case LE:
      if (CONST_INT_P (op2l) && INTVAL (op2l) == -1)
	msw_taken = comparison;
      else
	{
	  lsw_taken = LEU;
	  if (comparison == LE)
	    msw_taken = LT;
	  else if (op2h != CONST0_RTX (SImode))
	    msw_taken = LTU;
	  else
	    break;
	  msw_skip = swap_condition (msw_taken);
	}
      break;
    default: return false;
    }
  num_branches = ((msw_taken != LAST_AND_UNUSED_RTX_CODE)
		  + (msw_skip != LAST_AND_UNUSED_RTX_CODE)
		  + (lsw_taken != LAST_AND_UNUSED_RTX_CODE));
  if (comparison != EQ && comparison != NE && num_branches > 1)
    {
      if (!CONSTANT_P (operands[2])
	  && prob >= (int) (REG_BR_PROB_BASE * 3 / 8U)
	  && prob <= (int) (REG_BR_PROB_BASE * 5 / 8U))
	{
	  msw_taken_prob = prob / 2U;
	  msw_skip_prob
	    = REG_BR_PROB_BASE * rev_prob / (REG_BR_PROB_BASE + rev_prob);
	  lsw_taken_prob = prob;
	}
      else
	{
	  msw_taken_prob = prob;
	  msw_skip_prob = REG_BR_PROB_BASE;
	  /* ??? If we have a constant op2h, should we use that when
	     calculating lsw_taken_prob?  */
	  lsw_taken_prob = prob;
	}
    }
  operands[1] = op1h;
  operands[2] = op2h;
  operands[4] = NULL_RTX;
  if (reload_completed
      && ! arith_reg_or_0_operand (op2h, SImode)
      && (true_regnum (op1h) || (comparison != EQ && comparison != NE))
      && (msw_taken != LAST_AND_UNUSED_RTX_CODE
	  || msw_skip != LAST_AND_UNUSED_RTX_CODE))
    {
      emit_move_insn (scratch, operands[2]);
      operands[2] = scratch;
    }
  if (msw_taken != LAST_AND_UNUSED_RTX_CODE)
    expand_cbranchsi4 (operands, msw_taken, msw_taken_prob);
  if (msw_skip != LAST_AND_UNUSED_RTX_CODE)
    {
      rtx taken_label = operands[3];

      /* Operands were possibly modified, but msw_skip doesn't expect this.
	 Always use the original ones.  */
      if (msw_taken != LAST_AND_UNUSED_RTX_CODE)
	{
	  operands[1] = op1h;
	  operands[2] = op2h;
	}

      operands[3] = skip_label = gen_label_rtx ();
      expand_cbranchsi4 (operands, msw_skip, msw_skip_prob);
      operands[3] = taken_label;
    }
  operands[1] = op1l;
  operands[2] = op2l;
  if (lsw_taken != LAST_AND_UNUSED_RTX_CODE)
    {
      if (reload_completed
	  && ! arith_reg_or_0_operand (op2l, SImode)
	  && (true_regnum (op1l) || (lsw_taken != EQ && lsw_taken != NE)))
	{
	  emit_move_insn (scratch, operands[2]);
	  operands[2] = scratch;
	}
      expand_cbranchsi4 (operands, lsw_taken, lsw_taken_prob);
    }
  if (msw_skip != LAST_AND_UNUSED_RTX_CODE)
    emit_label (skip_label);
  return true;
}
1980 /* Emit INSN, possibly in a PARALLEL with an USE of fpscr for SH4. */
1983 sh_emit_set_t_insn (rtx insn, enum machine_mode mode)
1985 if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
1987 insn = gen_rtx_PARALLEL (VOIDmode,
1989 gen_rtx_USE (VOIDmode, get_fpscr_rtx ())));
1990 (mode == SFmode ? emit_sf_insn : emit_df_insn) (insn);
1996 /* Prepare the operands for an scc instruction; make sure that the
1997 compare has been done and the result is in T_REG. */
1999 sh_emit_scc_to_t (enum rtx_code code, rtx op0, rtx op1)
2001 rtx t_reg = gen_rtx_REG (SImode, T_REG);
2002 enum rtx_code oldcode = code;
2003 enum machine_mode mode;
2005 /* First need a compare insn. */
2009 /* It isn't possible to handle this case. */
2026 if (code != oldcode)
2033 mode = GET_MODE (op0);
2034 if (mode == VOIDmode)
2035 mode = GET_MODE (op1);
2037 op0 = force_reg (mode, op0);
2038 if ((code != EQ && code != NE
2039 && (op1 != const0_rtx
2040 || code == GTU || code == GEU || code == LTU || code == LEU))
2041 || (mode == DImode && op1 != const0_rtx)
2042 || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
2043 op1 = force_reg (mode, op1);
2045 sh_emit_set_t_insn (gen_rtx_SET (VOIDmode, t_reg,
2046 gen_rtx_fmt_ee (code, SImode, op0, op1)),
2051 sh_emit_cheap_store_flag (enum machine_mode mode, enum rtx_code code,
2054 rtx target = gen_reg_rtx (SImode);
2057 gcc_assert (TARGET_SHMEDIA);
2066 tmp = gen_rtx_fmt_ee (code, SImode, op0, op1);
2067 emit_insn (gen_cstore4_media (target, tmp, op0, op1));
2077 tmp = gen_rtx_fmt_ee (reverse_condition (code), mode, op0, op1);
2078 emit_insn (gen_cstore4_media (target, tmp, op0, op1));
2096 rtx t2 = gen_reg_rtx (DImode);
2097 emit_insn (gen_extendsidi2 (t2, target));
2101 return gen_rtx_fmt_ee (code, VOIDmode, target, const0_rtx);
2104 /* Called from the md file, set up the operands of a compare instruction. */
2107 sh_emit_compare_and_branch (rtx *operands, enum machine_mode mode)
2109 enum rtx_code code = GET_CODE (operands[0]);
2110 enum rtx_code branch_code;
2111 rtx op0 = operands[1];
2112 rtx op1 = operands[2];
2114 bool need_ccmpeq = false;
2116 if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT)
2118 op0 = force_reg (mode, op0);
2119 op1 = force_reg (mode, op1);
2123 if (code != EQ || mode == DImode)
2125 /* Force args into regs, since we can't use constants here. */
2126 op0 = force_reg (mode, op0);
2127 if (op1 != const0_rtx || code == GTU || code == GEU)
2128 op1 = force_reg (mode, op1);
2132 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2135 || (code == LE && TARGET_IEEE && TARGET_SH2E)
2136 || (code == GE && !(TARGET_IEEE && TARGET_SH2E)))
2138 tem = op0, op0 = op1, op1 = tem;
2139 code = swap_condition (code);
2142 /* GE becomes fcmp/gt+fcmp/eq, for SH2E and TARGET_IEEE only. */
2145 gcc_assert (TARGET_IEEE && TARGET_SH2E);
2150 /* Now we can have EQ, NE, GT, LE. NE and LE are then transformed
2151 to EQ/GT respectively. */
2152 gcc_assert (code == EQ || code == GT || code == NE || code == LE);
2169 branch_code = reverse_condition (code);
2175 insn = gen_rtx_SET (VOIDmode,
2176 gen_rtx_REG (SImode, T_REG),
2177 gen_rtx_fmt_ee (branch_code, SImode, op0, op1));
2179 sh_emit_set_t_insn (insn, mode);
2181 sh_emit_set_t_insn (gen_ieee_ccmpeqsf_t (op0, op1), mode);
2183 if (branch_code == code)
2184 emit_jump_insn (gen_branch_true (operands[3]));
2186 emit_jump_insn (gen_branch_false (operands[3]));
2190 sh_emit_compare_and_set (rtx *operands, enum machine_mode mode)
2192 enum rtx_code code = GET_CODE (operands[1]);
2193 rtx op0 = operands[2];
2194 rtx op1 = operands[3];
2196 bool invert = false;
2199 op0 = force_reg (mode, op0);
2200 if ((code != EQ && code != NE
2201 && (op1 != const0_rtx
2202 || code == GTU || code == GEU || code == LTU || code == LEU))
2203 || (mode == DImode && op1 != const0_rtx)
2204 || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
2205 op1 = force_reg (mode, op1);
2207 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2209 if (code == LT || code == LE)
2211 code = swap_condition (code);
2212 tem = op0, op0 = op1, op1 = tem;
2218 lab = gen_label_rtx ();
2219 sh_emit_scc_to_t (EQ, op0, op1);
2220 emit_jump_insn (gen_branch_true (lab));
2237 sh_emit_scc_to_t (code, op0, op1);
2241 emit_insn (gen_movnegt (operands[0]));
2243 emit_move_insn (operands[0], gen_rtx_REG (SImode, T_REG));
2246 /* Functions to output assembly code. */
2248 /* Return a sequence of instructions to perform DI or DF move.
2250 Since the SH cannot move a DI or DF in one instruction, we have
2251 to take care when we see overlapping source and dest registers. */
2254 output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
2255 enum machine_mode mode)
2257 rtx dst = operands[0];
2258 rtx src = operands[1];
2261 && GET_CODE (XEXP (dst, 0)) == PRE_DEC)
2262 return "mov.l %T1,%0\n\tmov.l %1,%0";
2264 if (register_operand (dst, mode)
2265 && register_operand (src, mode))
2267 if (REGNO (src) == MACH_REG)
2268 return "sts mach,%S0\n\tsts macl,%R0";
2270 /* When mov.d r1,r2 do r2->r3 then r1->r2;
2271 when mov.d r1,r0 do r1->r0 then r2->r1. */
2273 if (REGNO (src) + 1 == REGNO (dst))
2274 return "mov %T1,%T0\n\tmov %1,%0";
2276 return "mov %1,%0\n\tmov %T1,%T0";
2278 else if (CONST_INT_P (src))
2280 if (INTVAL (src) < 0)
2281 output_asm_insn ("mov #-1,%S0", operands);
2283 output_asm_insn ("mov #0,%S0", operands);
2285 return "mov %1,%R0";
2287 else if (MEM_P (src))
2290 int dreg = REGNO (dst);
2291 rtx inside = XEXP (src, 0);
2293 switch (GET_CODE (inside))
2296 ptrreg = REGNO (inside);
2300 ptrreg = subreg_regno (inside);
2304 ptrreg = REGNO (XEXP (inside, 0));
2305 /* ??? A r0+REG address shouldn't be possible here, because it isn't
2306 an offsettable address. Unfortunately, offsettable addresses use
2307 QImode to check the offset, and a QImode offsettable address
2308 requires r0 for the other operand, which is not currently
2309 supported, so we can't use the 'o' constraint.
2310 Thus we must check for and handle r0+REG addresses here.
2311 We punt for now, since this is likely very rare. */
2312 gcc_assert (!REG_P (XEXP (inside, 1)));
2316 return "mov.l %1,%0\n\tmov.l %1+4,%T0";
2318 return "mov.l %1,%0\n\tmov.l %1,%T0";
2323 /* Work out the safe way to copy. Copy into the second half first. */
2325 return "mov.l %T1,%T0\n\tmov.l %1,%0";
2328 return "mov.l %1,%0\n\tmov.l %T1,%T0";
2331 /* Print an instruction which would have gone into a delay slot after
2332 another instruction, but couldn't because the other instruction expanded
2333 into a sequence where putting the slot insn at the end wouldn't work. */
2336 print_slot (rtx insn)
2338 final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file, optimize, 1, NULL);
2340 INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
2344 output_far_jump (rtx insn, rtx op)
2346 struct { rtx lab, reg, op; } this_jmp;
2347 rtx braf_base_lab = NULL_RTX;
2350 int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
2353 this_jmp.lab = gen_label_rtx ();
2357 && offset - get_attr_length (insn) <= 32766)
2360 jump = "mov.w %O0,%1; braf %1";
2368 jump = "mov.l %O0,%1; braf %1";
2370 jump = "mov.l r0,@-r15; mova %O0,r0; mov.l @r0,%1; add r0,%1; mov.l @r15+,r0; jmp @%1";
2373 jump = "mov.l %O0,%1; jmp @%1";
2375 /* If we have a scratch register available, use it. */
2376 if (NONJUMP_INSN_P ((prev = prev_nonnote_insn (insn)))
2377 && INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
2379 this_jmp.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
2380 if (REGNO (this_jmp.reg) == R0_REG && flag_pic && ! TARGET_SH2)
2381 jump = "mov.l r1,@-r15; mova %O0,r0; mov.l @r0,r1; add r1,r0; mov.l @r15+,r1; jmp @%1";
2382 output_asm_insn (jump, &this_jmp.lab);
2383 if (dbr_sequence_length ())
2384 print_slot (final_sequence);
2386 output_asm_insn ("nop", 0);
2390 /* Output the delay slot insn first if any. */
2391 if (dbr_sequence_length ())
2392 print_slot (final_sequence);
2394 this_jmp.reg = gen_rtx_REG (SImode, 13);
2395 /* We must keep the stack aligned to 8-byte boundaries on SH5.
2396 Fortunately, MACL is fixed and call-clobbered, and we never
2397 need its value across jumps, so save r13 in it instead of in the stack. */
2400 output_asm_insn ("lds r13, macl", 0);
2402 output_asm_insn ("mov.l r13,@-r15", 0);
2403 output_asm_insn (jump, &this_jmp.lab);
2405 output_asm_insn ("sts macl, r13", 0);
2407 output_asm_insn ("mov.l @r15+,r13", 0);
2409 if (far && flag_pic && TARGET_SH2)
2411 braf_base_lab = gen_label_rtx ();
2412 (*targetm.asm_out.internal_label) (asm_out_file, "L",
2413 CODE_LABEL_NUMBER (braf_base_lab));
2416 output_asm_insn (".align 2", 0);
2417 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this_jmp.lab));
2419 if (far && flag_pic)
2422 this_jmp.lab = braf_base_lab;
2423 output_asm_insn (".long %O2-%O0", &this_jmp.lab);
2426 output_asm_insn (far ? ".long %O2" : ".word %O2-%O0", &this_jmp.lab);
2430 /* Local label counter, used for constants in the pool and inside
2431 pattern branches. */
2433 static int lf = 100;
2435 /* Output code for ordinary branches. */
2438 output_branch (int logic, rtx insn, rtx *operands)
2440 switch (get_attr_length (insn))
2443 /* This can happen if filling the delay slot has caused a forward
2444 branch to exceed its range (we could reverse it, but only
2445 when we know we won't overextend other branches; this should
2446 best be handled by relaxation).
2447 It can also happen when other condbranches hoist delay slot insns
2448 from their destination, thus leading to code size increase.
2449 But the branch will still be in the range -4092..+4098 bytes. */
2454 /* The call to print_slot will clobber the operands. */
2455 rtx op0 = operands[0];
2457 /* If the instruction in the delay slot is annulled (true), then
2458 there is no delay slot where we can put it now. The only safe
2459 place for it is after the label. final will do that by default. */
2462 && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
2463 && get_attr_length (XVECEXP (final_sequence, 0, 1)))
2465 asm_fprintf (asm_out_file, "\tb%s%ss\t%LLF%d\n", logic ? "f" : "t",
2466 ASSEMBLER_DIALECT ? "/" : ".", label);
2467 print_slot (final_sequence);
2470 asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label);
2472 output_asm_insn ("bra\t%l0", &op0);
2473 fprintf (asm_out_file, "\tnop\n");
2474 (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
2478 /* When relaxing, handle this like a short branch. The linker
2479 will fix it up if it still doesn't fit after relaxation. */
2481 return logic ? "bt%.\t%l0" : "bf%.\t%l0";
2483 /* These are for SH2e, in which we have to account for the
2484 extra nop because of the hardware bug in annulled branches. */
2490 gcc_assert (!final_sequence
2491 || !(INSN_ANNULLED_BRANCH_P
2492 (XVECEXP (final_sequence, 0, 0))));
2493 asm_fprintf (asm_out_file, "b%s%ss\t%LLF%d\n",
2495 ASSEMBLER_DIALECT ? "/" : ".", label);
2496 fprintf (asm_out_file, "\tnop\n");
2497 output_asm_insn ("bra\t%l0", operands);
2498 fprintf (asm_out_file, "\tnop\n");
2499 (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
2503 /* When relaxing, fall through. */
2508 sprintf (buffer, "b%s%ss\t%%l0",
2510 ASSEMBLER_DIALECT ? "/" : ".");
2511 output_asm_insn (buffer, &operands[0]);
2516 /* There should be no longer branches now - that would
2517 indicate that something has destroyed the branches set
2518 up in machine_dependent_reorg. */
2523 /* Output a code sequence for INSN using TEMPL with OPERANDS; but before,
2524 fill in operands[9] as a label to the successor insn.
2525 We try to use jump threading where possible.
2526 If CODE matches the comparison in the IF_THEN_ELSE of a following jump,
2527 we assume the jump is taken. I.e. EQ means follow jmp and bf, NE means
2528 follow jmp and bt, if the address is in range. */
2530 output_branchy_insn (enum rtx_code code, const char *templ,
2531 rtx insn, rtx *operands)
2533 rtx next_insn = NEXT_INSN (insn);
2535 if (next_insn && JUMP_P (next_insn) && condjump_p (next_insn))
2537 rtx src = SET_SRC (PATTERN (next_insn));
2538 if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code)
2540 /* Following branch not taken */
2541 operands[9] = gen_label_rtx ();
2542 emit_label_after (operands[9], next_insn);
2543 INSN_ADDRESSES_NEW (operands[9],
2544 INSN_ADDRESSES (INSN_UID (next_insn))
2545 + get_attr_length (next_insn));
2550 int offset = (branch_dest (next_insn)
2551 - INSN_ADDRESSES (INSN_UID (next_insn)) + 4);
2552 if (offset >= -252 && offset <= 258)
2554 if (GET_CODE (src) == IF_THEN_ELSE)
2556 src = XEXP (src, 1);
2562 operands[9] = gen_label_rtx ();
2563 emit_label_after (operands[9], insn);
2564 INSN_ADDRESSES_NEW (operands[9],
2565 INSN_ADDRESSES (INSN_UID (insn))
2566 + get_attr_length (insn));
2571 output_ieee_ccmpeq (rtx insn, rtx *operands)
2573 return output_branchy_insn (NE, "bt\t%l9\n\tfcmp/eq\t%1,%0",
2577 /* Output the start of the assembler file. */
2580 sh_file_start (void)
2582 default_file_start ();
2585 /* Declare the .directive section before it is used. */
2586 fputs ("\t.section .directive, \"SM\", @progbits, 1\n", asm_out_file);
2587 fputs ("\t.asciz \"#<SYMEDIT>#\\n\"\n", asm_out_file);
2591 /* We need to show the text section with the proper
2592 attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
2593 emits it without attributes; otherwise GAS
2594 will complain. We can teach GAS specifically about the
2595 default attributes for our choice of text section, but
2596 then we would have to change GAS again if/when we change
2597 the text section name. */
2598 fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP);
2600 /* Switch to the data section so that the coffsem symbol
2601 isn't in the text section. */
2602 switch_to_section (data_section);
2604 if (TARGET_LITTLE_ENDIAN)
2605 fputs ("\t.little\n", asm_out_file);
2609 if (TARGET_SHCOMPACT)
2610 fputs ("\t.mode\tSHcompact\n", asm_out_file);
2611 else if (TARGET_SHMEDIA)
2612 fprintf (asm_out_file, "\t.mode\tSHmedia\n\t.abi\t%i\n",
2613 TARGET_SHMEDIA64 ? 64 : 32);
2617 /* Check if PAT includes UNSPEC_CALLER unspec pattern. */
2620 unspec_caller_rtx_p (rtx pat)
2625 split_const (pat, &base, &offset);
2626 if (GET_CODE (base) == UNSPEC)
2628 if (XINT (base, 1) == UNSPEC_CALLER)
2630 for (i = 0; i < XVECLEN (base, 0); i++)
2631 if (unspec_caller_rtx_p (XVECEXP (base, 0, i)))
2637 /* Indicate that INSN cannot be duplicated. This is true for insns
2638 that generate a unique label. */
2641 sh_cannot_copy_insn_p (rtx insn)
2645 if (!reload_completed || !flag_pic)
2648 if (!NONJUMP_INSN_P (insn))
2650 if (asm_noperands (insn) >= 0)
2653 pat = PATTERN (insn);
2654 if (GET_CODE (pat) != SET)
2656 pat = SET_SRC (pat);
2658 if (unspec_caller_rtx_p (pat))
2664 /* Actual number of instructions used to make a shift by N. */
2665 static const char ashiftrt_insns[] =
2666 { 0,1,2,3,4,5,8,8,8,8,8,8,8,8,8,8,2,3,4,5,8,8,8,8,8,8,8,8,8,8,8,2};
2668 /* Left shift and logical right shift are the same. */
2669 static const char shift_insns[] =
2670 { 0,1,1,2,2,3,3,4,1,2,2,3,3,4,3,3,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
2672 /* Individual shift amounts needed to get the above length sequences.
2673 One bit right shifts clobber the T bit, so when possible, put one bit
2674 shifts in the middle of the sequence, so the ends are eligible for
2675 branch delay slots. */
2676 static const short shift_amounts[32][5] = {
2677 {0}, {1}, {2}, {2, 1},
2678 {2, 2}, {2, 1, 2}, {2, 2, 2}, {2, 2, 1, 2},
2679 {8}, {8, 1}, {8, 2}, {8, 1, 2},
2680 {8, 2, 2}, {8, 2, 1, 2}, {8, -2, 8}, {8, -1, 8},
2681 {16}, {16, 1}, {16, 2}, {16, 1, 2},
2682 {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
2683 {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
2684 {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
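/* Added worked example (a reading of the tables above, not original text):
   a constant left shift by 13 uses shift_amounts[13] = {8, 2, 1, 2}, i.e.
   shll8, shll2, shll, shll2 - four insns, matching shift_insns[13] == 4,
   with the lone 1-bit shift kept in the middle as described above.
   Negative entries denote right shifts: shift_amounts[14] = {8, -2, 8}
   shifts left 8, right 2, then left 8 again, since 8 - 2 + 8 == 14.  */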
2686 /* Likewise, but for shift amounts < 16, up to three highmost bits
2687 might be clobbered. This is typically used when combined with some
2688 kind of sign or zero extension. */
2690 static const char ext_shift_insns[] =
2691 { 0,1,1,2,2,3,2,2,1,2,2,3,3,3,2,2,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
2693 static const short ext_shift_amounts[32][4] = {
2694 {0}, {1}, {2}, {2, 1},
2695 {2, 2}, {2, 1, 2}, {8, -2}, {8, -1},
2696 {8}, {8, 1}, {8, 2}, {8, 1, 2},
2697 {8, 2, 2}, {16, -2, -1}, {16, -2}, {16, -1},
2698 {16}, {16, 1}, {16, 2}, {16, 1, 2},
2699 {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
2700 {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
2701 {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
2703 /* Assuming we have a value that has been sign-extended by at least one bit,
2704 can we use the ext_shift_amounts with the last shift turned to an arithmetic shift
2705 to shift it by N without data loss, and quicker than by other means? */
2706 #define EXT_SHIFT_SIGNED(n) (((n) | 8) == 15)
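/* Added note: (n | 8) == 15 holds exactly for n == 7 and n == 15 - the two
   counts whose ext_shift_amounts rows ({8, -1} and {16, -1}) end in a
   single-bit right shift, which is the one that can be turned into an
   arithmetic shift to restore the sign.  */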
2708 /* This is used in length attributes in sh.md to help compute the length
2709 of arbitrary constant shift instructions. */
2712 shift_insns_rtx (rtx insn)
2714 rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2715 int shift_count = INTVAL (XEXP (set_src, 1)) & 31;
2716 enum rtx_code shift_code = GET_CODE (set_src);
2721 return ashiftrt_insns[shift_count];
2724 return shift_insns[shift_count];
2730 /* Return the cost of a shift. */
2740 if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
2742 if (GET_MODE (x) == DImode
2743 && CONST_INT_P (XEXP (x, 1))
2744 && INTVAL (XEXP (x, 1)) == 1)
2747 /* Everything else is invalid, because there is no pattern for it. */
2750 /* If shift by a non constant, then this will be expensive. */
2751 if (!CONST_INT_P (XEXP (x, 1)))
2752 return SH_DYNAMIC_SHIFT_COST;
2754 /* Otherwise, return the true cost in instructions. Cope with out of range
2755 shift counts more or less arbitrarily. */
2756 value = INTVAL (XEXP (x, 1)) & 31;
2758 if (GET_CODE (x) == ASHIFTRT)
2760 int cost = ashiftrt_insns[value];
2761 /* If SH3, then we put the constant in a reg and use shad. */
2762 if (cost > 1 + SH_DYNAMIC_SHIFT_COST)
2763 cost = 1 + SH_DYNAMIC_SHIFT_COST;
2767 return shift_insns[value];
2770 /* Return the cost of an AND operation. */
2777 /* Anding with a register is a single cycle and instruction. */
2778 if (!CONST_INT_P (XEXP (x, 1)))
2781 i = INTVAL (XEXP (x, 1));
2785 if (satisfies_constraint_I10 (XEXP (x, 1))
2786 || satisfies_constraint_J16 (XEXP (x, 1)))
2789 return 1 + rtx_cost (XEXP (x, 1), AND, !optimize_size);
2792 /* These constants are single cycle extu.[bw] instructions. */
2793 if (i == 0xff || i == 0xffff)
2795 /* Constants that can be used in an and immediate instruction in a single
2796 cycle, but this requires r0, so make it a little more expensive. */
2797 if (CONST_OK_FOR_K08 (i))
2799 /* Constants that can be loaded with a mov immediate and an and.
2800 This case is probably unnecessary. */
2801 if (CONST_OK_FOR_I08 (i))
2803 /* Any other constant requires a 2 cycle pc-relative load plus an and.
2804 This case is probably unnecessary. */
2808 /* Return the cost of an addition or a subtraction. */
2813 /* Adding a register is a single cycle insn. */
2814 if (REG_P (XEXP (x, 1))
2815 || GET_CODE (XEXP (x, 1)) == SUBREG)
2818 /* Likewise for small constants. */
2819 if (CONST_INT_P (XEXP (x, 1))
2820 && CONST_OK_FOR_ADD (INTVAL (XEXP (x, 1))))
2824 switch (GET_CODE (XEXP (x, 1)))
2829 return TARGET_SHMEDIA64 ? 5 : 3;
2832 if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1))))
2834 else if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1)) >> 16))
2836 else if (CONST_OK_FOR_I16 ((INTVAL (XEXP (x, 1)) >> 16) >> 16))
2844 /* Any other constant requires a 2 cycle pc-relative load plus an
2849 /* Return the cost of a multiply. */
2851 multcosts (rtx x ATTRIBUTE_UNUSED)
2853 if (sh_multcost >= 0)
2856 /* ??? We have a mul insn, but it has a latency of three, and doesn't
2857 accept constants. Ideally, we would use a cost of one or two and
2858 add the cost of the operand, but disregard the latter when inside loops
2859 and loop invariant code motion is still to follow.
2860 Using a multiply first and splitting it later if it's a loss
2861 doesn't work because of different sign / zero extension semantics
2862 of multiplies vs. shifts. */
2863 return TARGET_SMALLCODE ? 2 : 3;
2867 /* We have a mul insn, so we can never take more than the mul and the
2868 read of the mac reg, but count more because of the latency and extra
2870 if (TARGET_SMALLCODE)
2875 /* If we're aiming at small code, then just count the number of
2876 insns in a multiply call sequence. */
2877 if (TARGET_SMALLCODE)
2880 /* Otherwise count all the insns in the routine we'd be calling too. */
2884 /* Compute a (partial) cost for rtx X. Return true if the complete
2885 cost has been computed, and false if subexpressions should be
2886 scanned. In either case, *TOTAL contains the cost result. */
2889 sh_rtx_costs (rtx x, int code, int outer_code, int *total,
2890 bool speed ATTRIBUTE_UNUSED)
2897 if (INTVAL (x) == 0)
2899 else if (outer_code == AND && and_operand ((x), DImode))
2901 else if ((outer_code == IOR || outer_code == XOR
2902 || outer_code == PLUS)
2903 && CONST_OK_FOR_I10 (INTVAL (x)))
2905 else if (CONST_OK_FOR_I16 (INTVAL (x)))
2906 *total = COSTS_N_INSNS (outer_code != SET);
2907 else if (CONST_OK_FOR_I16 (INTVAL (x) >> 16))
2908 *total = COSTS_N_INSNS ((outer_code != SET) + 1);
2909 else if (CONST_OK_FOR_I16 ((INTVAL (x) >> 16) >> 16))
2910 *total = COSTS_N_INSNS ((outer_code != SET) + 2);
2912 *total = COSTS_N_INSNS ((outer_code != SET) + 3);
2915 if (CONST_OK_FOR_I08 (INTVAL (x)))
2917 else if ((outer_code == AND || outer_code == IOR || outer_code == XOR)
2918 && CONST_OK_FOR_K08 (INTVAL (x)))
2920 /* prepare_cmp_insn will force costly constants into registers before
2921 the cbranch[sd]i4 patterns can see them, so preserve potentially
2922 interesting ones not covered by I08 above. */
2923 else if (outer_code == COMPARE
2924 && ((unsigned HOST_WIDE_INT) INTVAL (x)
2925 == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
2926 || INTVAL (x) == 0x7fffffff
2927 || INTVAL (x) == 0x80 || INTVAL (x) == -0x81))
2936 if (TARGET_SHMEDIA64)
2937 *total = COSTS_N_INSNS (4);
2938 else if (TARGET_SHMEDIA32)
2939 *total = COSTS_N_INSNS (2);
2946 *total = COSTS_N_INSNS (4);
2947 /* prepare_cmp_insn will force costly constants into registers before
2948 the cbranchdi4 pattern can see them, so preserve potentially
2949 interesting ones. */
2950 else if (outer_code == COMPARE && GET_MODE (x) == DImode)
2956 if (x == CONST0_RTX (GET_MODE (x)))
2958 else if (sh_1el_vec (x, VOIDmode))
2959 *total = outer_code != SET;
2960 if (sh_rep_vec (x, VOIDmode))
2961 *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
2962 + (outer_code != SET));
2963 *total = COSTS_N_INSNS (3) + (outer_code != SET);
2968 *total = COSTS_N_INSNS (addsubcosts (x));
2972 *total = COSTS_N_INSNS (andcosts (x));
2976 *total = COSTS_N_INSNS (multcosts (x));
2982 *total = COSTS_N_INSNS (shiftcosts (x));
2989 *total = COSTS_N_INSNS (20);
2993 if (sh_1el_vec (x, VOIDmode))
2994 *total = outer_code != SET;
2995 if (sh_rep_vec (x, VOIDmode))
2996 *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
2997 + (outer_code != SET));
2998 *total = COSTS_N_INSNS (3) + (outer_code != SET);
3011 /* Compute the cost of an address. For the SH, all valid addresses are
3012 the same cost. Use a slightly higher cost for reg + reg addressing,
3013 since it increases pressure on r0. */
3016 sh_address_cost (rtx X,
3017 bool speed ATTRIBUTE_UNUSED)
3019 return (GET_CODE (X) == PLUS
3020 && ! CONSTANT_P (XEXP (X, 1))
3021 && ! TARGET_SHMEDIA ? 1 : 0);
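/* Added example of the rule above: a reg+reg address such as @(r0,r4)
   is costed 1, while @(8,r4), @r4 and @r4+ are costed 0, nudging the
   optimizers away from the r0-hungry indexed form whenever an alternative
   addressing mode would do.  */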
3024 /* Code to expand a shift. */
3027 gen_ashift (int type, int n, rtx reg)
3029 /* Negative values here come from the shift_amounts array. */
3042 emit_insn (gen_ashrsi3_k (reg, reg, GEN_INT (n)));
3046 emit_insn (gen_lshrsi3_m (reg, reg, GEN_INT (n)));
3048 emit_insn (gen_lshrsi3_k (reg, reg, GEN_INT (n)));
3051 emit_insn (gen_ashlsi3_std (reg, reg, GEN_INT (n)));
3056 /* Same for HImode */
3059 gen_ashift_hi (int type, int n, rtx reg)
3061 /* Negative values here come from the shift_amounts array. */
3075 /* We don't have HImode right shift operations because using the
3076 ordinary 32 bit shift instructions for that doesn't generate proper
3077 zero/sign extension.
3078 gen_ashift_hi is only called in contexts where we know that the
3079 sign extension works out correctly. */
3082 if (GET_CODE (reg) == SUBREG)
3084 offset = SUBREG_BYTE (reg);
3085 reg = SUBREG_REG (reg);
3087 gen_ashift (type, n, gen_rtx_SUBREG (SImode, reg, offset));
3091 emit_insn (gen_ashlhi3_k (reg, reg, GEN_INT (n)));
3096 /* Output RTL to split a constant shift into its component SH constant
3097 shift instructions. */
3100 gen_shifty_op (int code, rtx *operands)
3102 int value = INTVAL (operands[2]);
3105 /* Truncate the shift count in case it is out of bounds. */
3110 if (code == LSHIFTRT)
3112 emit_insn (gen_rotlsi3_1 (operands[0], operands[0]));
3113 emit_insn (gen_movt (operands[0]));
3116 else if (code == ASHIFT)
3118 /* There is a two instruction sequence for 31 bit left shifts,
3119 but it requires r0. */
3120 if (REG_P (operands[0]) && REGNO (operands[0]) == 0)
3122 emit_insn (gen_andsi3 (operands[0], operands[0], const1_rtx));
3123 emit_insn (gen_rotlsi3_31 (operands[0], operands[0]));
3128 else if (value == 0)
3130 /* This can happen even when optimizing, if there were subregs before
3131 reload. Don't output a nop here, as this is never optimized away;
3132 use a no-op move instead. */
3133 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[0]));
3137 max = shift_insns[value];
3138 for (i = 0; i < max; i++)
3139 gen_ashift (code, shift_amounts[value][i], operands[0]);
3142 /* Same as above, but optimized for values where the topmost bits don't matter. */
3146 gen_shifty_hi_op (int code, rtx *operands)
3148 int value = INTVAL (operands[2]);
3150 void (*gen_fun) (int, int, rtx);
3152 /* This operation is used by and_shl for SImode values with a few
3153 high bits known to be cleared. */
3157 emit_insn (gen_nop ());
3161 gen_fun = GET_MODE (operands[0]) == HImode ? gen_ashift_hi : gen_ashift;
3164 max = ext_shift_insns[value];
3165 for (i = 0; i < max; i++)
3166 gen_fun (code, ext_shift_amounts[value][i], operands[0]);
3169 /* When shifting right, emit the shifts in reverse order, so that
3170 solitary negative values come first. */
3171 for (i = ext_shift_insns[value] - 1; i >= 0; i--)
3172 gen_fun (code, ext_shift_amounts[value][i], operands[0]);
3175 /* Output RTL for an arithmetic right shift. */
3177 /* ??? Rewrite to use super-optimizer sequences. */
3180 expand_ashiftrt (rtx *operands)
3188 if (!CONST_INT_P (operands[2]))
3190 rtx count = copy_to_mode_reg (SImode, operands[2]);
3191 emit_insn (gen_negsi2 (count, count));
3192 emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
3195 else if (ashiftrt_insns[INTVAL (operands[2]) & 31]
3196 > 1 + SH_DYNAMIC_SHIFT_COST)
3199 = force_reg (SImode, GEN_INT (- (INTVAL (operands[2]) & 31)));
3200 emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
3204 if (!CONST_INT_P (operands[2]))
3207 value = INTVAL (operands[2]) & 31;
3211 /* If we are called from abs expansion, arrange things so that we
3212 can use a single MT instruction that doesn't clobber the source,
3213 if LICM can hoist out the load of the constant zero. */
3214 if (currently_expanding_to_rtl)
3216 emit_insn (gen_cmpgtsi_t (force_reg (SImode, CONST0_RTX (SImode)),
3218 emit_insn (gen_mov_neg_si_t (operands[0]));
3221 emit_insn (gen_ashrsi2_31 (operands[0], operands[1]));
3224 else if (value >= 16 && value <= 19)
3226 wrk = gen_reg_rtx (SImode);
3227 emit_insn (gen_ashrsi2_16 (wrk, operands[1]));
3230 gen_ashift (ASHIFTRT, 1, wrk);
3231 emit_move_insn (operands[0], wrk);
3234 /* Expand a short sequence inline; for longer ones, call a magic routine. */
3235 else if (value <= 5)
3237 wrk = gen_reg_rtx (SImode);
3238 emit_move_insn (wrk, operands[1]);
3240 gen_ashift (ASHIFTRT, 1, wrk);
3241 emit_move_insn (operands[0], wrk);
3245 wrk = gen_reg_rtx (Pmode);
3247 /* Load the value into an arg reg and call a helper. */
3248 emit_move_insn (gen_rtx_REG (SImode, 4), operands[1]);
3249 sprintf (func, "__ashiftrt_r4_%d", value);
3250 function_symbol (wrk, func, SFUNC_STATIC);
3251 emit_insn (gen_ashrsi3_n (GEN_INT (value), wrk));
3252 emit_move_insn (operands[0], gen_rtx_REG (SImode, 4));
3257 sh_dynamicalize_shift_p (rtx count)
3259 return shift_insns[INTVAL (count) & 31] > 1 + SH_DYNAMIC_SHIFT_COST;
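/* Added note, assuming SH_DYNAMIC_SHIFT_COST == 1 (a CPU with dynamic
   shifts): the test above reports true for any constant shift that would
   need three or more shift insns - e.g. a shift by 5, where
   shift_insns[5] == 3 - so such shifts are better done as a dynamic
   shad / shld with the count loaded into a register.  */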
3262 /* Try to find a good way to implement the combiner pattern
3263 [(set (match_operand:SI 0 "register_operand" "r")
3264 (and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
3265 (match_operand:SI 2 "const_int_operand" "n"))
3266 (match_operand:SI 3 "const_int_operand" "n"))) .
3267 LEFT_RTX is operand 2 in the above pattern, and MASK_RTX is operand 3.
3268 return 0 for simple right / left or left/right shift combination.
3269 return 1 for a combination of shifts with zero_extend.
3270 return 2 for a combination of shifts with an AND that needs r0.
3271 return 3 for a combination of shifts with an AND that needs an extra
3272 scratch register, when the three highmost bits of the AND mask are clear.
3273 return 4 for a combination of shifts with an AND that needs an extra
3274 scratch register, when any of the three highmost bits of the AND mask is set.
3276 If ATTRP is set, store an initial right shift width in ATTRP[0],
3277 and the instruction length in ATTRP[1]. These values are not valid when returning 0.
3279 When ATTRP is set and returning 1, ATTRP[2] gets set to the index into
3280 shift_amounts for the last shift value that is to be used before the sign extend. */
3283 shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
3285 unsigned HOST_WIDE_INT mask, lsb, mask2, lsb2;
3286 int left = INTVAL (left_rtx), right;
3288 int cost, best_cost = 10000;
3289 int best_right = 0, best_len = 0;
3293 if (left < 0 || left > 31)
3295 if (CONST_INT_P (mask_rtx))
3296 mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
3298 mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
3299 /* Can this be expressed as a right shift / left shift pair? */
3300 lsb = ((mask ^ (mask - 1)) >> 1) + 1;
3301 right = exact_log2 (lsb);
3302 mask2 = ~(mask + lsb - 1);
3303 lsb2 = ((mask2 ^ (mask2 - 1)) >> 1) + 1;
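/* Added worked example: for mask == 0x38, mask ^ (mask - 1) == 0x0f,
   so lsb == 0x08 and right == exact_log2 (0x08) == 3; mask2 becomes
   ~(0x38 + 0x08 - 1) == ~0x3f, i.e. the bits above the low run of ones.  */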
3304 /* mask has no zeroes but trailing zeroes <==> ! mask2 */
3306 best_cost = shift_insns[right] + shift_insns[right + left];
3307 /* mask has no trailing zeroes <==> ! right */
3308 else if (! right && mask2 == ~(lsb2 - 1))
3310 int late_right = exact_log2 (lsb2);
3311 best_cost = shift_insns[left + late_right] + shift_insns[late_right];
3313 /* Try to use zero extend. */
3314 if (mask2 == ~(lsb2 - 1))
3318 for (width = 8; width <= 16; width += 8)
3320 /* Can we zero-extend right away? */
3321 if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
3324 = 1 + ext_shift_insns[right] + ext_shift_insns[left + right];
3325 if (cost < best_cost)
3336 /* ??? Could try to put zero extend into initial right shift,
3337 or even shift a bit left before the right shift. */
3338 /* Determine value of first part of left shift, to get to the
3339 zero extend cut-off point. */
3340 first = width - exact_log2 (lsb2) + right;
3341 if (first >= 0 && right + left - first >= 0)
3343 cost = ext_shift_insns[right] + ext_shift_insns[first] + 1
3344 + ext_shift_insns[right + left - first];
3345 if (cost < best_cost)
3357 /* Try to use r0 AND pattern */
3358 for (i = 0; i <= 2; i++)
3362 if (! CONST_OK_FOR_K08 (mask >> i))
3364 cost = (i != 0) + 2 + ext_shift_insns[left + i];
3365 if (cost < best_cost)
3370 best_len = cost - 1;
3373 /* Try to use a scratch register to hold the AND operand. */
3374 can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
3375 for (i = 0; i <= 2; i++)
3379 cost = (i != 0) + (CONST_OK_FOR_I08 (mask >> i) ? 2 : 3)
3380 + (can_ext ? ext_shift_insns : shift_insns)[left + i];
3381 if (cost < best_cost)
3386 best_len = cost - 1 - ! CONST_OK_FOR_I08 (mask >> i);
3392 attrp[0] = best_right;
3393 attrp[1] = best_len;
3398 /* This is used in length attributes of the unnamed instructions
3399 corresponding to shl_and_kind return values of 1 and 2. */
3401 shl_and_length (rtx insn)
3403 rtx set_src, left_rtx, mask_rtx;
3406 set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
3407 left_rtx = XEXP (XEXP (set_src, 0), 1);
3408 mask_rtx = XEXP (set_src, 1);
3409 shl_and_kind (left_rtx, mask_rtx, attributes);
3410 return attributes[1];
3413 /* This is used in length attribute of the and_shl_scratch instruction. */
3416 shl_and_scr_length (rtx insn)
3418 rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
3419 int len = shift_insns[INTVAL (XEXP (set_src, 1)) & 31];
3420 rtx op = XEXP (set_src, 0);
3421 len += shift_insns[INTVAL (XEXP (op, 1)) & 31] + 1;
3422 op = XEXP (XEXP (op, 0), 0);
3423 return len + shift_insns[INTVAL (XEXP (op, 1)) & 31];
3426 /* Generate rtl for instructions for which shl_and_kind advised a particular
3427 method of generating them, i.e. returned zero. */
3430 gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
3433 unsigned HOST_WIDE_INT mask;
3434 int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
3435 int right, total_shift;
3436 void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;
3438 right = attributes[0];
3439 total_shift = INTVAL (left_rtx) + right;
3440 mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> total_shift;
3447 int first = attributes[2];
3452 emit_insn ((mask << right) <= 0xff
3453 ? gen_zero_extendqisi2 (dest,
3454 gen_lowpart (QImode, source))
3455 : gen_zero_extendhisi2 (dest,
3456 gen_lowpart (HImode, source)));
3460 emit_insn (gen_movsi (dest, source));
3464 operands[2] = GEN_INT (right);
3465 gen_shifty_hi_op (LSHIFTRT, operands);
3469 operands[2] = GEN_INT (first);
3470 gen_shifty_hi_op (ASHIFT, operands);
3471 total_shift -= first;
3475 emit_insn (mask <= 0xff
3476 ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
3477 : gen_zero_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3478 if (total_shift > 0)
3480 operands[2] = GEN_INT (total_shift);
3481 gen_shifty_hi_op (ASHIFT, operands);
3486 shift_gen_fun = gen_shifty_op;
3488 /* If the topmost bit that matters is set, set the topmost bits
3489 that don't matter. This way, we might be able to get a shorter signed constant. */
3491 if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
3492 mask |= (HOST_WIDE_INT) ~0 << (31 - total_shift);
3494 /* Don't expand fine-grained when combining, because that will
3495 make the pattern fail. */
3496 if (currently_expanding_to_rtl
3497 || reload_in_progress || reload_completed)
3501 /* Cases 3 and 4 should be handled by this split
3502 only while combining. */
3503 gcc_assert (kind <= 2);
3506 emit_insn (gen_lshrsi3 (dest, source, GEN_INT (right)));
3509 emit_insn (gen_andsi3 (dest, source, GEN_INT (mask)));
3514 operands[2] = GEN_INT (total_shift);
3515 shift_gen_fun (ASHIFT, operands);
3522 if (kind != 4 && total_shift < 16)
3524 neg = -ext_shift_amounts[total_shift][1];
3526 neg -= ext_shift_amounts[total_shift][2];
3530 emit_insn (gen_and_shl_scratch (dest, source,
3533 GEN_INT (total_shift + neg),
3535 emit_insn (gen_movsi (dest, dest));
3542 /* Try to find a good way to implement the combiner pattern
3543 [(set (match_operand:SI 0 "register_operand" "=r")
3544 (sign_extract:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
3545 (match_operand:SI 2 "const_int_operand" "n")
3546 (match_operand:SI 3 "const_int_operand" "n")
3548 (clobber (reg:SI T_REG))]
3549 LEFT_RTX is operand 2 in the above pattern, and SIZE_RTX is operand 3.
3550 return 0 for simple left / right shift combination.
3551 return 1 for left shift / 8 bit sign extend / left shift.
3552 return 2 for left shift / 16 bit sign extend / left shift.
3553 return 3 for left shift / 8 bit sign extend / shift / sign extend.
3554 return 4 for left shift / 16 bit sign extend / shift / sign extend.
3555 return 5 for left shift / 16 bit sign extend / right shift
3556 return 6 for < 8 bit sign extend / left shift.
3557 return 7 for < 8 bit sign extend / left shift / single right shift.
3558 If COSTP is nonzero, assign the calculated cost to *COSTP. */
3561 shl_sext_kind (rtx left_rtx, rtx size_rtx, int *costp)
3563 int left, size, insize, ext;
3564 int cost = 0, best_cost;
3567 left = INTVAL (left_rtx);
3568 size = INTVAL (size_rtx);
3569 insize = size - left;
3570 gcc_assert (insize > 0);
3571 /* Default to left / right shift. */
3573 best_cost = shift_insns[32 - insize] + ashiftrt_insns[32 - size];
3576 /* 16 bit shift / sign extend / 16 bit shift */
3577 cost = shift_insns[16 - insize] + 1 + ashiftrt_insns[16 - size];
3578 /* If ashiftrt_insns[16 - size] is 8, this choice will be overridden
3579 below, by alternative 3 or something even better. */
3580 if (cost < best_cost)
3586 /* Try a plain sign extend between two shifts. */
3587 for (ext = 16; ext >= insize; ext -= 8)
3591 cost = ext_shift_insns[ext - insize] + 1 + shift_insns[size - ext];
3592 if (cost < best_cost)
3594 kind = ext / (unsigned) 8;
3598 /* Check if we can do a sloppy shift with a final signed shift
3599 restoring the sign. */
3600 if (EXT_SHIFT_SIGNED (size - ext))
3601 cost = ext_shift_insns[ext - insize] + ext_shift_insns[size - ext] + 1;
3602 /* If not, maybe it's still cheaper to do the second shift sloppy,
3603 and do a final sign extend? */
3604 else if (size <= 16)
3605 cost = ext_shift_insns[ext - insize] + 1
3606 + ext_shift_insns[size > ext ? size - ext : ext - size] + 1;
3609 if (cost < best_cost)
3611 kind = ext / (unsigned) 8 + 2;
3615 /* Check if we can sign extend in r0 */
3618 cost = 3 + shift_insns[left];
3619 if (cost < best_cost)
3624 /* Try the same with a final signed shift. */
3627 cost = 3 + ext_shift_insns[left + 1] + 1;
3628 if (cost < best_cost)
3637 /* Try to use a dynamic shift. */
3638 cost = shift_insns[32 - insize] + 1 + SH_DYNAMIC_SHIFT_COST;
3639 if (cost < best_cost)
3650 /* Function to be used in the length attribute of the instructions
3651 implementing this pattern. */
3654 shl_sext_length (rtx insn)
3656 rtx set_src, left_rtx, size_rtx;
3659 set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
3660 left_rtx = XEXP (XEXP (set_src, 0), 1);
3661 size_rtx = XEXP (set_src, 1);
3662 shl_sext_kind (left_rtx, size_rtx, &cost);
3666 /* Generate rtl for this pattern */
3669 gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
3672 int left, size, insize, cost;
3675 kind = shl_sext_kind (left_rtx, size_rtx, &cost);
3676 left = INTVAL (left_rtx);
3677 size = INTVAL (size_rtx);
3678 insize = size - left;
3686 int ext = kind & 1 ? 8 : 16;
3687 int shift2 = size - ext;
3689 /* Don't expand fine-grained when combining, because that will
3690 make the pattern fail. */
3691 if (! currently_expanding_to_rtl
3692 && ! reload_in_progress && ! reload_completed)
3694 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3695 emit_insn (gen_movsi (dest, source));
3699 emit_insn (gen_movsi (dest, source));
3703 operands[2] = GEN_INT (ext - insize);
3704 gen_shifty_hi_op (ASHIFT, operands);
3707 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
3708 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3713 operands[2] = GEN_INT (shift2);
3714 gen_shifty_op (ASHIFT, operands);
3721 if (EXT_SHIFT_SIGNED (shift2))
3723 operands[2] = GEN_INT (shift2 + 1);
3724 gen_shifty_op (ASHIFT, operands);
3725 operands[2] = const1_rtx;
3726 gen_shifty_op (ASHIFTRT, operands);
3729 operands[2] = GEN_INT (shift2);
3730 gen_shifty_hi_op (ASHIFT, operands);
3734 operands[2] = GEN_INT (-shift2);
3735 gen_shifty_hi_op (LSHIFTRT, operands);
3737 emit_insn (size <= 8
3738 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
3739 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3746 if (! currently_expanding_to_rtl
3747 && ! reload_in_progress && ! reload_completed)
3748 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3752 operands[2] = GEN_INT (16 - insize);
3753 gen_shifty_hi_op (ASHIFT, operands);
3754 emit_insn (gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3756 /* Don't use gen_ashrsi3 because it generates new pseudos. */
3758 gen_ashift (ASHIFTRT, 1, dest);
3763 /* Don't expand fine-grained when combining, because that will
3764 make the pattern fail. */
3765 if (! currently_expanding_to_rtl
3766 && ! reload_in_progress && ! reload_completed)
3768 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3769 emit_insn (gen_movsi (dest, source));
3772 emit_insn (gen_andsi3 (dest, source, GEN_INT ((1 << insize) - 1)));
3773 emit_insn (gen_xorsi3 (dest, dest, GEN_INT (1 << (insize - 1))));
3774 emit_insn (gen_addsi3 (dest, dest, GEN_INT (-1 << (insize - 1))));
3776 operands[2] = kind == 7 ? GEN_INT (left + 1) : left_rtx;
3777 gen_shifty_op (ASHIFT, operands);
3779 emit_insn (gen_ashrsi3_k (dest, dest, const1_rtx));
3787 /* Prefix a symbol_ref name with "datalabel". */
3790 gen_datalabel_ref (rtx sym)
3794 if (GET_CODE (sym) == LABEL_REF)
3795 return gen_rtx_CONST (GET_MODE (sym),
3796 gen_rtx_UNSPEC (GET_MODE (sym),
3800 gcc_assert (GET_CODE (sym) == SYMBOL_REF);
3802 str = XSTR (sym, 0);
3803 /* Share all SYMBOL_REF strings with the same value - that is important for cse. */
3805 str = IDENTIFIER_POINTER (get_identifier (str));
3806 XSTR (sym, 0) = str;
3812 static alloc_pool label_ref_list_pool;
3814 typedef struct label_ref_list_d
3817 struct label_ref_list_d *next;
3818 } *label_ref_list_t;
3820 /* The SH cannot load a large constant into a register, constants have to
3821 come from a pc relative load. The reference of a pc relative load
3822 instruction must be less than 1k in front of the instruction. This
3823 means that we often have to dump a constant inside a function, and
3824 generate code to branch around it.
3826 It is important to minimize this, since the branches will slow things
3827 down and make things bigger.
3829 Worst case code looks like:
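   An illustrative sketch (the original listing is abridged here; the
   L1..L4 labels are what the text below refers to):

	mov.l	L1,rn
	bra	L2
	nop
	align
   L1:	.long value
   L2:
	..

	mov.l	L3,rn
	bra	L4
	nop
	align
   L3:	.long value
   L4:
	..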
3847 We fix this by performing a scan before scheduling, which notices which
3848 instructions need to have their operands fetched from the constant table
3849 and builds the table.
3853 The algorithm is: scan, find an instruction which needs a pcrel move. Look forward, find the
3854 last barrier which is within MAX_COUNT bytes of the requirement.
3855 If there isn't one, make one. Process all the instructions between
3856 the find and the barrier.
3858 In the above example, we can tell that L3 is within 1k of L1, so
3859 the first move can be shrunk from the 3 insn+constant sequence into
3860 just 1 insn, and the constant moved to L3 to make:
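   (again an illustrative sketch:)

	mov.l	L1,rn
	..
	mov.l	L3,rn
	bra	L4
	nop
	align
   L3:	.long value
   L4:	.long value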
3871 Then the second move becomes the target for the shortening process. */
3875 rtx value; /* Value in table. */
3876 rtx label; /* Label of value. */
3877 label_ref_list_t wend; /* End of window. */
3878 enum machine_mode mode; /* Mode of value. */
3880 /* True if this constant is accessed as part of a post-increment
3881 sequence. Note that HImode constants are never accessed in this way. */
3882 bool part_of_sequence_p;
3885 /* The maximum number of constants that can fit into one pool, since
3886 constants in the range 0..510 are at least 2 bytes long, and in the
3887 range from there to 1018 at least 4 bytes. */
3889 #define MAX_POOL_SIZE 372
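/* Added arithmetic note: by the sizing above, up to 510 / 2 + (1018 - 510) / 4
   == 382 entries could fit in principle; 372 presumably leaves a margin for
   alignment padding and the branch around the table.  */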
3890 static pool_node pool_vector[MAX_POOL_SIZE];
3891 static int pool_size;
3892 static rtx pool_window_label;
3893 static int pool_window_last;
3895 static int max_labelno_before_reorg;
3897 /* ??? If we need a constant in HImode which is the truncated value of a
3898 constant we need in SImode, we could combine the two entries thus saving
3899 two bytes. Is this common enough to be worth the effort of implementing it? */
3902 /* ??? This stuff should be done at the same time that we shorten branches.
3903 As it is now, we must assume that all branches are the maximum size, and
3904 this causes us to almost always output constant pools sooner than necessary. */
3907 /* Add a constant to the pool and return its label. */
3910 add_constant (rtx x, enum machine_mode mode, rtx last_value)
3914 label_ref_list_t ref, newref;
3916 /* First see if we've already got it. */
3917 for (i = 0; i < pool_size; i++)
3919 if (x->code == pool_vector[i].value->code
3920 && mode == pool_vector[i].mode)
3922 if (x->code == CODE_LABEL)
3924 if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
3927 if (rtx_equal_p (x, pool_vector[i].value))
3932 || ! rtx_equal_p (last_value, pool_vector[i-1].value))
3934 new_rtx = gen_label_rtx ();
3935 LABEL_REFS (new_rtx) = pool_vector[i].label;
3936 pool_vector[i].label = lab = new_rtx;
3938 if (lab && pool_window_label)
3940 newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
3941 newref->label = pool_window_label;
3942 ref = pool_vector[pool_window_last].wend;
3944 pool_vector[pool_window_last].wend = newref;
3947 pool_window_label = new_rtx;
3948 pool_window_last = i;
3954 /* Need a new one. */
3955 pool_vector[pool_size].value = x;
3956 if (last_value && rtx_equal_p (last_value, pool_vector[pool_size - 1].value))
3959 pool_vector[pool_size - 1].part_of_sequence_p = true;
3962 lab = gen_label_rtx ();
3963 pool_vector[pool_size].mode = mode;
3964 pool_vector[pool_size].label = lab;
3965 pool_vector[pool_size].wend = NULL;
3966 pool_vector[pool_size].part_of_sequence_p = (lab == 0);
3967 if (lab && pool_window_label)
3969 newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
3970 newref->label = pool_window_label;
3971 ref = pool_vector[pool_window_last].wend;
3973 pool_vector[pool_window_last].wend = newref;
3976 pool_window_label = lab;
3977 pool_window_last = pool_size;
3982 /* Output the literal table. START, if nonzero, is the first instruction
3983 this table is needed for, and also indicates that there is at least one
3984 casesi_worker_2 instruction; we have to emit the operand3 labels from
3985 these insns at a 4-byte aligned position. BARRIER is the barrier
3986 after which we are to place the table. */
3989 dump_table (rtx start, rtx barrier)
3995 label_ref_list_t ref;
3998 /* Do two passes, first time dump out the HI sized constants. */
4000 for (i = 0; i < pool_size; i++)
4002 pool_node *p = &pool_vector[i];
4004 if (p->mode == HImode)
4008 scan = emit_insn_after (gen_align_2 (), scan);
4011 for (lab = p->label; lab; lab = LABEL_REFS (lab))
4012 scan = emit_label_after (lab, scan);
4013 scan = emit_insn_after (gen_consttable_2 (p->value, const0_rtx),
4015 for (ref = p->wend; ref; ref = ref->next)
4018 scan = emit_insn_after (gen_consttable_window_end (lab), scan);
4021 else if (p->mode == DFmode)
4029 scan = emit_insn_after (gen_align_4 (), scan);
4031 for (; start != barrier; start = NEXT_INSN (start))
4032 if (NONJUMP_INSN_P (start)
4033 && recog_memoized (start) == CODE_FOR_casesi_worker_2)
4035 rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
4036 rtx lab = XEXP (XVECEXP (src, 0, 3), 0);
4038 scan = emit_label_after (lab, scan);
4041 if (TARGET_FMOVD && TARGET_ALIGN_DOUBLE && have_df)
4043 rtx align_insn = NULL_RTX;
4045 scan = emit_label_after (gen_label_rtx (), scan);
4046 scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
4049 for (i = 0; i < pool_size; i++)
4051 pool_node *p = &pool_vector[i];
4059 if (align_insn && !p->part_of_sequence_p)
4061 for (lab = p->label; lab; lab = LABEL_REFS (lab))
4062 emit_label_before (lab, align_insn);
4063 emit_insn_before (gen_consttable_4 (p->value, const0_rtx),
4065 for (ref = p->wend; ref; ref = ref->next)
4068 emit_insn_before (gen_consttable_window_end (lab),
4071 delete_insn (align_insn);
4072 align_insn = NULL_RTX;
4077 for (lab = p->label; lab; lab = LABEL_REFS (lab))
4078 scan = emit_label_after (lab, scan);
4079 scan = emit_insn_after (gen_consttable_4 (p->value,
4081 need_align = ! need_align;
4087 scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
4092 for (lab = p->label; lab; lab = LABEL_REFS (lab))
4093 scan = emit_label_after (lab, scan);
4094 scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
4101 if (p->mode != HImode)
4103 for (ref = p->wend; ref; ref = ref->next)
4106 scan = emit_insn_after (gen_consttable_window_end (lab),
4115 for (i = 0; i < pool_size; i++)
4117 pool_node *p = &pool_vector[i];
4128 scan = emit_label_after (gen_label_rtx (), scan);
4129 scan = emit_insn_after (gen_align_4 (), scan);
4131 for (lab = p->label; lab; lab = LABEL_REFS (lab))
4132 scan = emit_label_after (lab, scan);
4133 scan = emit_insn_after (gen_consttable_4 (p->value, const0_rtx),
4141 scan = emit_label_after (gen_label_rtx (), scan);
4142 scan = emit_insn_after (gen_align_4 (), scan);
4144 for (lab = p->label; lab; lab = LABEL_REFS (lab))
4145 scan = emit_label_after (lab, scan);
4146 scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
4153 if (p->mode != HImode)
4155 for (ref = p->wend; ref; ref = ref->next)
4158 scan = emit_insn_after (gen_consttable_window_end (lab), scan);
4163 scan = emit_insn_after (gen_consttable_end (), scan);
4164 scan = emit_barrier_after (scan);
4166 pool_window_label = NULL_RTX;
4167 pool_window_last = 0;
4170 /* Return nonzero if constant would be an ok source for a
4171 mov.w instead of a mov.l. */
4176 return (CONST_INT_P (src)
4177 && INTVAL (src) >= -32768
4178 && INTVAL (src) <= 32767);
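/* Added note: this window matches mov.w's PC-relative pool load, which
   fetches a 16-bit value and sign-extends it to 32 bits - hence the signed
   16-bit range rather than 0..65535.  */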
4181 #define MOVA_LABELREF(mova) XVECEXP (SET_SRC (PATTERN (mova)), 0, 0)
4183 /* Nonzero if the insn is a move instruction which needs to be fixed. */
4185 /* ??? For DImode/DFmode moves, we don't need to fix it if each half of the
4186 CONST_DOUBLE input value is CONST_OK_FOR_I08. For a SFmode move, we don't
4187 need to fix it if the input value is CONST_OK_FOR_I08. */
4190 broken_move (rtx insn)
4192 if (NONJUMP_INSN_P (insn))
4194 rtx pat = PATTERN (insn);
4195 if (GET_CODE (pat) == PARALLEL)
4196 pat = XVECEXP (pat, 0, 0);
4197 if (GET_CODE (pat) == SET
4198 /* We can load any 8-bit value if we don't care what the high
4199 order bits end up as. */
4200 && GET_MODE (SET_DEST (pat)) != QImode
4201 && (CONSTANT_P (SET_SRC (pat))
4202 /* Match mova_const. */
4203 || (GET_CODE (SET_SRC (pat)) == UNSPEC
4204 && XINT (SET_SRC (pat), 1) == UNSPEC_MOVA
4205 && GET_CODE (XVECEXP (SET_SRC (pat), 0, 0)) == CONST))
4207 && GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
4208 && (fp_zero_operand (SET_SRC (pat))
4209 || fp_one_operand (SET_SRC (pat)))
4210 /* In general we don't know the current setting of fpscr, so disable fldi.
4211 There is an exception if this was a register-register move
4212 before reload - and hence it was ascertained that we have
4213 single precision setting - and in a post-reload optimization
4214 we changed this to do a constant load. In that case
4215 we don't have an r0 clobber, hence we must use fldi. */
4217 || (GET_CODE (XEXP (XVECEXP (PATTERN (insn), 0, 2), 0))
4219 && REG_P (SET_DEST (pat))
4220 && FP_REGISTER_P (REGNO (SET_DEST (pat))))
4222 && GET_MODE (SET_DEST (pat)) == SImode
4223 && (satisfies_constraint_I20 (SET_SRC (pat))
4224 || satisfies_constraint_I28 (SET_SRC (pat))))
4225 && ! satisfies_constraint_I08 (SET_SRC (pat)))
4235 return (NONJUMP_INSN_P (insn)
4236 && GET_CODE (PATTERN (insn)) == SET
4237 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
4238 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_MOVA
4239 /* Don't match mova_const. */
4240 && GET_CODE (MOVA_LABELREF (insn)) == LABEL_REF);
4243 /* Fix up a mova from a switch that went out of range. */
4245 fixup_mova (rtx mova)
4247 PUT_MODE (XEXP (MOVA_LABELREF (mova), 0), QImode);
4250 SET_SRC (PATTERN (mova)) = MOVA_LABELREF (mova);
4251 INSN_CODE (mova) = -1;
4256 rtx lab = gen_label_rtx ();
4257 rtx wpat, wpat0, wpat1, wsrc, target, base, diff;
4261 worker = NEXT_INSN (worker);
4263 && !LABEL_P (worker)
4264 && !JUMP_P (worker));
4265 } while (NOTE_P (worker)
4266 || recog_memoized (worker) != CODE_FOR_casesi_worker_1);
4267 wpat = PATTERN (worker);
4268 wpat0 = XVECEXP (wpat, 0, 0);
4269 wpat1 = XVECEXP (wpat, 0, 1);
4270 wsrc = SET_SRC (wpat0);
4271 PATTERN (worker) = (gen_casesi_worker_2
4272 (SET_DEST (wpat0), XVECEXP (wsrc, 0, 1),
4273 XEXP (XVECEXP (wsrc, 0, 2), 0), lab,
4275 INSN_CODE (worker) = -1;
4276 target = XVECEXP (SET_SRC (PATTERN (mova)), 0, 0);
4277 base = gen_rtx_LABEL_REF (Pmode, lab);
4278 diff = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, target, base), UNSPEC_SYMOFF);
4279 SET_SRC (PATTERN (mova)) = gen_rtx_CONST (Pmode, diff);
4280 INSN_CODE (mova) = -1;
4284 /* NEW_MOVA is a mova we've just encountered while scanning forward. Update
4285 *num_mova, and check if the new mova is not nested within the first one.
4286 Return 0 if *first_mova was replaced, 1 if new_mova was replaced,
4287 2 if new_mova has been assigned to *first_mova, -1 otherwise. */
4289 untangle_mova (int *num_mova, rtx *first_mova, rtx new_mova)
4291 int n_addr = 0; /* Initialization to shut up spurious warning. */
4292 int f_target, n_target = 0; /* Likewise. */
4296 /* If NEW_MOVA has no address yet, it will be handled later. */
4297 if (INSN_ADDRESSES_SIZE() <= (unsigned) INSN_UID (new_mova))
4300 n_addr = INSN_ADDRESSES (INSN_UID (new_mova));
4301 n_target = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (new_mova), 0)));
4302 if (n_addr > n_target || n_addr + 1022 < n_target)
4304 /* Change the mova into a load.
4305 broken_move will then return true for it. */
4306 fixup_mova (new_mova);
4312 *first_mova = new_mova;
4317 = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (*first_mova), 0))))
4322 if (f_target - INSN_ADDRESSES (INSN_UID (*first_mova))
4323 > n_target - n_addr)
4325 fixup_mova (*first_mova);
4330 fixup_mova (new_mova);
4335 /* Find the last barrier from insn FROM which is close enough to hold the
4336 constant pool. If we can't find one, then create one near the end of the range. */
4340 find_barrier (int num_mova, rtx mova, rtx from)
4349 int leading_mova = num_mova;
4350 rtx barrier_before_mova = 0, found_barrier = 0, good_barrier = 0;
4355 /* For HImode: range is 510, add 4 because pc counts from address of
4356 second instruction after this one, subtract 2 for the jump instruction
4357 that we may need to emit before the table, subtract 2 for the instruction
4358 that fills the jump delay slot (in very rare cases, reorg will take an
4359 instruction from after the constant pool or will leave the delay slot
4360 empty). This gives 510.
4361 For SImode: range is 1020, add 4 because pc counts from address of
4362 second instruction after this one, subtract 2 in case pc is 2 byte
4363 aligned, subtract 2 for the jump instruction that we may need to emit
4364 before the table, subtract 2 for the instruction that fills the jump
4365 delay slot. This gives 1018. */
4367 /* The branch will always be shortened now that the reference address for
4368 forward branches is the successor address, thus we need no longer make
4369 adjustments to the [sh]i_limit for -O0. */
4374 while (from && count_si < si_limit && count_hi < hi_limit)
4376 int inc = get_attr_length (from);
4379 /* If this is a label that existed at the time of the compute_alignments
4380 call, determine the alignment. N.B. When find_barrier recurses for
4381 an out-of-reach mova, we might see labels at the start of previously
4382 inserted constant tables. */
4384 && CODE_LABEL_NUMBER (from) <= max_labelno_before_reorg)
4387 new_align = 1 << label_to_alignment (from);
4388 else if (BARRIER_P (prev_nonnote_insn (from)))
4389 new_align = 1 << barrier_align (from);
4394 /* In case we are scanning a constant table because of recursion, check
4395 for explicit alignments. If the table is long, we might be forced
4396 to emit the new table in front of it; the length of the alignment
4397 might be the last straw. */
4398 else if (NONJUMP_INSN_P (from)
4399 && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
4400 && XINT (PATTERN (from), 1) == UNSPECV_ALIGN)
4401 new_align = INTVAL (XVECEXP (PATTERN (from), 0, 0));
4402 /* When we find the end of a constant table, paste the new constant
4403 at the end. That is better than putting it in front because
4404 this way, we don't need extra alignment for adding a 4-byte-aligned
4405 mov(a) label to a 2/4 or 8/4 byte aligned table. */
4406 else if (NONJUMP_INSN_P (from)
4407 && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
4408 && XINT (PATTERN (from), 1) == UNSPECV_CONST_END)
4411 if (BARRIER_P (from))
4415 found_barrier = from;
4417 /* If we are at the end of the function, or in front of an alignment
4418 instruction, we need not insert an extra alignment. We prefer
4419 this kind of barrier. */
4420 if (barrier_align (from) > 2)
4421 good_barrier = from;
4423 /* If we are at the end of a hot/cold block, dump the constants here. */
4425 next = NEXT_INSN (from);
4428 && NOTE_KIND (next) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
4432 if (broken_move (from))
4435 enum machine_mode mode;
4437 pat = PATTERN (from);
4438 if (GET_CODE (pat) == PARALLEL)
4439 pat = XVECEXP (pat, 0, 0);
4440 src = SET_SRC (pat);
4441 dst = SET_DEST (pat);
4442 mode = GET_MODE (dst);
4444 /* We must explicitly check the mode, because sometimes the
4445 front end will generate code to load unsigned constants into
4446 HImode targets without properly sign extending them. */
4448 || (mode == SImode && hi_const (src) && REGNO (dst) != FPUL_REG))
4451 /* We put the short constants before the long constants, so
4452 we must count the length of short constants in the range
4453 for the long constants. */
4454 /* ??? This isn't optimal, but is easy to do. */
4459 /* We dump DF/DI constants before SF/SI ones, because
4460 the limit is the same, but the alignment requirements
4461 are higher. We may waste up to 4 additional bytes
4462 for alignment, and the DF/DI constant may have
4463 another SF/SI constant placed before it. */
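/* Illustration: an 8-byte DFmode entry that would land at an offset that
is 4 mod 8 forces 4 bytes of padding; grouping the DF/DI entries first
pays that padding at most once, and a stray SF/SI entry emitted in front
of the group can absorb it entirely. */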
4464 if (TARGET_SHCOMPACT
4466 && (mode == DFmode || mode == DImode))
4471 while (si_align > 2 && found_si + si_align - 2 > count_si)
4473 if (found_si > count_si)
4474 count_si = found_si;
4475 found_si += GET_MODE_SIZE (mode);
4477 si_limit -= GET_MODE_SIZE (mode);
4483 switch (untangle_mova (&num_mova, &mova, from))
4485 case 0: return find_barrier (0, 0, mova);
4489 barrier_before_mova
4490 = good_barrier ? good_barrier : found_barrier;
4494 if (found_si > count_si)
4495 count_si = found_si;
4497 else if (JUMP_TABLE_DATA_P (from))
4499 if ((num_mova > 1 && GET_MODE (prev_nonnote_insn (from)) == VOIDmode)
4501 && (prev_nonnote_insn (from)
4502 == XEXP (MOVA_LABELREF (mova), 0))))
4504 if (barrier_align (next_real_insn (from)) == align_jumps_log)
4506 /* We have just passed the barrier in front of the
4507 ADDR_DIFF_VEC, which is stored in found_barrier. Since
4508 the ADDR_DIFF_VEC is accessed as data, just like our pool
4509 constants, this is a good opportunity to accommodate what
4510 we have gathered so far.
4511 If we waited any longer, we could end up at a barrier in
4512 front of code, which gives worse cache usage for separated
4513 instruction / data caches. */
4514 good_barrier = found_barrier;
4519 rtx body = PATTERN (from);
4520 inc = XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body));
4523 /* For the SH1, we generate alignments even after jumps-around-jumps. */
4524 else if (JUMP_P (from)
4526 && ! TARGET_SMALLCODE)
4532 if (new_align > si_align)
4534 si_limit -= (count_si - 1) & (new_align - si_align);
4535 si_align = new_align;
4537 count_si = (count_si + new_align - 1) & -new_align;
4542 if (new_align > hi_align)
4544 hi_limit -= (count_hi - 1) & (new_align - hi_align);
4545 hi_align = new_align;
4547 count_hi = (count_hi + new_align - 1) & -new_align;
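/* Both updates above use the standard round-up idiom: for a power of two
A, (N + A - 1) & -A rounds N up to the next multiple of A, e.g.
(10 + 4 - 1) & -4 == 12. */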
4549 from = NEXT_INSN (from);
4556 /* Try as we might, the leading mova is out of range. Change
4557 it into a load (which will become a pcload) and retry. */
4559 return find_barrier (0, 0, mova);
4563 /* Insert the constant pool table before the mova instruction,
4564 to prevent the mova label reference from going out of range. */
4566 good_barrier = found_barrier = barrier_before_mova;
4572 if (good_barrier && next_real_insn (found_barrier))
4573 found_barrier = good_barrier;
4577 /* We didn't find a barrier in time to dump our stuff,
4578 so we'll make one. */
4579 rtx label = gen_label_rtx ();
4581 /* If we exceeded the range, then we must back up over the last
4582 instruction we looked at. Otherwise, we just need to undo the
4583 NEXT_INSN at the end of the loop. */
4584 if (PREV_INSN (from) != orig
4585 && (count_hi > hi_limit || count_si > si_limit))
4586 from = PREV_INSN (PREV_INSN (from));
4588 from = PREV_INSN (from);
4590 /* Walk back to be just before any jump or label.
4591 Putting it before a label reduces the number of times the branch
4592 around the constant pool table will be hit. Putting it before
4593 a jump makes it more likely that the bra delay slot will be
4594 filled. */
4595 while (NOTE_P (from) || JUMP_P (from)
4597 from = PREV_INSN (from);
4599 from = emit_jump_insn_after (gen_jump (label), from);
4600 JUMP_LABEL (from) = label;
4601 LABEL_NUSES (label) = 1;
4602 found_barrier = emit_barrier_after (from);
4603 emit_label_after (label, found_barrier);
4606 return found_barrier;
4609 /* If the instruction INSN is implemented by a special function, and we can
4610 positively find the register that is used to call the sfunc, and this
4611 register is not used anywhere else in this instruction - except as the
4612 destination of a set, return this register; else, return 0. */
4614 sfunc_uses_reg (rtx insn)
4617 rtx pattern, part, reg_part, reg;
4619 if (!NONJUMP_INSN_P (insn))
4621 pattern = PATTERN (insn);
4622 if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
4625 for (reg_part = 0, i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
4627 part = XVECEXP (pattern, 0, i);
4628 if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == SImode)
4633 reg = XEXP (reg_part, 0);
4634 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
4636 part = XVECEXP (pattern, 0, i);
4637 if (part == reg_part || GET_CODE (part) == CLOBBER)
4639 if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
4640 && REG_P (SET_DEST (part)))
4641 ? SET_SRC (part) : part)))
4647 /* See if the only way in which INSN uses REG is by calling it, or by
4648 setting it while calling it. Set *SET to a SET rtx if the register
4649 is set by INSN. */
4651 static int
4652 noncall_uses_reg (rtx reg, rtx insn, rtx *set)
4658 reg2 = sfunc_uses_reg (insn);
4659 if (reg2 && REGNO (reg2) == REGNO (reg))
4661 pattern = single_set (insn);
4663 && REG_P (SET_DEST (pattern))
4664 && REGNO (reg) == REGNO (SET_DEST (pattern)))
4670 /* We don't use rtx_equal_p because we don't care if the mode is
4671 different. */
4672 pattern = single_set (insn);
4674 && REG_P (SET_DEST (pattern))
4675 && REGNO (reg) == REGNO (SET_DEST (pattern)))
4681 par = PATTERN (insn);
4682 if (GET_CODE (par) == PARALLEL)
4683 for (i = XVECLEN (par, 0) - 1; i >= 0; i--)
4685 part = XVECEXP (par, 0, i);
4686 if (GET_CODE (part) != SET && reg_mentioned_p (reg, part))
4689 return reg_mentioned_p (reg, SET_SRC (pattern));
4695 pattern = PATTERN (insn);
4697 if (GET_CODE (pattern) == PARALLEL)
4701 for (i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
4702 if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
4704 pattern = XVECEXP (pattern, 0, 0);
4707 if (GET_CODE (pattern) == SET)
4709 if (reg_mentioned_p (reg, SET_DEST (pattern)))
4711 /* We don't use rtx_equal_p, because we don't care if the
4712 mode is different. */
4713 if (!REG_P (SET_DEST (pattern))
4714 || REGNO (reg) != REGNO (SET_DEST (pattern)))
4720 pattern = SET_SRC (pattern);
4723 if (GET_CODE (pattern) != CALL
4724 || !MEM_P (XEXP (pattern, 0))
4725 || ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
4731 /* Given X, a pattern of an insn or a part of it, return a mask of used
4732 general registers. Bits 0..15 mean that the respective registers
4733 are used as inputs in the instruction. Bits 16..31 mean that the
4734 registers 0..15, respectively, are used as outputs, or are clobbered.
4735 IS_DEST should be set to 16 if X is the destination of a SET, else to 0. */
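/* For example, under this encoding an SImode use of r1 sets bit 1; r1 as
a SET destination (IS_DEST == 16) sets bit 17; and a DImode use of r2,
which spans two hard registers, sets bits 2 and 3. */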
4737 regs_used (rtx x, int is_dest)
4745 code = GET_CODE (x);
4750 return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
4751 << (REGNO (x) + is_dest));
4755 rtx y = SUBREG_REG (x);
4760 return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
4762 subreg_regno_offset (REGNO (y),
4765 GET_MODE (x)) + is_dest));
4769 return regs_used (SET_SRC (x), 0) | regs_used (SET_DEST (x), 16);
4771 /* If there was a return value, it must have been indicated with USE. */
4786 fmt = GET_RTX_FORMAT (code);
4788 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4793 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4794 used |= regs_used (XVECEXP (x, i, j), is_dest);
4796 else if (fmt[i] == 'e')
4797 used |= regs_used (XEXP (x, i), is_dest);
4802 /* Create an instruction that prevents redirection of a conditional branch
4803 to the destination of the JUMP with address ADDR.
4804 If the branch needs to be implemented as an indirect jump, try to find
4805 a scratch register for it.
4806 If NEED_BLOCK is 0, don't do anything unless we need a scratch register.
4807 If any preceding insn that doesn't fit into a delay slot is good enough,
4808 pass 1. Pass 2 if a definite blocking insn is needed.
4809 -1 is used internally to avoid deep recursion.
4810 If a blocking instruction is made or recognized, return it. */
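/* (The callers below illustrate the contract: gen_far_branch passes
NEED_BLOCK == 2 to force a definite blocking insn that keeps reorg from
undoing its split, while split_branches passes 1 where any preceding
insn that won't fit in a delay slot is good enough.) */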
4813 gen_block_redirect (rtx jump, int addr, int need_block)
4816 rtx prev = prev_nonnote_insn (jump);
4819 /* First, check if we already have an instruction that satisfies our need. */
4820 if (prev && NONJUMP_INSN_P (prev) && ! INSN_DELETED_P (prev))
4822 if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
4824 if (GET_CODE (PATTERN (prev)) == USE
4825 || GET_CODE (PATTERN (prev)) == CLOBBER
4826 || get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
4828 else if ((need_block &= ~1) < 0)
4830 else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
4833 if (GET_CODE (PATTERN (jump)) == RETURN)
4837 /* Reorg even does nasty things with return insns that cause branches
4838 to go out of range - see find_end_label and callers. */
4839 return emit_insn_before (gen_block_branch_redirect (const0_rtx) , jump);
4841 /* We can't use JUMP_LABEL here because it might be undefined
4842 when not optimizing. */
4843 dest = XEXP (SET_SRC (PATTERN (jump)), 0);
4844 /* If the branch is out of range, try to find a scratch register for it. */
4846 && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
4850 /* Don't look for the stack pointer as a scratch register,
4851 it would cause trouble if an interrupt occurred. */
4852 unsigned attempt = 0x7fff, used;
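/* 0x7fff has bits 0..14 set, i.e. r0-r14; r15, the stack pointer, is
masked out from the start, per the comment above. */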
4853 int jump_left = flag_expensive_optimizations + 1;
4855 /* It is likely that the most recent eligible instruction is wanted for
4856 the delay slot. Therefore, find out which registers it uses, and
4857 try to avoid using them. */
4859 for (scan = jump; (scan = PREV_INSN (scan)); )
4863 if (INSN_DELETED_P (scan))
4865 code = GET_CODE (scan);
4866 if (code == CODE_LABEL || code == JUMP_INSN)
4869 && GET_CODE (PATTERN (scan)) != USE
4870 && GET_CODE (PATTERN (scan)) != CLOBBER
4871 && get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES)
4873 attempt &= ~regs_used (PATTERN (scan), 0);
4877 for (used = dead = 0, scan = JUMP_LABEL (jump);
4878 (scan = NEXT_INSN (scan)); )
4882 if (INSN_DELETED_P (scan))
4884 code = GET_CODE (scan);
4887 used |= regs_used (PATTERN (scan), 0);
4888 if (code == CALL_INSN)
4889 used |= regs_used (CALL_INSN_FUNCTION_USAGE (scan), 0);
4890 dead |= (used >> 16) & ~used;
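/* In the regs_used encoding, bits 16..31 record writes and bits 0..15
record reads; a write bit whose corresponding read bit is clear means
the register was set before any use on this path, so it is dead at the
jump and is a candidate scratch register. */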
4896 if (code == JUMP_INSN)
4898 if (jump_left-- && simplejump_p (scan))
4899 scan = JUMP_LABEL (scan);
4905 /* Mask out the stack pointer again, in case it was
4906 the only 'free' register we have found. */
4909 /* If the immediate destination is still in range, check for possible
4910 threading with a jump beyond the delay slot insn.
4911 Don't check if we are called recursively; in that case the jump has
4912 been or will be checked in a different invocation. */
4914 else if (optimize && need_block >= 0)
4916 rtx next = next_active_insn (next_active_insn (dest));
4917 if (next && JUMP_P (next)
4918 && GET_CODE (PATTERN (next)) == SET
4919 && recog_memoized (next) == CODE_FOR_jump_compact)
4921 dest = JUMP_LABEL (next);
4923 && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
4925 gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), -1);
4931 rtx reg = gen_rtx_REG (SImode, exact_log2 (dead & -dead));
4933 /* It would be nice if we could convert the jump into an indirect
4934 jump / far branch right now, and thus exposing all constituent
4935 instructions to further optimization. However, reorg uses
4936 simplejump_p to determine if there is an unconditional jump where
4937 it should try to schedule instructions from the target of the
4938 branch; simplejump_p fails for indirect jumps even if they have
4939 a JUMP_LABEL. */
4940 rtx insn = emit_insn_before (gen_indirect_jump_scratch
4941 (reg, GEN_INT (INSN_UID (JUMP_LABEL (jump))))
4943 /* ??? We would like this to have the scope of the jump, but that
4944 scope will change when a delay slot insn of an inner scope is added.
4945 Hence, after delay slot scheduling, we'll have to expect
4946 NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and
4947 the jump. */
4949 INSN_LOCATOR (insn) = INSN_LOCATOR (jump);
4950 INSN_CODE (insn) = CODE_FOR_indirect_jump_scratch;
4953 else if (need_block)
4954 /* We can't use JUMP_LABEL here because it might be undefined
4955 when not optimizing. */
4956 return emit_insn_before (gen_block_branch_redirect
4957 (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))))
4962 #define CONDJUMP_MIN -252
4963 #define CONDJUMP_MAX 262
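/* These bounds presumably derive from the signed 8-bit displacement of SH
conditional branches, which is scaled by 2 and counted from pc + 4
(roughly a -256..+258 byte reach), adjusted for the insns this pass
inserts around a redirected branch. */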
4964 struct far_branch
4965 {
4966 /* A label (to be placed) in front of the jump
4967 that jumps to our ultimate destination. */
4968 rtx near_label;
4969 /* Where we are going to insert it if we cannot move the jump any farther,
4970 or the jump itself if we have picked up an existing jump. */
4971 rtx insert_place;
4972 /* The ultimate destination. */
4973 rtx far_label;
4974 struct far_branch *prev;
4975 /* If the branch has already been created, its address;
4976 else the address of its first prospective user. */
4977 int address;
4978 };
4980 static void gen_far_branch (struct far_branch *);
4981 enum mdep_reorg_phase_e mdep_reorg_phase;
4983 gen_far_branch (struct far_branch *bp)
4985 rtx insn = bp->insert_place;
4987 rtx label = gen_label_rtx ();
4990 emit_label_after (label, insn);
4993 jump = emit_jump_insn_after (gen_jump (bp->far_label), insn);
4994 LABEL_NUSES (bp->far_label)++;
4997 jump = emit_jump_insn_after (gen_return (), insn);
4998 /* Emit a barrier so that reorg knows that any following instructions
4999 are not reachable via a fall-through path.
5000 But don't do this when not optimizing, since we wouldn't suppress the
5001 alignment for the barrier then, and could end up with out-of-range
5002 pc-relative loads. */
5004 emit_barrier_after (jump);
5005 emit_label_after (bp->near_label, insn);
5006 JUMP_LABEL (jump) = bp->far_label;
5007 ok = invert_jump (insn, label, 1);
5010 /* If we are branching around a jump (rather than a return), prevent
5011 reorg from using an insn from the jump target as the delay slot insn -
5012 when reorg did this, it pessimized code (we would rather hide the delay slot)
5013 and it could cause branches to go out of range. */
5016 (gen_stuff_delay_slot
5017 (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))),
5018 GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
5020 /* Prevent reorg from undoing our splits. */
5021 gen_block_redirect (jump, bp->address += 2, 2);
5024 /* Fix up ADDR_DIFF_VECs. */
5026 fixup_addr_diff_vecs (rtx first)
5030 for (insn = first; insn; insn = NEXT_INSN (insn))
5032 rtx vec_lab, pat, prev, prevpat, x, braf_label;
5035 || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
5037 pat = PATTERN (insn);
5038 vec_lab = XEXP (XEXP (pat, 0), 0);
5040 /* Search for the matching casesi_jump_2. */
5041 for (prev = vec_lab; ; prev = PREV_INSN (prev))
5045 prevpat = PATTERN (prev);
5046 if (GET_CODE (prevpat) != PARALLEL || XVECLEN (prevpat, 0) != 2)
5048 x = XVECEXP (prevpat, 0, 1);
5049 if (GET_CODE (x) != USE)
5052 if (GET_CODE (x) == LABEL_REF && XEXP (x, 0) == vec_lab)
5055 /* FIXME: This is a bug in the optimizer, but it seems harmless
5056 to just avoid panicking. */
5060 /* Emit the reference label of the braf where it belongs, right after
5061 the casesi_jump_2 (i.e. braf). */
5062 braf_label = XEXP (XEXP (SET_SRC (XVECEXP (prevpat, 0, 0)), 1), 0);
5063 emit_label_after (braf_label, prev);
5065 /* Fix up the ADDR_DIFF_VEC to be relative
5066 to the reference address of the braf. */
5067 XEXP (XEXP (pat, 0), 0) = braf_label;
5071 /* BARRIER_OR_LABEL is either a BARRIER or a CODE_LABEL immediately following
5072 a barrier. Return the base 2 logarithm of the desired alignment. */
5074 barrier_align (rtx barrier_or_label)
5076 rtx next = next_real_insn (barrier_or_label), pat, prev;
5077 int slot, credit, jump_to_next = 0;
5082 pat = PATTERN (next);
5084 if (GET_CODE (pat) == ADDR_DIFF_VEC)
5087 if (GET_CODE (pat) == UNSPEC_VOLATILE && XINT (pat, 1) == UNSPECV_ALIGN)
5088 /* This is a barrier in front of a constant table. */
5091 prev = prev_real_insn (barrier_or_label);
5092 if (GET_CODE (PATTERN (prev)) == ADDR_DIFF_VEC)
5094 pat = PATTERN (prev);
5095 /* If this is a very small table, we want to keep the alignment after
5096 the table to the minimum for proper code alignment. */
5097 return ((TARGET_SMALLCODE
5098 || ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
5099 <= (unsigned) 1 << (CACHE_LOG - 2)))
5100 ? 1 << TARGET_SHMEDIA : align_jumps_log);
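/* For illustration, assuming CACHE_LOG == 5 (32-byte cache lines): tables
of at most 1 << (5 - 2) == 8 bytes keep the minimal alignment after the
table, while larger ones get align_jumps_log. */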
5103 if (TARGET_SMALLCODE)
5106 if (! TARGET_SH2 || ! optimize)
5107 return align_jumps_log;
5109 /* When fixing up pcloads, a constant table might be inserted just before
5110 the basic block that ends with the barrier. Thus, we can't trust the
5111 instruction lengths before that. */
5112 if (mdep_reorg_phase > SH_FIXUP_PCLOAD)
5114 /* Check if there is an immediately preceding branch to the insn beyond
5115 the barrier. We must weigh the cost of discarding useful information
5116 from the current cache line when executing this branch and there is
5117 an alignment, against that of fetching unneeded insns in front of the
5118 branch target when there is no alignment. */
5120 /* There are two delay_slot cases to consider. One is the simple case
5121 where the preceding branch is to the insn beyond the barrier (simple
5122 delay slot filling), and the other is where the preceding branch has
5123 a delay slot that is a duplicate of the insn after the barrier
5124 (fill_eager_delay_slots) and the branch is to the insn after the insn
5125 after the barrier. */
5127 /* PREV is presumed to be the JUMP_INSN for the barrier under
5128 investigation. Skip to the insn before it. */
5129 prev = prev_real_insn (prev);
5131 for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
5132 credit >= 0 && prev && NONJUMP_INSN_P (prev);
5133 prev = prev_real_insn (prev))
5136 if (GET_CODE (PATTERN (prev)) == USE
5137 || GET_CODE (PATTERN (prev)) == CLOBBER)
5139 if (GET_CODE (PATTERN (prev)) == SEQUENCE)
5141 prev = XVECEXP (PATTERN (prev), 0, 1);
5142 if (INSN_UID (prev) == INSN_UID (next))
5144 /* Delay slot was filled with insn at jump target. */
5151 get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
5153 credit -= get_attr_length (prev);
5157 && JUMP_LABEL (prev))
5161 || next_real_insn (JUMP_LABEL (prev)) == next
5162 /* If relax_delay_slots() decides NEXT was redundant
5163 with some previous instruction, it will have
5164 redirected PREV's jump to the following insn. */
5165 || JUMP_LABEL (prev) == next_nonnote_insn (next)
5166 /* There is no upper bound on redundant instructions
5167 that might have been skipped, but we must not put an
5168 alignment where none had been before. */
5169 || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
5171 && (INSN_CODE (x) == CODE_FOR_block_branch_redirect
5172 || INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
5173 || INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
5175 rtx pat = PATTERN (prev);
5176 if (GET_CODE (pat) == PARALLEL)
5177 pat = XVECEXP (pat, 0, 0);
5178 if (credit - slot >= (GET_CODE (SET_SRC (pat)) == PC ? 2 : 0))
5184 return align_jumps_log;
5187 /* If we are inside a phony loop, almost any kind of label can turn up as the
5188 first one in the loop. Aligning a braf label causes incorrect switch
5189 destination addresses; we can detect braf labels because they are
5190 followed by a BARRIER.
5191 Applying loop alignment to small constant or switch tables is a waste
5192 of space, so we suppress this too. */
5194 sh_loop_align (rtx label)
5199 next = next_nonnote_insn (next);
5200 while (next && LABEL_P (next));
5204 || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC
5205 || recog_memoized (next) == CODE_FOR_consttable_2)
5208 return align_loops_log;
5211 /* Do a final pass over the function, just before delayed branch
5217 rtx first, insn, mova = NULL_RTX;
5219 rtx r0_rtx = gen_rtx_REG (Pmode, 0);
5220 rtx r0_inc_rtx = gen_rtx_POST_INC (Pmode, r0_rtx);
5222 first = get_insns ();
5223 max_labelno_before_reorg = max_label_num ();
5225 /* We must split call insns before introducing `mova's. If we're
5226 optimizing, they'll have already been split. Otherwise, make
5227 sure we don't split them too late. */
5229 split_all_insns_noflow ();
5234 /* If relaxing, generate pseudo-ops to associate function calls with
5235 the symbols they call. It does no harm to not generate these
5236 pseudo-ops. However, when we can generate them, it enables the
5237 linker to potentially relax the jsr to a bsr, and eliminate the
5238 register load and, possibly, the constant pool entry. */
5240 mdep_reorg_phase = SH_INSERT_USES_LABELS;
5243 /* Remove all REG_LABEL_OPERAND notes. We want to use them for our
5244 own purposes. This works because none of the remaining passes
5245 need to look at them.
5247 ??? But it may break in the future. We should use a machine
5248 dependent REG_NOTE, or some other approach entirely. */
5249 for (insn = first; insn; insn = NEXT_INSN (insn))
5255 while ((note = find_reg_note (insn, REG_LABEL_OPERAND,
5257 remove_note (insn, note);
5261 for (insn = first; insn; insn = NEXT_INSN (insn))
5263 rtx pattern, reg, link, set, scan, dies, label;
5264 int rescan = 0, foundinsn = 0;
5268 pattern = PATTERN (insn);
5270 if (GET_CODE (pattern) == PARALLEL)
5271 pattern = XVECEXP (pattern, 0, 0);
5272 if (GET_CODE (pattern) == SET)
5273 pattern = SET_SRC (pattern);
5275 if (GET_CODE (pattern) != CALL
5276 || !MEM_P (XEXP (pattern, 0)))
5279 reg = XEXP (XEXP (pattern, 0), 0);
5283 reg = sfunc_uses_reg (insn);
5291 /* Try scanning backward to find where the register is set. */
5293 for (scan = PREV_INSN (insn);
5294 scan && !LABEL_P (scan);
5295 scan = PREV_INSN (scan))
5297 if (! INSN_P (scan))
5300 if (! reg_mentioned_p (reg, scan))
5303 if (noncall_uses_reg (reg, scan, &set))
5316 /* The register is set at LINK. */
5318 /* We can only optimize the function call if the register is
5319 being set to a symbol. In theory, we could sometimes
5320 optimize calls to a constant location, but the assembler
5321 and linker do not support that at present. */
5322 if (GET_CODE (SET_SRC (set)) != SYMBOL_REF
5323 && GET_CODE (SET_SRC (set)) != LABEL_REF)
5326 /* Scan forward from LINK to the place where REG dies, and
5327 make sure that the only insns which use REG are
5328 themselves function calls. */
5330 /* ??? This doesn't work for call targets that were allocated
5332 by reload, since there may not be a REG_DEAD note for the
5333 register. */
5335 for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
5339 /* Don't try to trace forward past a CODE_LABEL if we haven't
5340 seen INSN yet. Ordinarily, we will only find the setting insn
5341 if it is in the same basic block. However,
5342 cross-jumping can insert code labels in between the load and
5343 the call, and can result in situations where a single call
5344 insn may have two targets depending on where we came from. */
5346 if (LABEL_P (scan) && ! foundinsn)
5349 if (! INSN_P (scan))
5352 /* Don't try to trace forward past a JUMP. To optimize
5353 safely, we would have to check that all the
5354 instructions at the jump destination did not use REG. */
5359 if (! reg_mentioned_p (reg, scan))
5362 if (noncall_uses_reg (reg, scan, &scanset))
5369 && (CALL_P (scan) || sfunc_uses_reg (scan)))
5371 /* There is a function call to this register other
5372 than the one we are checking. If we optimize
5373 this call, we need to rescan again below. */
5377 /* ??? We shouldn't have to worry about SCANSET here.
5378 We should just be able to check for a REG_DEAD note
5379 on a function call. However, the REG_DEAD notes are
5380 apparently not dependable around libcalls; c-torture
5381 execute/920501-2 is a test case. If SCANSET is set,
5382 then this insn sets the register, so it must have
5383 died earlier. Unfortunately, this will only handle
5384 the cases in which the register is, in fact, set in a
5387 /* ??? We shouldn't have to use FOUNDINSN here.
5388 This dates back to when we used LOG_LINKS to find
5389 the most recent insn which sets the register. */
5393 || find_reg_note (scan, REG_DEAD, reg)))
5402 /* Either there was a branch, or some insn used REG
5403 other than as a function call address. */
5407 /* Create a code label, and put it in a REG_LABEL_OPERAND note
5408 on the insn which sets the register, and on each call insn
5409 which uses the register. In final_prescan_insn we look for
5410 the REG_LABEL_OPERAND notes, and output the appropriate label
5411 or pseudo-op. */
5413 label = gen_label_rtx ();
5414 add_reg_note (link, REG_LABEL_OPERAND, label);
5415 add_reg_note (insn, REG_LABEL_OPERAND, label);
5423 scan = NEXT_INSN (scan);
5426 && reg_mentioned_p (reg, scan))
5427 || ((reg2 = sfunc_uses_reg (scan))
5428 && REGNO (reg2) == REGNO (reg))))
5429 add_reg_note (scan, REG_LABEL_OPERAND, label);
5431 while (scan != dies);
5437 fixup_addr_diff_vecs (first);
5441 mdep_reorg_phase = SH_SHORTEN_BRANCHES0;
5442 shorten_branches (first);
5445 /* Scan the function looking for move instructions which have to be
5446 changed to pc-relative loads and insert the literal tables. */
5447 label_ref_list_pool = create_alloc_pool ("label references list",
5448 sizeof (struct label_ref_list_d),
5450 mdep_reorg_phase = SH_FIXUP_PCLOAD;
5451 for (insn = first, num_mova = 0; insn; insn = NEXT_INSN (insn))
5455 /* ??? basic block reordering can move a switch table dispatch
5456 below the switch table. Check if that has happened.
5457 We only have the addresses available when optimizing; but then,
5458 this check shouldn't be needed when not optimizing. */
5459 if (!untangle_mova (&num_mova, &mova, insn))
5465 else if (JUMP_P (insn)
5466 && GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
5468 /* ??? loop invariant motion can also move a mova out of a
5469 loop. Since loop does this code motion anyway, maybe we
5470 should wrap UNSPEC_MOVA into a CONST, so that reload can
5471 fix it up. */
5473 && GET_MODE (prev_nonnote_insn (insn)) == VOIDmode)
5474 || (prev_nonnote_insn (insn)
5475 == XEXP (MOVA_LABELREF (mova), 0))))
5482 /* Some code might have been inserted between the mova and
5483 its ADDR_DIFF_VEC. Check if the mova is still in range. */
5484 for (scan = mova, total = 0; scan != insn; scan = NEXT_INSN (scan))
5485 total += get_attr_length (scan);
5487 /* The range of mova is 1020; add 4 because pc counts from the address of
5488 the second instruction after this one, subtract 2 in case pc is 2
5489 byte aligned. Possible alignment needed for the ADDR_DIFF_VEC
5490 cancels out with alignment effects of the mova itself. */
5491 if (total > 1022)
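/* (1022 == 1020 + 4 - 2, the same bound tested as n_addr + 1022 in
untangle_mova above.) */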
5493 /* Change the mova into a load, and restart scanning
5494 there. broken_move will then return true for mova. */
5499 if (broken_move (insn)
5500 || (NONJUMP_INSN_P (insn)
5501 && recog_memoized (insn) == CODE_FOR_casesi_worker_2))
5504 /* Scan ahead looking for a barrier to stick the constant table
5505 behind. */
5506 rtx barrier = find_barrier (num_mova, mova, insn);
5507 rtx last_float_move = NULL_RTX, last_float = 0, *last_float_addr = NULL;
5508 int need_aligned_label = 0;
5510 if (num_mova && ! mova_p (mova))
5512 /* find_barrier had to change the first mova into a
5513 pcload; thus, we have to start with this new pcload. */
5517 /* Now find all the moves between the points and modify them. */
5518 for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
5522 if (NONJUMP_INSN_P (scan)
5523 && recog_memoized (scan) == CODE_FOR_casesi_worker_2)
5524 need_aligned_label = 1;
5525 if (broken_move (scan))
5527 rtx *patp = &PATTERN (scan), pat = *patp;
5531 enum machine_mode mode;
5533 if (GET_CODE (pat) == PARALLEL)
5534 patp = &XVECEXP (pat, 0, 0), pat = *patp;
5535 src = SET_SRC (pat);
5536 dst = SET_DEST (pat);
5537 mode = GET_MODE (dst);
5539 if (mode == SImode && hi_const (src)
5540 && REGNO (dst) != FPUL_REG)
5545 while (GET_CODE (dst) == SUBREG)
5547 offset += subreg_regno_offset (REGNO (SUBREG_REG (dst)),
5548 GET_MODE (SUBREG_REG (dst)),
5551 dst = SUBREG_REG (dst);
5553 dst = gen_rtx_REG (HImode, REGNO (dst) + offset);
5555 if (REG_P (dst) && FP_ANY_REGISTER_P (REGNO (dst)))
5557 /* This must be an insn that clobbers r0. */
5558 rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
5559 XVECLEN (PATTERN (scan), 0)
5561 rtx clobber = *clobberp;
5563 gcc_assert (GET_CODE (clobber) == CLOBBER
5564 && rtx_equal_p (XEXP (clobber, 0), r0_rtx));
5567 && reg_set_between_p (r0_rtx, last_float_move, scan))
5571 && GET_MODE_SIZE (mode) != 4
5572 && GET_MODE_SIZE (GET_MODE (last_float)) == 4)
5574 lab = add_constant (src, mode, last_float);
5576 emit_insn_before (gen_mova (lab), scan);
5579 /* There will be a REG_UNUSED note for r0 on
5580 LAST_FLOAT_MOVE; we have to change it to REG_INC,
5581 otherwise reorg:mark_target_live_regs will not
5582 consider r0 to be used, and we could end up with a delay
5583 slot insn in front of SCAN that clobbers r0. */
5585 = find_regno_note (last_float_move, REG_UNUSED, 0);
5587 /* If we are not optimizing, then there may not be
5588 a note. */
5589 if (note)
5590 PUT_REG_NOTE_KIND (note, REG_INC);
5592 *last_float_addr = r0_inc_rtx;
5594 last_float_move = scan;
5596 newsrc = gen_const_mem (mode,
5597 (((TARGET_SH4 && ! TARGET_FMOVD)
5598 || REGNO (dst) == FPUL_REG)
5601 last_float_addr = &XEXP (newsrc, 0);
5603 /* Remove the clobber of r0. */
5604 *clobberp = gen_rtx_CLOBBER (GET_MODE (clobber),
5605 gen_rtx_SCRATCH (Pmode));
5607 /* This is a mova needing a label. Create it. */
5608 else if (GET_CODE (src) == UNSPEC
5609 && XINT (src, 1) == UNSPEC_MOVA
5610 && GET_CODE (XVECEXP (src, 0, 0)) == CONST)
5612 lab = add_constant (XVECEXP (src, 0, 0), mode, 0);
5613 newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
5614 newsrc = gen_rtx_UNSPEC (SImode,
5615 gen_rtvec (1, newsrc),
5620 lab = add_constant (src, mode, 0);
5621 newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
5622 newsrc = gen_const_mem (mode, newsrc);
5624 *patp = gen_rtx_SET (VOIDmode, dst, newsrc);
5625 INSN_CODE (scan) = -1;
5628 dump_table (need_aligned_label ? insn : 0, barrier);
5632 free_alloc_pool (label_ref_list_pool);
5633 for (insn = first; insn; insn = NEXT_INSN (insn))
5634 PUT_MODE (insn, VOIDmode);
5636 mdep_reorg_phase = SH_SHORTEN_BRANCHES1;
5637 INSN_ADDRESSES_FREE ();
5638 split_branches (first);
5640 /* The INSN_REFERENCES_ARE_DELAYED in sh.h is problematic because it
5641 also has an effect on the register that holds the address of the sfunc.
5642 Insert an extra dummy insn in front of each sfunc that pretends to
5643 use this register. */
5644 if (flag_delayed_branch)
5646 for (insn = first; insn; insn = NEXT_INSN (insn))
5648 rtx reg = sfunc_uses_reg (insn);
5652 emit_insn_before (gen_use_sfunc_addr (reg), insn);
5656 /* fpscr is not actually a user variable, but we pretend it is for the
5657 sake of the previous optimization passes, since we want it handled like
5658 one. However, we don't have any debugging information for it, so turn
5659 it into a non-user variable now. */
5661 REG_USERVAR_P (get_fpscr_rtx ()) = 0;
5663 mdep_reorg_phase = SH_AFTER_MDEP_REORG;
5667 get_dest_uid (rtx label, int max_uid)
5669 rtx dest = next_real_insn (label);
5672 /* This can happen for an undefined label. */
5674 dest_uid = INSN_UID (dest);
5675 /* If this is a newly created branch redirection blocking instruction,
5676 we cannot index the branch_uid or insn_addresses arrays with its
5677 uid. But then, we won't need to, because the actual destination is
5678 the following branch. */
5679 while (dest_uid >= max_uid)
5681 dest = NEXT_INSN (dest);
5682 dest_uid = INSN_UID (dest);
5684 if (JUMP_P (dest) && GET_CODE (PATTERN (dest)) == RETURN)
5689 /* Split condbranches that are out of range. Also add clobbers for
5690 scratch registers that are needed in far jumps.
5691 We do this before delay slot scheduling, so that it can take our
5692 newly created instructions into account. It also allows us to
5693 find branches with common targets more easily. */
5696 split_branches (rtx first)
5699 struct far_branch **uid_branch, *far_branch_list = 0;
5700 int max_uid = get_max_uid ();
5703 /* Find out which branches are out of range. */
5704 shorten_branches (first);
5706 uid_branch = (struct far_branch **) alloca (max_uid * sizeof *uid_branch);
5707 memset ((char *) uid_branch, 0, max_uid * sizeof *uid_branch);
5709 for (insn = first; insn; insn = NEXT_INSN (insn))
5710 if (! INSN_P (insn))
5712 else if (INSN_DELETED_P (insn))
5714 /* Shorten_branches would split this instruction again,
5715 so transform it into a note. */
5716 SET_INSN_DELETED (insn);
5718 else if (JUMP_P (insn)
5719 /* Don't mess with ADDR_DIFF_VEC */
5720 && (GET_CODE (PATTERN (insn)) == SET
5721 || GET_CODE (PATTERN (insn)) == RETURN))
5723 enum attr_type type = get_attr_type (insn);
5724 if (type == TYPE_CBRANCH)
5728 if (get_attr_length (insn) > 4)
5730 rtx src = SET_SRC (PATTERN (insn));
5731 rtx olabel = XEXP (XEXP (src, 1), 0);
5732 int addr = INSN_ADDRESSES (INSN_UID (insn));
5734 int dest_uid = get_dest_uid (olabel, max_uid);
5735 struct far_branch *bp = uid_branch[dest_uid];
5737 /* redirect_jump needs a valid JUMP_LABEL, and it might delete
5738 the label if the LABEL_NUSES count drops to zero. There is
5739 always a jump_optimize pass that sets these values, but it
5740 proceeds to delete unreferenced code, and then if not
5741 optimizing, to un-delete the deleted instructions, thus
5742 leaving labels with use counts that are too low. */
5745 JUMP_LABEL (insn) = olabel;
5746 LABEL_NUSES (olabel)++;
5750 bp = (struct far_branch *) alloca (sizeof *bp);
5751 uid_branch[dest_uid] = bp;
5752 bp->prev = far_branch_list;
5753 far_branch_list = bp;
5755 = XEXP (XEXP (SET_SRC (PATTERN (insn)), 1), 0);
5756 LABEL_NUSES (bp->far_label)++;
5760 label = bp->near_label;
5761 if (! label && bp->address - addr >= CONDJUMP_MIN)
5763 rtx block = bp->insert_place;
5765 if (GET_CODE (PATTERN (block)) == RETURN)
5766 block = PREV_INSN (block);
5768 block = gen_block_redirect (block,
5770 label = emit_label_after (gen_label_rtx (),
5772 bp->near_label = label;
5774 else if (label && ! NEXT_INSN (label))
5776 if (addr + 2 - bp->address <= CONDJUMP_MAX)
5777 bp->insert_place = insn;
5779 gen_far_branch (bp);
5783 || (NEXT_INSN (label) && bp->address - addr < CONDJUMP_MIN))
5785 bp->near_label = label = gen_label_rtx ();
5786 bp->insert_place = insn;
5789 ok = redirect_jump (insn, label, 0);
5794 /* get_attr_length (insn) == 2 */
5795 /* Check if we have a pattern where reorg wants to redirect
5796 the branch to a label from an unconditional branch that
5797 is too far away. */
5798 /* We can't use JUMP_LABEL here because it might be undefined
5799 when not optimizing. */
5800 /* A syntax error might cause beyond to be NULL_RTX. */
5802 = next_active_insn (XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
5807 || ((beyond = next_active_insn (beyond))
5808 && JUMP_P (beyond)))
5809 && GET_CODE (PATTERN (beyond)) == SET
5810 && recog_memoized (beyond) == CODE_FOR_jump_compact
5812 (INSN_UID (XEXP (SET_SRC (PATTERN (beyond)), 0)))
5813 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
5815 gen_block_redirect (beyond,
5816 INSN_ADDRESSES (INSN_UID (beyond)), 1);
5819 next = next_active_insn (insn);
5822 || ((next = next_active_insn (next))
5824 && GET_CODE (PATTERN (next)) == SET
5825 && recog_memoized (next) == CODE_FOR_jump_compact
5827 (INSN_UID (XEXP (SET_SRC (PATTERN (next)), 0)))
5828 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
5830 gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), 1);
5832 else if (type == TYPE_JUMP || type == TYPE_RETURN)
5834 int addr = INSN_ADDRESSES (INSN_UID (insn));
5837 struct far_branch *bp;
5839 if (type == TYPE_JUMP)
5841 far_label = XEXP (SET_SRC (PATTERN (insn)), 0);
5842 dest_uid = get_dest_uid (far_label, max_uid);
5845 /* Parse errors can lead to labels outside
5846 the insn stream. */
5847 if (! NEXT_INSN (far_label))
5852 JUMP_LABEL (insn) = far_label;
5853 LABEL_NUSES (far_label)++;
5855 redirect_jump (insn, NULL_RTX, 1);
5859 bp = uid_branch[dest_uid];
5862 bp = (struct far_branch *) alloca (sizeof *bp);
5863 uid_branch[dest_uid] = bp;
5864 bp->prev = far_branch_list;
5865 far_branch_list = bp;
5867 bp->far_label = far_label;
5869 LABEL_NUSES (far_label)++;
5871 else if (bp->near_label && ! NEXT_INSN (bp->near_label))
5872 if (addr - bp->address <= CONDJUMP_MAX)
5873 emit_label_after (bp->near_label, PREV_INSN (insn));
5876 gen_far_branch (bp);
5882 bp->insert_place = insn;
5884 emit_insn_before (gen_block_branch_redirect (const0_rtx), insn);
5886 gen_block_redirect (insn, addr, bp->near_label ? 2 : 0);
5889 /* Generate all pending far branches,
5890 and free our references to the far labels. */
5891 while (far_branch_list)
5893 if (far_branch_list->near_label
5894 && ! NEXT_INSN (far_branch_list->near_label))
5895 gen_far_branch (far_branch_list);
5897 && far_branch_list->far_label
5898 && ! --LABEL_NUSES (far_branch_list->far_label))
5899 delete_insn (far_branch_list->far_label);
5900 far_branch_list = far_branch_list->prev;
5903 /* Instruction length information is no longer valid due to the new
5904 instructions that have been generated. */
5905 init_insn_lengths ();
5908 /* Dump out instruction addresses, which is useful for debugging the
5909 constant pool table stuff.
5911 If relaxing, output the label and pseudo-ops used to link together
5912 calls and the instruction which set the registers. */
5914 /* ??? The addresses printed by this routine for insns are nonsense for
5915 insns which are inside of a sequence where none of the inner insns have
5916 variable length. This is because the second pass of shorten_branches
5917 does not bother to update them. */
5920 final_prescan_insn (rtx insn, rtx *opvec ATTRIBUTE_UNUSED,
5921 int noperands ATTRIBUTE_UNUSED)
5923 if (TARGET_DUMPISIZE)
5924 fprintf (asm_out_file, "\n! at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));
5930 note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
5935 pattern = PATTERN (insn);
5936 if (GET_CODE (pattern) == PARALLEL)
5937 pattern = XVECEXP (pattern, 0, 0);
5938 switch (GET_CODE (pattern))
5941 if (GET_CODE (SET_SRC (pattern)) != CALL
5942 && get_attr_type (insn) != TYPE_SFUNC)
5944 targetm.asm_out.internal_label
5945 (asm_out_file, "L", CODE_LABEL_NUMBER (XEXP (note, 0)));
5948 /* else FALLTHROUGH */
5950 asm_fprintf (asm_out_file, "\t.uses %LL%d\n",
5951 CODE_LABEL_NUMBER (XEXP (note, 0)));
5961 /* Dump out any constants accumulated in the final pass. These will
5962 only be labels. */
5965 output_jump_label_table (void)
5971 fprintf (asm_out_file, "\t.align 2\n");
5972 for (i = 0; i < pool_size; i++)
5974 pool_node *p = &pool_vector[i];
5976 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5977 CODE_LABEL_NUMBER (p->label));
5978 output_asm_insn (".long %O0", &p->value);
5986 /* A full frame looks like:
5990 [ if current_function_anonymous_args
6003 local-0 <- fp points here. */
6005 /* Number of bytes pushed for anonymous args, used to pass information
6006 between expand_prologue and expand_epilogue. */
6008 /* Adjust the stack by SIZE bytes. REG holds the rtl of the register to be
6009 adjusted. If epilogue_p is zero, this is for a prologue; otherwise, it's
6010 for an epilogue and a negative value means that it's for a sibcall
6011 epilogue. If LIVE_REGS_MASK is nonzero, it points to a HARD_REG_SET of
6012 all the registers that are about to be restored, and hence dead. */
6015 output_stack_adjust (int size, rtx reg, int epilogue_p,
6016 HARD_REG_SET *live_regs_mask)
6018 rtx (*emit_fn) (rtx) = epilogue_p ? &emit_insn : &frame_insn;
6021 HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
6023 /* This test is bogus, as output_stack_adjust is used to re-align the
6024 stack. */
6025 #if 0
6026 gcc_assert (!(size % align));
6027 #endif
6029 if (CONST_OK_FOR_ADD (size))
6030 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size)));
6031 /* Try to do it with two partial adjustments; however, we must make
6032 sure that the stack is properly aligned at all times, in case
6033 an interrupt occurs between the two partial adjustments. */
6034 else if (CONST_OK_FOR_ADD (size / 2 & -align)
6035 && CONST_OK_FOR_ADD (size - (size / 2 & -align)))
6037 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size / 2 & -align)));
6038 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size - (size / 2 & -align))));
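/* A worked example of the split, assuming the non-SHmedia I08 range of
-128..127 and align == 4: size == 192 fails CONST_OK_FOR_ADD, but
192 / 2 & -4 == 96 and 192 - 96 == 96 are both encodable, and the
stack pointer stays aligned between the two adds. */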
6044 int temp = epilogue_p ? 7 : (TARGET_SH5 ? 0 : 1);
6047 /* If TEMP is invalid, we could temporarily save a general
6048 register to MACL. However, there is currently no need
6049 to handle this case, so just die when we see it. */
6051 || current_function_interrupt
6052 || ! call_really_used_regs[temp] || fixed_regs[temp])
6054 if (temp < 0 && ! current_function_interrupt
6055 && (TARGET_SHMEDIA || epilogue_p >= 0))
6058 COPY_HARD_REG_SET (temps, call_used_reg_set);
6059 AND_COMPL_HARD_REG_SET (temps, call_fixed_reg_set);
6063 if (crtl->return_rtx)
6065 enum machine_mode mode;
6066 mode = GET_MODE (crtl->return_rtx);
6067 if (BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG)
6068 nreg = HARD_REGNO_NREGS (FIRST_RET_REG, mode);
6070 for (i = 0; i < nreg; i++)
6071 CLEAR_HARD_REG_BIT (temps, FIRST_RET_REG + i);
6072 if (crtl->calls_eh_return)
6074 CLEAR_HARD_REG_BIT (temps, EH_RETURN_STACKADJ_REGNO);
6075 for (i = 0; i <= 3; i++)
6076 CLEAR_HARD_REG_BIT (temps, EH_RETURN_DATA_REGNO (i));
6079 if (TARGET_SHMEDIA && epilogue_p < 0)
6080 for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
6081 CLEAR_HARD_REG_BIT (temps, i);
6082 if (epilogue_p <= 0)
6084 for (i = FIRST_PARM_REG;
6085 i < FIRST_PARM_REG + NPARM_REGS (SImode); i++)
6086 CLEAR_HARD_REG_BIT (temps, i);
6087 if (cfun->static_chain_decl != NULL)
6088 CLEAR_HARD_REG_BIT (temps, STATIC_CHAIN_REGNUM);
6090 temp = scavenge_reg (&temps);
6092 if (temp < 0 && live_regs_mask)
6096 COPY_HARD_REG_SET (temps, *live_regs_mask);
6097 CLEAR_HARD_REG_BIT (temps, REGNO (reg));
6098 temp = scavenge_reg (&temps);
6102 rtx adj_reg, tmp_reg, mem;
6104 /* If we reached here, the most likely case is the (sibcall)
6105 epilogue for non-SHmedia. Put a special push/pop sequence
6106 for such a case as the last resort. This looks lengthy but
6107 would not be a problem because it seems to be very
6108 rare. */
6110 gcc_assert (!TARGET_SHMEDIA && epilogue_p);
6113 /* ??? There is still the slight possibility that r4 or
6114 r5 have been reserved as fixed registers or assigned
6115 as global registers, and they change during an
6116 interrupt. There are possible ways to handle this:
6118 - If we are adjusting the frame pointer (r14), we can do
6119 with a single temp register and an ordinary push / pop
6121 - Grab any call-used or call-saved registers (i.e. not
6122 fixed or globals) for the temps we need. We might
6123 also grab r14 if we are adjusting the stack pointer.
6124 If we can't find enough available registers, issue
6125 a diagnostic and die - the user must have reserved
6126 way too many registers.
6127 But since all this is rather unlikely to happen and
6128 would require extra testing, we just die if r4 / r5
6129 are not available. */
6130 gcc_assert (!fixed_regs[4] && !fixed_regs[5]
6131 && !global_regs[4] && !global_regs[5]);
6133 adj_reg = gen_rtx_REG (GET_MODE (reg), 4);
6134 tmp_reg = gen_rtx_REG (GET_MODE (reg), 5);
6135 emit_move_insn (gen_tmp_stack_mem (Pmode, reg), adj_reg);
6136 emit_insn (GEN_MOV (adj_reg, GEN_INT (size)));
6137 emit_insn (GEN_ADD3 (adj_reg, adj_reg, reg));
6138 mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
6139 emit_move_insn (mem, tmp_reg);
6140 emit_move_insn (tmp_reg, gen_tmp_stack_mem (Pmode, reg));
6141 mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
6142 emit_move_insn (mem, tmp_reg);
6143 emit_move_insn (reg, adj_reg);
6144 mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
6145 emit_move_insn (adj_reg, mem);
6146 mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
6147 emit_move_insn (tmp_reg, mem);
6148 /* Tell flow the insns that pop r4/r5 aren't dead. */
6153 const_reg = gen_rtx_REG (GET_MODE (reg), temp);
6155 /* If SIZE is negative, subtract the positive value.
6156 This sometimes allows a constant pool entry to be shared
6157 between prologue and epilogue code. */
6160 emit_insn (GEN_MOV (const_reg, GEN_INT (-size)));
6161 insn = emit_fn (GEN_SUB3 (reg, reg, const_reg));
6165 emit_insn (GEN_MOV (const_reg, GEN_INT (size)));
6166 insn = emit_fn (GEN_ADD3 (reg, reg, const_reg));
6169 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
6170 gen_rtx_SET (VOIDmode, reg,
6171 gen_rtx_PLUS (SImode, reg,
6181 RTX_FRAME_RELATED_P (x) = 1;
6185 /* Output RTL to push register RN onto the stack. */
6192 x = gen_push_fpul ();
6193 else if (rn == FPSCR_REG)
6194 x = gen_push_fpscr ();
6195 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
6196 && FP_OR_XD_REGISTER_P (rn))
6198 if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
6200 x = gen_push_4 (gen_rtx_REG (DFmode, rn));
6202 else if (TARGET_SH2E && FP_REGISTER_P (rn))
6203 x = gen_push_e (gen_rtx_REG (SFmode, rn));
6205 x = gen_push (gen_rtx_REG (SImode, rn));
6208 add_reg_note (x, REG_INC, gen_rtx_REG (SImode, STACK_POINTER_REGNUM));
6212 /* Output RTL to pop register RN from the stack. */
6219 x = gen_pop_fpul ();
6220 else if (rn == FPSCR_REG)
6221 x = gen_pop_fpscr ();
6222 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
6223 && FP_OR_XD_REGISTER_P (rn))
6225 if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
6227 x = gen_pop_4 (gen_rtx_REG (DFmode, rn));
6229 else if (TARGET_SH2E && FP_REGISTER_P (rn))
6230 x = gen_pop_e (gen_rtx_REG (SFmode, rn));
6232 x = gen_pop (gen_rtx_REG (SImode, rn));
6235 add_reg_note (x, REG_INC, gen_rtx_REG (SImode, STACK_POINTER_REGNUM));
6238 /* Generate code to push the regs specified in the mask. */
6241 push_regs (HARD_REG_SET *mask, int interrupt_handler)
6243 int i = interrupt_handler ? LAST_BANKED_REG + 1 : 0;
6246 /* Push PR last; this gives better latencies after the prologue, and
6247 candidates for the return delay slot when there are no general
6248 registers pushed. */
6249 for (; i < FIRST_PSEUDO_REGISTER; i++)
6251 /* If this is an interrupt handler, and the SZ bit varies,
6252 and we have to push any floating point register, we need
6253 to switch to the correct precision first. */
6254 if (i == FIRST_FP_REG && interrupt_handler && TARGET_FMOVD
6255 && hard_reg_set_intersect_p (*mask, reg_class_contents[DF_REGS]))
6257 HARD_REG_SET unsaved;
6260 COMPL_HARD_REG_SET (unsaved, *mask);
6261 fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
6265 && (i != FPSCR_REG || ! skip_fpscr)
6266 && TEST_HARD_REG_BIT (*mask, i))
6268 /* If the ISR has RESBANK attribute assigned, don't push any of
6269 the following registers - R0-R14, MACH, MACL and GBR. */
6270 if (! (sh_cfun_resbank_handler_p ()
6271 && ((i >= FIRST_GENERAL_REG && i < LAST_GENERAL_REG)
6279 /* Push banked registers last to improve delay slot opportunities. */
6280 if (interrupt_handler)
6281 for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
6282 if (TEST_HARD_REG_BIT (*mask, i))
6285 /* Don't push PR register for an ISR with RESBANK attribute assigned. */
6286 if (TEST_HARD_REG_BIT (*mask, PR_REG) && !sh_cfun_resbank_handler_p ())
6290 /* Calculate how much extra space is needed to save all callee-saved
6291 target registers.
6292 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
6295 shmedia_target_regs_stack_space (HARD_REG_SET *live_regs_mask)
6298 int stack_space = 0;
6299 int interrupt_handler = sh_cfun_interrupt_handler_p ();
6301 for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
6302 if ((! call_really_used_regs[reg] || interrupt_handler)
6303 && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
6304 /* Leave space to save this target register on the stack,
6305 in case target register allocation wants to use it. */
6306 stack_space += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
6310 /* Decide whether we should reserve space for callee-save target registers,
6311 in case target register allocation wants to use them. REGS_SAVED is
6312 the space, in bytes, that is already required for register saves.
6313 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
6316 shmedia_reserve_space_for_target_registers_p (int regs_saved,
6317 HARD_REG_SET *live_regs_mask)
6321 return shmedia_target_regs_stack_space (live_regs_mask) <= regs_saved;
6324 /* Decide how much space to reserve for callee-save target registers
6325 in case target register allocation wants to use them.
6326 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
6329 shmedia_target_regs_stack_adjust (HARD_REG_SET *live_regs_mask)
6331 if (shmedia_space_reserved_for_target_registers)
6332 return shmedia_target_regs_stack_space (live_regs_mask);
6337 /* Work out the registers which need to be saved, both as a mask and a
6338 count of saved words. Return the count.
6340 If doing a pragma interrupt function, then push all regs used by the
6341 function, and if we call another function (we can tell by looking at PR),
6342 make sure that all the regs it clobbers are safe too. */
6345 calc_live_regs (HARD_REG_SET *live_regs_mask)
6350 bool interrupt_or_trapa_handler, trapa_handler, interrupt_handler;
6351 bool nosave_low_regs;
6352 int pr_live, has_call;
6354 attrs = DECL_ATTRIBUTES (current_function_decl);
6355 interrupt_or_trapa_handler = sh_cfun_interrupt_handler_p ();
6356 trapa_handler = lookup_attribute ("trapa_handler", attrs) != NULL_TREE;
6357 interrupt_handler = interrupt_or_trapa_handler && ! trapa_handler;
6358 nosave_low_regs = lookup_attribute ("nosave_low_regs", attrs) != NULL_TREE;
6360 CLEAR_HARD_REG_SET (*live_regs_mask);
6361 if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && interrupt_handler
6362 && df_regs_ever_live_p (FPSCR_REG))
6363 target_flags &= ~MASK_FPU_SINGLE;
6364 /* If we can avoid a lot of saves by switching to double mode, do that. */
6365 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && TARGET_FPU_SINGLE)
6366 for (count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
6367 if (df_regs_ever_live_p (reg) && df_regs_ever_live_p (reg+1)
6368 && (! call_really_used_regs[reg]
6369 || interrupt_handler)
6372 target_flags &= ~MASK_FPU_SINGLE;
6375 /* PR_MEDIA_REG is a general purpose register, thus global_alloc already
6376 knows how to use it. That means the pseudo originally allocated for
6377 the initial value can become the PR_MEDIA_REG hard register, as seen for
6378 execute/20010122-1.c:test9. */
6380 /* ??? this function is called from initial_elimination_offset, hence we
6381 can't use the result of sh_media_register_for_return here. */
6382 pr_live = sh_pr_n_sets ();
6385 rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
6386 pr_live = (pr_initial
6387 ? (!REG_P (pr_initial)
6388 || REGNO (pr_initial) != (PR_REG))
6389 : df_regs_ever_live_p (PR_REG));
6390 /* For Shcompact, if not optimizing, we end up with a memory reference
6391 using the return address pointer for __builtin_return_address even
6392 though there is no actual need to put the PR register on the stack. */
6393 pr_live |= df_regs_ever_live_p (RETURN_ADDRESS_POINTER_REGNUM);
6395 /* Force PR to be live if the prologue has to call the SHmedia
6396 argument decoder or register saver. */
6397 if (TARGET_SHCOMPACT
6398 && ((crtl->args.info.call_cookie
6399 & ~ CALL_COOKIE_RET_TRAMP (1))
6400 || crtl->saves_all_registers))
6402 has_call = TARGET_SHMEDIA ? ! leaf_function_p () : pr_live;
6403 for (count = 0, reg = FIRST_PSEUDO_REGISTER; reg-- != 0; )
6405 if (reg == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG)
6408 ? (/* Need to save all the regs ever live. */
6409 (df_regs_ever_live_p (reg)
6410 || (call_really_used_regs[reg]
6411 && (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG
6412 || reg == PIC_OFFSET_TABLE_REGNUM)
6414 || (TARGET_SHMEDIA && has_call
6415 && REGISTER_NATURAL_MODE (reg) == SImode
6416 && (GENERAL_REGISTER_P (reg) || TARGET_REGISTER_P (reg))))
6417 && reg != STACK_POINTER_REGNUM && reg != ARG_POINTER_REGNUM
6418 && reg != RETURN_ADDRESS_POINTER_REGNUM
6419 && reg != T_REG && reg != GBR_REG
6420 /* Push fpscr only on targets which have an FPU. */
6421 && (reg != FPSCR_REG || TARGET_FPU_ANY))
6422 : (/* Only push those regs which are used and need to be saved. */
6425 && crtl->args.info.call_cookie
6426 && reg == PIC_OFFSET_TABLE_REGNUM)
6427 || (df_regs_ever_live_p (reg)
6428 && ((!call_really_used_regs[reg]
6429 && !(reg != PIC_OFFSET_TABLE_REGNUM
6430 && fixed_regs[reg] && call_used_regs[reg]))
6431 || (trapa_handler && reg == FPSCR_REG && TARGET_FPU_ANY)))
6432 || (crtl->calls_eh_return
6433 && (reg == EH_RETURN_DATA_REGNO (0)
6434 || reg == EH_RETURN_DATA_REGNO (1)
6435 || reg == EH_RETURN_DATA_REGNO (2)
6436 || reg == EH_RETURN_DATA_REGNO (3)))
6437 || ((reg == MACL_REG || reg == MACH_REG)
6438 && df_regs_ever_live_p (reg)
6439 && sh_cfun_attr_renesas_p ())
6442 SET_HARD_REG_BIT (*live_regs_mask, reg);
6443 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
6445 if ((TARGET_SH4 || TARGET_SH2A_DOUBLE || TARGET_SH5) && TARGET_FMOVD
6446 && GET_MODE_CLASS (REGISTER_NATURAL_MODE (reg)) == MODE_FLOAT)
6448 if (FP_REGISTER_P (reg))
6450 if (! TARGET_FPU_SINGLE && ! df_regs_ever_live_p (reg ^ 1))
6452 SET_HARD_REG_BIT (*live_regs_mask, (reg ^ 1));
6453 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg ^ 1));
6456 else if (XD_REGISTER_P (reg))
6458 /* Must switch to double mode to access these registers. */
6459 target_flags &= ~MASK_FPU_SINGLE;
6463 if (nosave_low_regs && reg == R8_REG)
6466 /* If we have a target register optimization pass after prologue / epilogue
6467 threading, we need to assume all target registers will be live even if
6468 they aren't now. */
6469 if (flag_branch_target_load_optimize2
6470 && TARGET_SAVE_ALL_TARGET_REGS
6471 && shmedia_space_reserved_for_target_registers)
6472 for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
6473 if ((! call_really_used_regs[reg] || interrupt_handler)
6474 && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
6476 SET_HARD_REG_BIT (*live_regs_mask, reg);
6477 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
6479 /* If this is an interrupt handler, we don't have any call-clobbered
6480 registers we can conveniently use for target register save/restore.
6481 Make sure we save at least one general purpose register when we need
6482 to save target registers. */
6483 if (interrupt_handler
6484 && hard_reg_set_intersect_p (*live_regs_mask,
6485 reg_class_contents[TARGET_REGS])
6486 && ! hard_reg_set_intersect_p (*live_regs_mask,
6487 reg_class_contents[GENERAL_REGS]))
6489 SET_HARD_REG_BIT (*live_regs_mask, R0_REG);
6490 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (R0_REG));
6496 /* Code to generate prologue and epilogue sequences */
6498 /* PUSHED is the number of bytes that are being pushed on the
6499 stack for register saves. Return the frame size, padded
6500 appropriately so that the stack stays properly aligned. */
6501 static HOST_WIDE_INT
6502 rounded_frame_size (int pushed)
6504 HOST_WIDE_INT size = get_frame_size ();
6505 HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
6507 return ((size + pushed + align - 1) & -align) - pushed;
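/* E.g., assuming align == 8 (STACK_BOUNDARY == 64) for illustration, a
frame of 20 bytes with pushed == 8 yields ((20 + 8 + 7) & -8) - 8 == 24,
so pushed plus frame size together stay a multiple of the alignment. */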
6510 /* Choose a call-clobbered target-branch register that remains
6511 unchanged along the whole function. We set it up as the return
6512 value in the prologue. */
6514 sh_media_register_for_return (void)
6519 if (! current_function_is_leaf)
6521 if (lookup_attribute ("interrupt_handler",
6522 DECL_ATTRIBUTES (current_function_decl)))
6524 if (sh_cfun_interrupt_handler_p ())
6527 tr0_used = flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
6529 for (regno = FIRST_TARGET_REG + tr0_used; regno <= LAST_TARGET_REG; regno++)
6530 if (call_really_used_regs[regno] && ! df_regs_ever_live_p (regno))
6536 /* The maximum registers we need to save are:
6537 - 62 general purpose registers (r15 is stack pointer, r63 is zero)
6538 - 32 floating point registers (for each pair, we save none,
6539 one single precision value, or a double precision value).
6540 - 8 target registers
6541 - add 1 entry for a delimiter. */
6542 #define MAX_SAVED_REGS (62+32+8)
6544 typedef struct save_entry_s
6553 /* There will be a delimiter entry with VOIDmode both at the start and the
6554 end of a filled in schedule. The end delimiter has the offset of the
6555 save with the smallest (i.e. most negative) offset. */
6556 typedef struct save_schedule_s
6558 save_entry entries[MAX_SAVED_REGS + 2];
6559 int temps[MAX_TEMPS+1];
6562 /* Fill in SCHEDULE according to LIVE_REGS_MASK. If RESTORE is nonzero,
6563 use reverse order. Returns the last entry written to (not counting
6564 the delimiter). OFFSET_BASE is a number to be added to all offset
6565 entries. */
6568 sh5_schedule_saves (HARD_REG_SET *live_regs_mask, save_schedule *schedule,
6572 save_entry *entry = schedule->entries;
6576 if (! current_function_interrupt)
6577 for (i = FIRST_GENERAL_REG; tmpx < MAX_TEMPS && i <= LAST_GENERAL_REG; i++)
6578 if (call_really_used_regs[i] && ! fixed_regs[i] && i != PR_MEDIA_REG
6579 && ! FUNCTION_ARG_REGNO_P (i)
6580 && i != FIRST_RET_REG
6581 && ! (cfun->static_chain_decl != NULL && i == STATIC_CHAIN_REGNUM)
6582 && ! (crtl->calls_eh_return
6583 && (i == EH_RETURN_STACKADJ_REGNO
6584 || ((unsigned) i >= EH_RETURN_DATA_REGNO (0)
6585 && (unsigned) i <= EH_RETURN_DATA_REGNO (3)))))
6586 schedule->temps[tmpx++] = i;
6588 entry->mode = VOIDmode;
6589 entry->offset = offset_base;
6591 /* We loop twice: first, we save 8-byte aligned registers in the
6592 higher addresses, which are known to be aligned. Then, we
6593 proceed to saving 32-bit registers that don't need 8-byte alignment.
6595 If this is an interrupt function, all registers that need saving
6596 need to be saved in full. Moreover, we need to postpone saving
6597 target registers till we have saved some general purpose registers
6598 we can then use as scratch registers. */
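/* Editor's note on the two passes below: with align = 1, the inner loop
   keeps only saves whose GET_MODE_SIZE is a multiple of the stack
   boundary (e.g. DImode saves), which therefore land on known-aligned
   slots; the align = 0 pass then packs the remaining smaller saves,
   such as single SFmode values.  */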
6599 offset = offset_base;
6600 for (align = 1; align >= 0; align--)
6602 for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
6603 if (TEST_HARD_REG_BIT (*live_regs_mask, i))
6605 enum machine_mode mode = REGISTER_NATURAL_MODE (i);
6608 if (current_function_interrupt)
6610 if (TARGET_REGISTER_P (i))
6612 if (GENERAL_REGISTER_P (i))
6615 if (mode == SFmode && (i % 2) == 1
6616 && ! TARGET_FPU_SINGLE && FP_REGISTER_P (i)
6617 && (TEST_HARD_REG_BIT (*live_regs_mask, (i ^ 1))))
6624 /* If we're doing the aligned pass and this is not aligned,
6625 or we're doing the unaligned pass and this is aligned, skip it. */
6627 if ((GET_MODE_SIZE (mode) % (STACK_BOUNDARY / BITS_PER_UNIT) == 0)
6631 if (current_function_interrupt
6632 && GENERAL_REGISTER_P (i)
6633 && tmpx < MAX_TEMPS)
6634 schedule->temps[tmpx++] = i;
6636 offset -= GET_MODE_SIZE (mode);
6639 entry->offset = offset;
6642 if (align && current_function_interrupt)
6643 for (i = LAST_TARGET_REG; i >= FIRST_TARGET_REG; i--)
6644 if (TEST_HARD_REG_BIT (*live_regs_mask, i))
6646 offset -= GET_MODE_SIZE (DImode);
6648 entry->mode = DImode;
6649 entry->offset = offset;
6654 entry->mode = VOIDmode;
6655 entry->offset = offset;
6656 schedule->temps[tmpx] = -1;
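/* Editor's sketch of a filled schedule (hypothetical contents): with
   OFFSET_BASE = 0 and two DImode saves, entries[0] is the start
   delimiter {VOIDmode, 0}, entries[1] and entries[2] hold the saves at
   offsets -8 and -16, and the end delimiter is {VOIDmode, -16}, the
   most negative offset, as described above save_schedule_s.  */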
6661 sh_expand_prologue (void)
6663 HARD_REG_SET live_regs_mask;
6666 int save_flags = target_flags;
6669 = lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl));
6671 current_function_interrupt = sh_cfun_interrupt_handler_p ();
6673 /* We have pretend args if we had an object sent partially in registers
6674 and partially on the stack, e.g. a large structure. */
6675 pretend_args = crtl->args.pretend_args_size;
6676 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl)
6677 && (NPARM_REGS(SImode)
6678 > crtl->args.info.arg_count[(int) SH_ARG_INT]))
6680 output_stack_adjust (-pretend_args
6681 - crtl->args.info.stack_regs * 8,
6682 stack_pointer_rtx, 0, NULL);
6684 if (TARGET_SHCOMPACT && flag_pic && crtl->args.info.call_cookie)
6685 /* We're going to use the PIC register to load the address of the
6686 incoming-argument decoder and/or of the return trampoline from
6687 the GOT, so make sure the PIC register is preserved and initialized. */
6689 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
6691 if (TARGET_SHCOMPACT
6692 && (crtl->args.info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
6696 /* First, make all registers with incoming arguments that will
6697 be pushed onto the stack live, so that register renaming
6698 doesn't overwrite them. */
6699 for (reg = 0; reg < NPARM_REGS (SImode); reg++)
6700 if (CALL_COOKIE_STACKSEQ_GET (crtl->args.info.call_cookie)
6701 >= NPARM_REGS (SImode) - reg)
6702 for (; reg < NPARM_REGS (SImode); reg++)
6703 emit_insn (gen_shcompact_preserve_incoming_args
6704 (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
6705 else if (CALL_COOKIE_INT_REG_GET
6706 (crtl->args.info.call_cookie, reg) == 1)
6707 emit_insn (gen_shcompact_preserve_incoming_args
6708 (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
6710 emit_move_insn (gen_rtx_REG (Pmode, MACL_REG),
6712 emit_move_insn (gen_rtx_REG (SImode, R0_REG),
6713 GEN_INT (crtl->args.info.call_cookie));
6714 emit_move_insn (gen_rtx_REG (SImode, MACH_REG),
6715 gen_rtx_REG (SImode, R0_REG));
6717 else if (TARGET_SHMEDIA)
6719 int tr = sh_media_register_for_return ();
6722 emit_move_insn (gen_rtx_REG (DImode, tr),
6723 gen_rtx_REG (DImode, PR_MEDIA_REG));
6726 /* Emit the code for SETUP_VARARGS. */
6729 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
6731 /* Push arg regs as if they'd been provided by the caller on the stack. */
6732 for (i = 0; i < NPARM_REGS(SImode); i++)
6734 int rn = NPARM_REGS(SImode) + FIRST_PARM_REG - i - 1;
6737 if (i >= (NPARM_REGS(SImode)
6738 - crtl->args.info.arg_count[(int) SH_ARG_INT]
6746 /* If we're supposed to switch stacks at function entry, do so now. */
6750 /* The argument specifies a variable holding the address of the
6751 stack the interrupt function should switch to/from at entry/exit. */
6752 tree arg = TREE_VALUE (TREE_VALUE (sp_switch_attr));
6754 = ggc_strdup (TREE_STRING_POINTER (arg));
6755 rtx sp_switch = gen_rtx_SYMBOL_REF (Pmode, s);
6757 lab = add_constant (sp_switch, SImode, 0);
6758 newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
6759 newsrc = gen_const_mem (SImode, newsrc);
6761 emit_insn (gen_sp_switch_1 (newsrc));
6764 d = calc_live_regs (&live_regs_mask);
6765 /* ??? Maybe we could save some switching if we can move a mode switch
6766 that already happens to be at the function start into the prologue. */
6767 if (target_flags != save_flags && ! current_function_interrupt)
6768 emit_insn (gen_toggle_sz ());
6772 int offset_base, offset;
6774 int offset_in_r0 = -1;
6776 int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
6777 int total_size, save_size;
6778 save_schedule schedule;
6782 if (call_really_used_regs[R0_REG] && ! fixed_regs[R0_REG]
6783 && ! current_function_interrupt)
6784 r0 = gen_rtx_REG (Pmode, R0_REG);
6786 /* D is the actual number of bytes that we need for saving registers;
6787 however, in initial_elimination_offset we have committed to using
6788 an additional TREGS_SPACE bytes - in order to keep both
6789 addresses to arguments supplied by the caller and local variables
6790 valid, we must keep this gap. Place it between the incoming
6791 arguments and the actually saved registers in a bid to optimize
6792 locality of reference. */
6793 total_size = d + tregs_space;
6794 total_size += rounded_frame_size (total_size);
6795 save_size = total_size - rounded_frame_size (d);
6796 if (save_size % (STACK_BOUNDARY / BITS_PER_UNIT))
6797 d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
6798 - save_size % (STACK_BOUNDARY / BITS_PER_UNIT));
6800 /* If adjusting the stack in a single step costs nothing extra, do so.
6801 I.e. either if a single addi is enough, or we need a movi anyway,
6802 and we don't exceed the maximum offset range (the test for the
6803 latter is conservative for simplicity). */
6805 && (CONST_OK_FOR_I10 (-total_size)
6806 || (! CONST_OK_FOR_I10 (-(save_size + d_rounding))
6807 && total_size <= 2044)))
6808 d_rounding = total_size - save_size;
6810 offset_base = d + d_rounding;
6812 output_stack_adjust (-(save_size + d_rounding), stack_pointer_rtx,
6815 sh5_schedule_saves (&live_regs_mask, &schedule, offset_base);
6816 tmp_pnt = schedule.temps;
6817 for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
6819 enum machine_mode mode = (enum machine_mode) entry->mode;
6820 unsigned int reg = entry->reg;
6821 rtx reg_rtx, mem_rtx, pre_dec = NULL_RTX;
6824 offset = entry->offset;
6826 reg_rtx = gen_rtx_REG (mode, reg);
6828 mem_rtx = gen_frame_mem (mode,
6829 gen_rtx_PLUS (Pmode,
6833 if (!memory_address_p (mode, XEXP (mem_rtx, 0)))
6839 if (HAVE_PRE_DECREMENT
6840 && (offset_in_r0 - offset == GET_MODE_SIZE (mode)
6841 || mem_rtx == NULL_RTX
6842 || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
6844 pre_dec = gen_frame_mem (mode, gen_rtx_PRE_DEC (Pmode, r0));
6846 if (!memory_address_p (mode, XEXP (pre_dec, 0)))
6851 offset += GET_MODE_SIZE (mode);
6855 if (mem_rtx != NULL_RTX)
6858 if (offset_in_r0 == -1)
6860 emit_move_insn (r0, GEN_INT (offset));
6861 offset_in_r0 = offset;
6863 else if (offset != offset_in_r0)
6868 GEN_INT (offset - offset_in_r0)));
6869 offset_in_r0 += offset - offset_in_r0;
6872 if (pre_dec != NULL_RTX)
6878 (Pmode, r0, stack_pointer_rtx));
6882 offset -= GET_MODE_SIZE (mode);
6883 offset_in_r0 -= GET_MODE_SIZE (mode);
6888 mem_rtx = gen_frame_mem (mode, r0);
6890 mem_rtx = gen_frame_mem (mode,
6891 gen_rtx_PLUS (Pmode,
6895 /* We must not use an r0-based address for target-branch
6896 registers or for special registers without pre-dec
6897 memory addresses, since we store their values in r0 first. */
6899 gcc_assert (!TARGET_REGISTER_P (reg)
6900 && ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
6901 || mem_rtx == pre_dec));
6904 orig_reg_rtx = reg_rtx;
6905 if (TARGET_REGISTER_P (reg)
6906 || ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
6907 && mem_rtx != pre_dec))
6909 rtx tmp_reg = gen_rtx_REG (GET_MODE (reg_rtx), *tmp_pnt);
6911 emit_move_insn (tmp_reg, reg_rtx);
6913 if (REGNO (tmp_reg) == R0_REG)
6917 gcc_assert (!refers_to_regno_p
6918 (R0_REG, R0_REG+1, mem_rtx, (rtx *) 0));
6921 if (*++tmp_pnt <= 0)
6922 tmp_pnt = schedule.temps;
6929 /* Mark as interesting for the DWARF CFI generator. */
6930 insn = emit_move_insn (mem_rtx, reg_rtx);
6931 RTX_FRAME_RELATED_P (insn) = 1;
6932 /* If we use an intermediate register for the save, we can't
6933 describe this exactly in cfi as a copy of the to-be-saved
6934 register into the temporary register and then the temporary
6935 register on the stack, because the temporary register can
6936 have a different natural size than the to-be-saved register.
6937 Thus, we gloss over the intermediate copy and pretend we do
6938 a direct save from the to-be-saved register. */
6939 if (REGNO (reg_rtx) != reg)
6943 set = gen_rtx_SET (VOIDmode, mem_rtx, orig_reg_rtx);
6944 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
6947 if (TARGET_SHCOMPACT && (offset_in_r0 != -1))
6949 rtx reg_rtx = gen_rtx_REG (mode, reg);
6951 rtx mem_rtx = gen_frame_mem (mode,
6952 gen_rtx_PLUS (Pmode,
6956 set = gen_rtx_SET (VOIDmode, mem_rtx, reg_rtx);
6957 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
6962 gcc_assert (entry->offset == d_rounding);
6965 push_regs (&live_regs_mask, current_function_interrupt);
6967 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
6968 emit_insn (gen_GOTaddr2picreg ());
6970 if (SHMEDIA_REGS_STACK_ADJUST ())
6972 /* This must NOT go through the PLT, otherwise mach and macl
6973 may be clobbered. */
6974 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6976 ? "__GCC_push_shmedia_regs"
6977 : "__GCC_push_shmedia_regs_nofpu"), SFUNC_GOT);
6978 emit_insn (gen_shmedia_save_restore_regs_compact
6979 (GEN_INT (-SHMEDIA_REGS_STACK_ADJUST ())));
6982 if (target_flags != save_flags && ! current_function_interrupt)
6983 emit_insn (gen_toggle_sz ());
6985 target_flags = save_flags;
6987 output_stack_adjust (-rounded_frame_size (d) + d_rounding,
6988 stack_pointer_rtx, 0, NULL);
6990 if (frame_pointer_needed)
6991 frame_insn (GEN_MOV (hard_frame_pointer_rtx, stack_pointer_rtx));
6993 if (TARGET_SHCOMPACT
6994 && (crtl->args.info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
6996 /* This must NOT go through the PLT, otherwise mach and macl
6997 may be clobbered. */
6998 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6999 "__GCC_shcompact_incoming_args", SFUNC_GOT);
7000 emit_insn (gen_shcompact_incoming_args ());
7005 sh_expand_epilogue (bool sibcall_p)
7007 HARD_REG_SET live_regs_mask;
7011 int save_flags = target_flags;
7012 int frame_size, save_size;
7013 int fpscr_deferred = 0;
7014 int e = sibcall_p ? -1 : 1;
7016 d = calc_live_regs (&live_regs_mask);
7019 frame_size = rounded_frame_size (d);
7023 int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
7025 if (d % (STACK_BOUNDARY / BITS_PER_UNIT))
7026 d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
7027 - d % (STACK_BOUNDARY / BITS_PER_UNIT));
7029 total_size = d + tregs_space;
7030 total_size += rounded_frame_size (total_size);
7031 save_size = total_size - frame_size;
7033 /* If adjusting the stack in a single step costs nothing extra, do so.
7034 I.e. either if a single addi is enough, or we need a movi anyway,
7035 and we don't exceed the maximum offset range (the test for the
7036 latter is conservative for simplicity). */
7038 && ! frame_pointer_needed
7039 && (CONST_OK_FOR_I10 (total_size)
7040 || (! CONST_OK_FOR_I10 (save_size + d_rounding)
7041 && total_size <= 2044)))
7042 d_rounding = frame_size;
7044 frame_size -= d_rounding;
7047 if (frame_pointer_needed)
7049 /* We must avoid scheduling the epilogue with previous basic blocks.
7050 See PR/18032 and PR/40313. */
7051 emit_insn (gen_blockage ());
7052 output_stack_adjust (frame_size, hard_frame_pointer_rtx, e,
7055 /* We must avoid moving the stack pointer adjustment past code
7056 which reads from the local frame, else an interrupt could
7057 occur after the SP adjustment and clobber data in the local frame. */
7059 emit_insn (gen_blockage ());
7060 emit_insn (GEN_MOV (stack_pointer_rtx, hard_frame_pointer_rtx));
7062 else if (frame_size)
7064 /* We must avoid moving the stack pointer adjustment past code
7065 which reads from the local frame, else an interrupt could
7066 occur after the SP adjustment and clobber data in the local frame. */
7068 emit_insn (gen_blockage ());
7069 output_stack_adjust (frame_size, stack_pointer_rtx, e, &live_regs_mask);
7072 if (SHMEDIA_REGS_STACK_ADJUST ())
7074 function_symbol (gen_rtx_REG (Pmode, R0_REG),
7076 ? "__GCC_pop_shmedia_regs"
7077 : "__GCC_pop_shmedia_regs_nofpu"), SFUNC_GOT);
7078 /* This must NOT go through the PLT, otherwise mach and macl
7079 may be clobbered. */
7080 emit_insn (gen_shmedia_save_restore_regs_compact
7081 (GEN_INT (SHMEDIA_REGS_STACK_ADJUST ())));
7084 /* Pop all the registers. */
7086 if (target_flags != save_flags && ! current_function_interrupt)
7087 emit_insn (gen_toggle_sz ());
7090 int offset_base, offset;
7091 int offset_in_r0 = -1;
7093 rtx r0 = gen_rtx_REG (Pmode, R0_REG);
7094 save_schedule schedule;
7098 entry = sh5_schedule_saves (&live_regs_mask, &schedule, d_rounding);
7099 offset_base = -entry[1].offset + d_rounding;
7100 tmp_pnt = schedule.temps;
7101 for (; entry->mode != VOIDmode; entry--)
7103 enum machine_mode mode = (enum machine_mode) entry->mode;
7104 int reg = entry->reg;
7105 rtx reg_rtx, mem_rtx, post_inc = NULL_RTX, insn;
7107 offset = offset_base + entry->offset;
7108 reg_rtx = gen_rtx_REG (mode, reg);
7110 mem_rtx = gen_frame_mem (mode,
7111 gen_rtx_PLUS (Pmode,
7115 if (!memory_address_p (mode, XEXP (mem_rtx, 0)))
7118 if (HAVE_POST_INCREMENT
7119 && (offset == offset_in_r0
7120 || (offset + GET_MODE_SIZE (mode) != d + d_rounding
7121 && mem_rtx == NULL_RTX)
7122 || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
7124 post_inc = gen_frame_mem (mode, gen_rtx_POST_INC (Pmode, r0));
7126 if (!memory_address_p (mode, XEXP (post_inc, 0)))
7127 post_inc = NULL_RTX;
7132 if (mem_rtx != NULL_RTX)
7135 if (offset_in_r0 == -1)
7137 emit_move_insn (r0, GEN_INT (offset));
7138 offset_in_r0 = offset;
7140 else if (offset != offset_in_r0)
7145 GEN_INT (offset - offset_in_r0)));
7146 offset_in_r0 += offset - offset_in_r0;
7149 if (post_inc != NULL_RTX)
7155 (Pmode, r0, stack_pointer_rtx));
7161 offset_in_r0 += GET_MODE_SIZE (mode);
7164 mem_rtx = gen_frame_mem (mode, r0);
7166 mem_rtx = gen_frame_mem (mode,
7167 gen_rtx_PLUS (Pmode,
7171 gcc_assert ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
7172 || mem_rtx == post_inc);
7175 if ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
7176 && mem_rtx != post_inc)
7178 insn = emit_move_insn (r0, mem_rtx);
7181 else if (TARGET_REGISTER_P (reg))
7183 rtx tmp_reg = gen_rtx_REG (mode, *tmp_pnt);
7185 /* Give the scheduler a bit of freedom by using up to
7186 MAX_TEMPS registers in a round-robin fashion. */
7187 insn = emit_move_insn (tmp_reg, mem_rtx);
7190 tmp_pnt = schedule.temps;
7193 insn = emit_move_insn (reg_rtx, mem_rtx);
7196 gcc_assert (entry->offset + offset_base == d + d_rounding);
7198 else /* ! TARGET_SH5 */
7203 /* For an ISR with RESBANK attribute assigned, don't pop the PR register. */
7205 if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG)
7206 && !sh_cfun_resbank_handler_p ())
7208 if (!frame_pointer_needed)
7209 emit_insn (gen_blockage ());
7213 /* Banked registers are popped first to avoid being scheduled in the
7214 delay slot. RTE switches banks before the delay-slot instruction. */
7215 if (current_function_interrupt)
7217 for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
7218 if (TEST_HARD_REG_BIT (live_regs_mask, i))
7219 pop (LAST_BANKED_REG - i);
7221 last_reg = FIRST_PSEUDO_REGISTER - LAST_BANKED_REG - 1;
7224 last_reg = FIRST_PSEUDO_REGISTER;
7226 for (i = 0; i < last_reg; i++)
7228 int j = (FIRST_PSEUDO_REGISTER - 1) - i;
7230 if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
7231 && hard_reg_set_intersect_p (live_regs_mask,
7232 reg_class_contents[DF_REGS]))
7234 /* For an ISR with RESBANK attribute assigned, don't pop
7235 the following registers: R0-R14, MACH, MACL and GBR. */
7236 else if (j != PR_REG && TEST_HARD_REG_BIT (live_regs_mask, j)
7237 && ! (sh_cfun_resbank_handler_p ()
7238 && ((j >= FIRST_GENERAL_REG
7239 && j < LAST_GENERAL_REG)
7245 if (j == FIRST_FP_REG && fpscr_deferred)
7249 if (target_flags != save_flags && ! current_function_interrupt)
7250 emit_insn (gen_toggle_sz ());
7251 target_flags = save_flags;
7253 output_stack_adjust (crtl->args.pretend_args_size
7254 + save_size + d_rounding
7255 + crtl->args.info.stack_regs * 8,
7256 stack_pointer_rtx, e, NULL);
7258 if (crtl->calls_eh_return)
7259 emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
7260 EH_RETURN_STACKADJ_RTX));
7262 /* Switch back to the normal stack if necessary. */
7263 if (lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl)))
7264 emit_insn (gen_sp_switch_2 ());
7266 /* Tell flow the insn that pops PR isn't dead. */
7267 /* PR_REG will never be live in SHmedia mode, and we don't need to
7268 USE PR_MEDIA_REG, since it will be explicitly copied to TR0_REG
7269 by the return pattern. */
7270 if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
7271 emit_use (gen_rtx_REG (SImode, PR_REG));
7274 static int sh_need_epilogue_known = 0;
7277 sh_need_epilogue (void)
7279 if (! sh_need_epilogue_known)
7284 sh_expand_epilogue (0);
7285 epilogue = get_insns ();
7287 sh_need_epilogue_known = (epilogue == NULL ? -1 : 1);
7289 return sh_need_epilogue_known > 0;
7292 /* Emit code to change the current function's return address to RA.
7293 TEMP is available as a scratch register, if needed. */
7296 sh_set_return_address (rtx ra, rtx tmp)
7298 HARD_REG_SET live_regs_mask;
7300 int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
7303 d = calc_live_regs (&live_regs_mask);
7305 /* If pr_reg isn't live, we can set it (or the register given in
7306 sh_media_register_for_return) directly. */
7307 if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
7313 int rr_regno = sh_media_register_for_return ();
7318 rr = gen_rtx_REG (DImode, rr_regno);
7321 rr = gen_rtx_REG (SImode, pr_reg);
7323 emit_insn (GEN_MOV (rr, ra));
7324 /* Tell flow the register for return isn't dead. */
7332 save_schedule schedule;
7335 entry = sh5_schedule_saves (&live_regs_mask, &schedule, 0);
7336 offset = entry[1].offset;
7337 for (; entry->mode != VOIDmode; entry--)
7338 if (entry->reg == pr_reg)
7341 /* We can't find the PR register. */
7345 offset = entry->offset - offset;
7346 pr_offset = (rounded_frame_size (d) + offset
7347 + SHMEDIA_REGS_STACK_ADJUST ());
7350 pr_offset = rounded_frame_size (d);
7352 emit_insn (GEN_MOV (tmp, GEN_INT (pr_offset)));
7353 emit_insn (GEN_ADD3 (tmp, tmp, hard_frame_pointer_rtx));
7355 tmp = gen_frame_mem (Pmode, tmp);
7356 emit_insn (GEN_MOV (tmp, ra));
7357 /* Tell flow this store isn't dead. */
7361 /* Clear variables at function end. */
7364 sh_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
7365 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7367 sh_need_epilogue_known = 0;
7371 sh_builtin_saveregs (void)
7373 /* First unnamed integer register. */
7374 int first_intreg = crtl->args.info.arg_count[(int) SH_ARG_INT];
7375 /* Number of integer registers we need to save. */
7376 int n_intregs = MAX (0, NPARM_REGS (SImode) - first_intreg);
7377 /* First unnamed SFmode float reg */
7378 int first_floatreg = crtl->args.info.arg_count[(int) SH_ARG_FLOAT];
7379 /* Number of SFmode float regs to save. */
7380 int n_floatregs = MAX (0, NPARM_REGS (SFmode) - first_floatreg);
7383 alias_set_type alias_set;
7389 int pushregs = n_intregs;
7391 while (pushregs < NPARM_REGS (SImode) - 1
7392 && (CALL_COOKIE_INT_REG_GET
7393 (crtl->args.info.call_cookie,
7394 NPARM_REGS (SImode) - pushregs)
7397 crtl->args.info.call_cookie
7398 &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
7403 if (pushregs == NPARM_REGS (SImode))
7404 crtl->args.info.call_cookie
7405 |= (CALL_COOKIE_INT_REG (0, 1)
7406 | CALL_COOKIE_STACKSEQ (pushregs - 1));
7408 crtl->args.info.call_cookie
7409 |= CALL_COOKIE_STACKSEQ (pushregs);
7411 crtl->args.pretend_args_size += 8 * n_intregs;
7413 if (TARGET_SHCOMPACT)
7417 if (! TARGET_SH2E && ! TARGET_SH4 && ! TARGET_SH5)
7419 error ("__builtin_saveregs not supported by this subtarget");
7426 /* Allocate block of memory for the regs. */
7427 /* ??? If n_intregs + n_floatregs == 0, should we allocate at least 1 byte?
7428 Or can assign_stack_local accept a 0 SIZE argument? */
7429 bufsize = (n_intregs * UNITS_PER_WORD) + (n_floatregs * UNITS_PER_WORD);
7432 regbuf = gen_frame_mem (BLKmode, gen_rtx_REG (Pmode, ARG_POINTER_REGNUM));
7433 else if (n_floatregs & 1)
7437 regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
7438 addr = copy_to_mode_reg (Pmode, XEXP (regbuf, 0));
7439 emit_insn (gen_iorsi3 (addr, addr, GEN_INT (UNITS_PER_WORD)));
7440 regbuf = change_address (regbuf, BLKmode, addr);
7442 else if (STACK_BOUNDARY < 64 && TARGET_FPU_DOUBLE && n_floatregs)
7446 regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
7447 addr = copy_to_mode_reg (Pmode, plus_constant (XEXP (regbuf, 0), 4));
7448 mask = copy_to_mode_reg (Pmode, GEN_INT (-8));
7449 emit_insn (gen_andsi3 (addr, addr, mask));
7450 regbuf = change_address (regbuf, BLKmode, addr);
7453 regbuf = assign_stack_local (BLKmode, bufsize, TARGET_FPU_DOUBLE ? 64 : 0);
7454 alias_set = get_varargs_alias_set ();
7455 set_mem_alias_set (regbuf, alias_set);
7458 This is optimized to only save the regs that are necessary. Explicitly
7459 named args need not be saved. */
7461 move_block_from_reg (BASE_ARG_REG (SImode) + first_intreg,
7462 adjust_address (regbuf, BLKmode,
7463 n_floatregs * UNITS_PER_WORD),
7467 /* Return the address of the regbuf. */
7468 return XEXP (regbuf, 0);
7471 This is optimized to only save the regs that are necessary. Explicitly
7472 named args need not be saved.
7473 We explicitly build a pointer to the buffer because it halves the insn
7474 count when not optimizing (otherwise the pointer is built for each reg saved).
7476 We emit the moves in reverse order so that we can use predecrement. */
7478 fpregs = copy_to_mode_reg (Pmode,
7479 plus_constant (XEXP (regbuf, 0),
7480 n_floatregs * UNITS_PER_WORD));
7481 if (TARGET_SH4 || TARGET_SH2A_DOUBLE)
7484 for (regno = NPARM_REGS (DFmode) - 2; regno >= first_floatreg; regno -= 2)
7486 emit_insn (gen_addsi3 (fpregs, fpregs,
7487 GEN_INT (-2 * UNITS_PER_WORD)));
7488 mem = change_address (regbuf, DFmode, fpregs);
7489 emit_move_insn (mem,
7490 gen_rtx_REG (DFmode, BASE_ARG_REG (DFmode) + regno));
7492 regno = first_floatreg;
7495 emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
7496 mem = change_address (regbuf, SFmode, fpregs);
7497 emit_move_insn (mem,
7498 gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno
7499 - (TARGET_LITTLE_ENDIAN != 0)));
7503 for (regno = NPARM_REGS (SFmode) - 1; regno >= first_floatreg; regno--)
7507 emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
7508 mem = change_address (regbuf, SFmode, fpregs);
7509 emit_move_insn (mem,
7510 gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno));
7513 /* Return the address of the regbuf. */
7514 return XEXP (regbuf, 0);
7517 /* Define the `__builtin_va_list' type for the ABI. */
7520 sh_build_builtin_va_list (void)
7522 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7525 if (TARGET_SH5 || (! TARGET_SH2E && ! TARGET_SH4)
7526 || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
7527 return ptr_type_node;
7529 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
7531 f_next_o = build_decl (BUILTINS_LOCATION,
7532 FIELD_DECL, get_identifier ("__va_next_o"),
7534 f_next_o_limit = build_decl (BUILTINS_LOCATION,
7536 get_identifier ("__va_next_o_limit"),
7538 f_next_fp = build_decl (BUILTINS_LOCATION,
7539 FIELD_DECL, get_identifier ("__va_next_fp"),
7541 f_next_fp_limit = build_decl (BUILTINS_LOCATION,
7543 get_identifier ("__va_next_fp_limit"),
7545 f_next_stack = build_decl (BUILTINS_LOCATION,
7546 FIELD_DECL, get_identifier ("__va_next_stack"),
7549 DECL_FIELD_CONTEXT (f_next_o) = record;
7550 DECL_FIELD_CONTEXT (f_next_o_limit) = record;
7551 DECL_FIELD_CONTEXT (f_next_fp) = record;
7552 DECL_FIELD_CONTEXT (f_next_fp_limit) = record;
7553 DECL_FIELD_CONTEXT (f_next_stack) = record;
7555 TYPE_FIELDS (record) = f_next_o;
7556 TREE_CHAIN (f_next_o) = f_next_o_limit;
7557 TREE_CHAIN (f_next_o_limit) = f_next_fp;
7558 TREE_CHAIN (f_next_fp) = f_next_fp_limit;
7559 TREE_CHAIN (f_next_fp_limit) = f_next_stack;
7561 layout_type (record);
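/* Editor's sketch (the C rendering is an assumption; the field names
   come from the FIELD_DECLs above): user code effectively sees

     typedef struct {
       void *__va_next_o;        // next integer arg in the reg-save area
       void *__va_next_o_limit;  // end of the integer reg-save area
       void *__va_next_fp;       // next FP arg in the reg-save area
       void *__va_next_fp_limit; // end of the FP reg-save area
       void *__va_next_stack;    // next argument passed on the stack
     } __builtin_va_list;  */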
7566 /* Implement `va_start' for varargs and stdarg. */
7569 sh_va_start (tree valist, rtx nextarg)
7571 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7572 tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
7578 expand_builtin_saveregs ();
7579 std_expand_builtin_va_start (valist, nextarg);
7583 if ((! TARGET_SH2E && ! TARGET_SH4)
7584 || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
7586 std_expand_builtin_va_start (valist, nextarg);
7590 f_next_o = TYPE_FIELDS (va_list_type_node);
7591 f_next_o_limit = TREE_CHAIN (f_next_o);
7592 f_next_fp = TREE_CHAIN (f_next_o_limit);
7593 f_next_fp_limit = TREE_CHAIN (f_next_fp);
7594 f_next_stack = TREE_CHAIN (f_next_fp_limit);
7596 next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
7598 next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
7599 valist, f_next_o_limit, NULL_TREE);
7600 next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp,
7602 next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
7603 valist, f_next_fp_limit, NULL_TREE);
7604 next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
7605 valist, f_next_stack, NULL_TREE);
7607 /* Call __builtin_saveregs. */
7608 u = make_tree (sizetype, expand_builtin_saveregs ());
7609 u = fold_convert (ptr_type_node, u);
7610 t = build2 (MODIFY_EXPR, ptr_type_node, next_fp, u);
7611 TREE_SIDE_EFFECTS (t) = 1;
7612 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7614 nfp = crtl->args.info.arg_count[SH_ARG_FLOAT];
7619 u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
7620 size_int (UNITS_PER_WORD * nfp));
7621 t = build2 (MODIFY_EXPR, ptr_type_node, next_fp_limit, u);
7622 TREE_SIDE_EFFECTS (t) = 1;
7623 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7625 t = build2 (MODIFY_EXPR, ptr_type_node, next_o, u);
7626 TREE_SIDE_EFFECTS (t) = 1;
7627 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7629 nint = crtl->args.info.arg_count[SH_ARG_INT];
7634 u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
7635 size_int (UNITS_PER_WORD * nint));
7636 t = build2 (MODIFY_EXPR, ptr_type_node, next_o_limit, u);
7637 TREE_SIDE_EFFECTS (t) = 1;
7638 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7640 u = make_tree (ptr_type_node, nextarg);
7641 t = build2 (MODIFY_EXPR, ptr_type_node, next_stack, u);
7642 TREE_SIDE_EFFECTS (t) = 1;
7643 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7646 /* TYPE is a RECORD_TYPE. If there is only a single nonzero-sized
7647 member, return it. */
7649 find_sole_member (tree type)
7651 tree field, member = NULL_TREE;
7653 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
7655 if (TREE_CODE (field) != FIELD_DECL)
7657 if (!DECL_SIZE (field))
7659 if (integer_zerop (DECL_SIZE (field)))
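/* Editor's example: for 'struct s { float f; };' the loop above skips
   nothing and returns the FIELD_DECL for 'f', so sh_gimplify_va_arg_expr
   below can treat the struct like its sole float member.  */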
7667 /* Implement `va_arg'. */
7670 sh_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
7671 gimple_seq *post_p ATTRIBUTE_UNUSED)
7673 HOST_WIDE_INT size, rsize;
7674 tree tmp, pptr_type_node;
7675 tree addr, lab_over = NULL, result = NULL;
7676 int pass_by_ref = targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
7680 type = build_pointer_type (type);
7682 size = int_size_in_bytes (type);
7683 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
7684 pptr_type_node = build_pointer_type (ptr_type_node);
7686 if (! TARGET_SH5 && (TARGET_SH2E || TARGET_SH4)
7687 && ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
7689 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7690 tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
7695 f_next_o = TYPE_FIELDS (va_list_type_node);
7696 f_next_o_limit = TREE_CHAIN (f_next_o);
7697 f_next_fp = TREE_CHAIN (f_next_o_limit);
7698 f_next_fp_limit = TREE_CHAIN (f_next_fp);
7699 f_next_stack = TREE_CHAIN (f_next_fp_limit);
7701 next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
7703 next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
7704 valist, f_next_o_limit, NULL_TREE);
7705 next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp),
7706 valist, f_next_fp, NULL_TREE);
7707 next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
7708 valist, f_next_fp_limit, NULL_TREE);
7709 next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
7710 valist, f_next_stack, NULL_TREE);
7712 /* Structures with a single member with a distinct mode are passed
7713 like their member. This is relevant if the latter has a REAL_TYPE
7714 or COMPLEX_TYPE type. */
7716 while (TREE_CODE (eff_type) == RECORD_TYPE
7717 && (member = find_sole_member (eff_type))
7718 && (TREE_CODE (TREE_TYPE (member)) == REAL_TYPE
7719 || TREE_CODE (TREE_TYPE (member)) == COMPLEX_TYPE
7720 || TREE_CODE (TREE_TYPE (member)) == RECORD_TYPE))
7722 tree field_type = TREE_TYPE (member);
7724 if (TYPE_MODE (eff_type) == TYPE_MODE (field_type))
7725 eff_type = field_type;
7728 gcc_assert ((TYPE_ALIGN (eff_type)
7729 < GET_MODE_ALIGNMENT (TYPE_MODE (field_type)))
7730 || (TYPE_ALIGN (eff_type)
7731 > GET_MODE_BITSIZE (TYPE_MODE (field_type))));
7736 if (TARGET_SH4 || TARGET_SH2A_DOUBLE)
7738 pass_as_float = ((TREE_CODE (eff_type) == REAL_TYPE && size <= 8)
7739 || (TREE_CODE (eff_type) == COMPLEX_TYPE
7740 && TREE_CODE (TREE_TYPE (eff_type)) == REAL_TYPE
7745 pass_as_float = (TREE_CODE (eff_type) == REAL_TYPE && size == 4);
7748 addr = create_tmp_var (pptr_type_node, NULL);
7749 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7750 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7752 valist = build1 (INDIRECT_REF, ptr_type_node, addr);
7756 tree next_fp_tmp = create_tmp_var (TREE_TYPE (f_next_fp), NULL);
7758 bool is_double = size == 8 && TREE_CODE (eff_type) == REAL_TYPE;
7760 tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_fp));
7761 gimplify_assign (unshare_expr (addr), tmp, pre_p);
7763 gimplify_assign (unshare_expr (next_fp_tmp), valist, pre_p);
7764 tmp = next_fp_limit;
7765 if (size > 4 && !is_double)
7766 tmp = build2 (POINTER_PLUS_EXPR, TREE_TYPE (tmp),
7767 unshare_expr (tmp), size_int (4 - size));
7768 tmp = build2 (GE_EXPR, boolean_type_node,
7769 unshare_expr (next_fp_tmp), unshare_expr (tmp));
7770 cmp = build3 (COND_EXPR, void_type_node, tmp,
7771 build1 (GOTO_EXPR, void_type_node,
7772 unshare_expr (lab_false)), NULL_TREE);
7774 gimplify_and_add (cmp, pre_p);
7776 if (TYPE_ALIGN (eff_type) > BITS_PER_WORD
7777 || (is_double || size == 16))
7779 tmp = fold_convert (sizetype, next_fp_tmp);
7780 tmp = build2 (BIT_AND_EXPR, sizetype, tmp,
7781 size_int (UNITS_PER_WORD));
7782 tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node,
7783 unshare_expr (next_fp_tmp), tmp);
7784 gimplify_assign (unshare_expr (next_fp_tmp), tmp, pre_p);
7787 gimplify_and_add (cmp, pre_p);
7789 #ifdef FUNCTION_ARG_SCmode_WART
7790 if (TYPE_MODE (eff_type) == SCmode
7791 && TARGET_SH4 && TARGET_LITTLE_ENDIAN)
7793 tree subtype = TREE_TYPE (eff_type);
7797 = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
7798 imag = get_initialized_tmp_var (imag, pre_p, NULL);
7801 = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
7802 real = get_initialized_tmp_var (real, pre_p, NULL);
7804 result = build2 (COMPLEX_EXPR, eff_type, real, imag);
7805 if (type != eff_type)
7806 result = build1 (VIEW_CONVERT_EXPR, type, result);
7807 result = get_initialized_tmp_var (result, pre_p, NULL);
7809 #endif /* FUNCTION_ARG_SCmode_WART */
7811 tmp = build1 (GOTO_EXPR, void_type_node, unshare_expr (lab_over));
7812 gimplify_and_add (tmp, pre_p);
7814 tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_false));
7815 gimplify_and_add (tmp, pre_p);
7817 tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_stack));
7818 gimplify_assign (unshare_expr (addr), tmp, pre_p);
7819 gimplify_assign (unshare_expr (next_fp_tmp),
7820 unshare_expr (valist), pre_p);
7822 gimplify_assign (unshare_expr (valist),
7823 unshare_expr (next_fp_tmp), post_p);
7824 valist = next_fp_tmp;
7828 tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node,
7829 unshare_expr (next_o), size_int (rsize));
7830 tmp = build2 (GT_EXPR, boolean_type_node, tmp,
7831 unshare_expr (next_o_limit));
7832 tmp = build3 (COND_EXPR, void_type_node, tmp,
7833 build1 (GOTO_EXPR, void_type_node,
7834 unshare_expr (lab_false)),
7836 gimplify_and_add (tmp, pre_p);
7838 tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_o));
7839 gimplify_assign (unshare_expr (addr), tmp, pre_p);
7841 tmp = build1 (GOTO_EXPR, void_type_node, unshare_expr (lab_over));
7842 gimplify_and_add (tmp, pre_p);
7844 tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_false));
7845 gimplify_and_add (tmp, pre_p);
7847 if (size > 4 && ! (TARGET_SH4 || TARGET_SH2A))
7848 gimplify_assign (unshare_expr (next_o),
7849 unshare_expr (next_o_limit), pre_p);
7851 tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_stack));
7852 gimplify_assign (unshare_expr (addr), tmp, pre_p);
7857 tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_over));
7858 gimplify_and_add (tmp, pre_p);
7862 /* ??? In va-sh.h, there had been code to make values larger than
7863 size 8 indirect. This does not match the FUNCTION_ARG macros. */
7865 tmp = std_gimplify_va_arg_expr (valist, type, pre_p, NULL);
7868 gimplify_assign (result, tmp, pre_p);
7869 result = build1 (NOP_EXPR, TREE_TYPE (result), result);
7870 tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_over));
7871 gimplify_and_add (tmp, pre_p);
7877 result = build_va_arg_indirect_ref (result);
7882 /* 64-bit floating point memory transfers are paired single precision loads
7883 or stores. So the DWARF information needs fixing in little endian (unless
7884 PR=SZ=1 in FPSCR). */
7886 sh_dwarf_register_span (rtx reg)
7888 unsigned regno = REGNO (reg);
7890 if (WORDS_BIG_ENDIAN || GET_MODE (reg) != DFmode)
7894 gen_rtx_PARALLEL (VOIDmode,
7896 gen_rtx_REG (SFmode,
7897 DBX_REGISTER_NUMBER (regno+1)),
7898 gen_rtx_REG (SFmode,
7899 DBX_REGISTER_NUMBER (regno))));
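/* Editor's illustration (register names are an assumption following the
   comment above): for a little-endian DFmode value in fr0/fr1, the
   PARALLEL built here is roughly

     (parallel [(reg:SF fr1) (reg:SF fr0)])

   reflecting that the hardware moves the two SFmode halves separately.  */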
7902 static enum machine_mode
7903 sh_promote_function_mode (const_tree type, enum machine_mode mode,
7904 int *punsignedp, const_tree funtype,
7905 int for_return ATTRIBUTE_UNUSED)
7907 if (sh_promote_prototypes (funtype))
7908 return promote_mode (type, mode, punsignedp);
7914 sh_promote_prototypes (const_tree type)
7920 return ! sh_attr_renesas_p (type);
7923 /* Whether an argument must be passed by reference. On SHcompact, we
7924 pretend arguments wider than 32 bits that would have been passed in
7925 registers are passed by reference, so that an SHmedia trampoline
7926 loads them into the full 64-bit registers. */
7929 shcompact_byref (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
7930 const_tree type, bool named)
7932 unsigned HOST_WIDE_INT size;
7935 size = int_size_in_bytes (type);
7937 size = GET_MODE_SIZE (mode);
7939 if (cum->arg_count[SH_ARG_INT] < NPARM_REGS (SImode)
7941 || GET_SH_ARG_CLASS (mode) == SH_ARG_INT
7942 || (GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT
7943 && cum->arg_count[SH_ARG_FLOAT] >= NPARM_REGS (SFmode)))
7945 && !SHCOMPACT_FORCE_ON_STACK (mode, type)
7946 && !SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
7953 sh_pass_by_reference (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7954 const_tree type, bool named)
7956 if (targetm.calls.must_pass_in_stack (mode, type))
7959 /* ??? std_gimplify_va_arg_expr passes NULL for cum. That function
7960 wants to know about pass-by-reference semantics for incoming
7965 if (TARGET_SHCOMPACT)
7967 cum->byref = shcompact_byref (cum, mode, type, named);
7968 return cum->byref != 0;
7975 sh_callee_copies (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7976 const_tree type, bool named ATTRIBUTE_UNUSED)
7978 /* ??? How can it possibly be correct to return true only on the
7979 caller side of the equation? Is there someplace else in the
7980 sh backend that's magically producing the copies? */
7981 return (cum->outgoing
7982 && ((mode == BLKmode ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode))
7983 % SH_MIN_ALIGN_FOR_CALLEE_COPY == 0));
7987 sh_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7988 tree type, bool named ATTRIBUTE_UNUSED)
7993 && PASS_IN_REG_P (*cum, mode, type)
7994 && !(TARGET_SH4 || TARGET_SH2A_DOUBLE)
7995 && (ROUND_REG (*cum, mode)
7997 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
7998 : ROUND_ADVANCE (int_size_in_bytes (type)))
7999 > NPARM_REGS (mode)))
8000 words = NPARM_REGS (mode) - ROUND_REG (*cum, mode);
8002 else if (!TARGET_SHCOMPACT
8003 && SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
8004 words = NPARM_REGS (SImode) - cum->arg_count[SH_ARG_INT];
8006 return words * UNITS_PER_WORD;
8010 /* Define where to put the arguments to a function.
8011 Value is zero to push the argument on the stack,
8012 or a hard register in which to store the argument.
8014 MODE is the argument's machine mode.
8015 TYPE is the data type of the argument (as a tree).
8016 This is null for libcalls where that information may not be available.
8018 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8019 the preceding args and about the function being called.
8020 NAMED is nonzero if this argument is a named parameter
8021 (otherwise it is an extra parameter matching an ellipsis).
8023 On SH the first args are normally in registers
8024 and the rest are pushed. Any arg that starts within the first
8025 NPARM_REGS words is at least partially passed in a register unless
8026 its data type forbids. */
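/* Editor's illustration (assumes the non-Renesas SH4 ABI handled below):
   for 'void f (int a, int b, float x, double y, int c);' the integer and
   float counters advance independently, so a, b and c get consecutive
   integer argument registers while x and y get FP registers; once a
   counter exhausts its NPARM_REGS quota, this function returns zero and
   the argument goes on the stack.  */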
8030 sh_function_arg (CUMULATIVE_ARGS *ca, enum machine_mode mode,
8031 tree type, int named)
8033 if (! TARGET_SH5 && mode == VOIDmode)
8034 return GEN_INT (ca->renesas_abi ? 1 : 0);
8037 && PASS_IN_REG_P (*ca, mode, type)
8038 && (named || ! (TARGET_HITACHI || ca->renesas_abi)))
8042 if (mode == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN
8043 && (! FUNCTION_ARG_SCmode_WART || (ROUND_REG (*ca, mode) & 1)))
8045 rtx r1 = gen_rtx_EXPR_LIST (VOIDmode,
8046 gen_rtx_REG (SFmode,
8048 + (ROUND_REG (*ca, mode) ^ 1)),
8050 rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
8051 gen_rtx_REG (SFmode,
8053 + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
8055 return gen_rtx_PARALLEL (SCmode, gen_rtvec (2, r1, r2));
8058 /* If the alignment of a DF value causes an SF register to be
8059 skipped, we will use that skipped register for the next SF value. */
8061 if ((TARGET_HITACHI || ca->renesas_abi)
8062 && ca->free_single_fp_reg
8064 return gen_rtx_REG (mode, ca->free_single_fp_reg);
8066 regno = (BASE_ARG_REG (mode) + ROUND_REG (*ca, mode))
8067 ^ (mode == SFmode && TARGET_SH4
8068 && TARGET_LITTLE_ENDIAN != 0
8069 && ! TARGET_HITACHI && ! ca->renesas_abi);
8070 return gen_rtx_REG (mode, regno);
8076 if (mode == VOIDmode && TARGET_SHCOMPACT)
8077 return GEN_INT (ca->call_cookie);
8079 /* The following test assumes unnamed arguments are promoted to DFmode. */
8081 if (mode == SFmode && ca->free_single_fp_reg)
8082 return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode, ca->free_single_fp_reg);
8084 if ((GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT)
8085 && (named || ! ca->prototype_p)
8086 && ca->arg_count[(int) SH_ARG_FLOAT] < NPARM_REGS (SFmode))
8088 if (! ca->prototype_p && TARGET_SHMEDIA)
8089 return SH5_PROTOTYPELESS_FLOAT_ARG (*ca, mode);
8091 return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode,
8093 + ca->arg_count[(int) SH_ARG_FLOAT]);
8096 if (ca->arg_count[(int) SH_ARG_INT] < NPARM_REGS (SImode)
8097 && (! TARGET_SHCOMPACT
8098 || (! SHCOMPACT_FORCE_ON_STACK (mode, type)
8099 && ! SH5_WOULD_BE_PARTIAL_NREGS (*ca, mode,
8102 return gen_rtx_REG (mode, (FIRST_PARM_REG
8103 + ca->arg_count[(int) SH_ARG_INT]));
8112 /* Update the data in CUM to advance over an argument
8113 of mode MODE and data type TYPE.
8114 (TYPE is null for libcalls where that information may not be available.) */
8118 sh_function_arg_advance (CUMULATIVE_ARGS *ca, enum machine_mode mode,
8119 tree type, int named)
8123 else if (TARGET_SH5)
8125 tree type2 = (ca->byref && type
8128 enum machine_mode mode2 = (ca->byref && type
8131 int dwords = ((ca->byref
8134 ? int_size_in_bytes (type2)
8135 : GET_MODE_SIZE (mode2)) + 7) / 8;
8136 int numregs = MIN (dwords, NPARM_REGS (SImode)
8137 - ca->arg_count[(int) SH_ARG_INT]);
8141 ca->arg_count[(int) SH_ARG_INT] += numregs;
8142 if (TARGET_SHCOMPACT
8143 && SHCOMPACT_FORCE_ON_STACK (mode2, type2))
8146 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
8148 /* N.B. We want this also for outgoing. */
8149 ca->stack_regs += numregs;
8154 ca->stack_regs += numregs;
8155 ca->byref_regs += numregs;
8159 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
8163 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
8166 else if (dwords > numregs)
8168 int pushregs = numregs;
8170 if (TARGET_SHCOMPACT)
8171 ca->stack_regs += numregs;
8172 while (pushregs < NPARM_REGS (SImode) - 1
8173 && (CALL_COOKIE_INT_REG_GET
8175 NPARM_REGS (SImode) - pushregs)
8179 &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
8183 if (numregs == NPARM_REGS (SImode))
8185 |= CALL_COOKIE_INT_REG (0, 1)
8186 | CALL_COOKIE_STACKSEQ (numregs - 1);
8189 |= CALL_COOKIE_STACKSEQ (numregs);
8192 if (GET_SH_ARG_CLASS (mode2) == SH_ARG_FLOAT
8193 && (named || ! ca->prototype_p))
8195 if (mode2 == SFmode && ca->free_single_fp_reg)
8196 ca->free_single_fp_reg = 0;
8197 else if (ca->arg_count[(int) SH_ARG_FLOAT]
8198 < NPARM_REGS (SFmode))
8201 = MIN ((GET_MODE_SIZE (mode2) + 7) / 8 * 2,
8203 - ca->arg_count[(int) SH_ARG_FLOAT]);
8205 ca->arg_count[(int) SH_ARG_FLOAT] += numfpregs;
8207 if (TARGET_SHCOMPACT && ! ca->prototype_p)
8209 if (ca->outgoing && numregs > 0)
8213 |= (CALL_COOKIE_INT_REG
8214 (ca->arg_count[(int) SH_ARG_INT]
8215 - numregs + ((numfpregs - 2) / 2),
8216 4 + (ca->arg_count[(int) SH_ARG_FLOAT]
8219 while (numfpregs -= 2);
8221 else if (mode2 == SFmode && (named)
8222 && (ca->arg_count[(int) SH_ARG_FLOAT]
8223 < NPARM_REGS (SFmode)))
8224 ca->free_single_fp_reg
8225 = FIRST_FP_PARM_REG - numfpregs
8226 + ca->arg_count[(int) SH_ARG_FLOAT] + 1;
8232 if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
8234 /* Note that we've used the skipped register. */
8235 if (mode == SFmode && ca->free_single_fp_reg)
8237 ca->free_single_fp_reg = 0;
8240 /* When we have a DF after an SF, there's an SF register that gets
8241 skipped in order to align the DF value. We note this skipped
8242 register, because the next SF value will use it, and not the
8243 SF that follows the DF. */
8245 && ROUND_REG (*ca, DFmode) != ROUND_REG (*ca, SFmode))
8247 ca->free_single_fp_reg = (ROUND_REG (*ca, SFmode)
8248 + BASE_ARG_REG (mode));
8252 if (! ((TARGET_SH4 || TARGET_SH2A) || ca->renesas_abi)
8253 || PASS_IN_REG_P (*ca, mode, type))
8254 (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
8255 = (ROUND_REG (*ca, mode)
8257 ? ROUND_ADVANCE (int_size_in_bytes (type))
8258 : ROUND_ADVANCE (GET_MODE_SIZE (mode)))));
8261 /* The Renesas calling convention doesn't quite fit into this scheme since
8262 the address is passed like an invisible argument, but one that is always
8263 passed in memory. */
8265 sh_struct_value_rtx (tree fndecl, int incoming ATTRIBUTE_UNUSED)
8267 if (TARGET_HITACHI || sh_attr_renesas_p (fndecl))
8269 return gen_rtx_REG (Pmode, 2);
8272 /* Worker function for TARGET_RETURN_IN_MEMORY. */
8275 sh_return_in_memory (const_tree type, const_tree fndecl)
8279 if (TYPE_MODE (type) == BLKmode)
8280 return ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)) > 8;
8282 return GET_MODE_SIZE (TYPE_MODE (type)) > 8;
8286 return (TYPE_MODE (type) == BLKmode
8287 || ((TARGET_HITACHI || sh_attr_renesas_p (fndecl))
8288 && TREE_CODE (type) == RECORD_TYPE));
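/* Editor's examples for the tests above: on SH5, any type wider than
   8 bytes is returned in memory; elsewhere, a BLKmode aggregate is, and
   with -mrenesas or the 'renesas' attribute every RECORD_TYPE is
   returned in memory, even one small enough for a scalar mode.  */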
8292 /* We actually emit the code in sh_expand_prologue. We used to use
8293 a static variable to flag that we need to emit this code, but that
8294 doesn't work when inlining, when functions are deferred and then emitted
8295 later. Fortunately, we already have two flags that are part of struct
8296 function that tell if a function uses varargs or stdarg. */
8298 sh_setup_incoming_varargs (CUMULATIVE_ARGS *ca,
8299 enum machine_mode mode,
8301 int *pretend_arg_size,
8302 int second_time ATTRIBUTE_UNUSED)
8304 gcc_assert (cfun->stdarg);
8305 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
8307 int named_parm_regs, anon_parm_regs;
8309 named_parm_regs = (ROUND_REG (*ca, mode)
8311 ? ROUND_ADVANCE (int_size_in_bytes (type))
8312 : ROUND_ADVANCE (GET_MODE_SIZE (mode))));
8313 anon_parm_regs = NPARM_REGS (SImode) - named_parm_regs;
8314 if (anon_parm_regs > 0)
8315 *pretend_arg_size = anon_parm_regs * 4;
8320 sh_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
8326 sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *ca)
8328 return ! (TARGET_HITACHI || ca->renesas_abi) && ! TARGET_SH5;
8332 /* Define the offset between two registers, one to be eliminated, and
8333 the other its replacement, at the start of a routine. */
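/* Editor's summary of the cases handled below: eliminating
   ARG_POINTER_REGNUM to the hard frame or stack pointer yields the
   saved-register space plus the frame (plus SH5 byref argument space);
   FRAME_POINTER_REGNUM eliminations yield rounded_frame_size (0); the
   RETURN_ADDRESS_POINTER_REGNUM cases locate PR's save slot.  */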
8336 initial_elimination_offset (int from, int to)
8339 int regs_saved_rounding = 0;
8340 int total_saved_regs_space;
8341 int total_auto_space;
8342 int save_flags = target_flags;
8344 HARD_REG_SET live_regs_mask;
8346 shmedia_space_reserved_for_target_registers = false;
8347 regs_saved = calc_live_regs (&live_regs_mask);
8348 regs_saved += SHMEDIA_REGS_STACK_ADJUST ();
8350 if (shmedia_reserve_space_for_target_registers_p (regs_saved, &live_regs_mask))
8352 shmedia_space_reserved_for_target_registers = true;
8353 regs_saved += shmedia_target_regs_stack_adjust (&live_regs_mask);
8356 if (TARGET_SH5 && regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT))
8357 regs_saved_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
8358 - regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT));
8360 total_auto_space = rounded_frame_size (regs_saved) - regs_saved_rounding;
8361 copy_flags = target_flags;
8362 target_flags = save_flags;
8364 total_saved_regs_space = regs_saved + regs_saved_rounding;
8366 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8367 return total_saved_regs_space + total_auto_space
8368 + crtl->args.info.byref_regs * 8;
8370 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
8371 return total_saved_regs_space + total_auto_space
8372 + crtl->args.info.byref_regs * 8;
8374 /* Initial gap between fp and sp is 0. */
8375 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
8378 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
8379 return rounded_frame_size (0);
8381 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8382 return rounded_frame_size (0);
8384 gcc_assert (from == RETURN_ADDRESS_POINTER_REGNUM
8385 && (to == HARD_FRAME_POINTER_REGNUM
8386 || to == STACK_POINTER_REGNUM));
8389 int n = total_saved_regs_space;
8390 int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
8391 save_schedule schedule;
8394 n += total_auto_space;
8396 /* If it wasn't saved, there's not much we can do. */
8397 if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
8400 target_flags = copy_flags;
8402 sh5_schedule_saves (&live_regs_mask, &schedule, n);
8403 for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
8404 if (entry->reg == pr_reg)
8406 target_flags = save_flags;
8407 return entry->offset;
8412 return total_auto_space;
8415 /* Parse the -mfixed-range= option string. */
8417 sh_fix_range (const char *const_str)
8420 char *str, *dash, *comma;
8422 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
8423 REG2 are either register names or register numbers. The effect
8424 of this option is to mark the registers in the range from REG1 to
8425 REG2 as ``fixed'' so they won't be used by the compiler. */
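/* Editor's example (hypothetical option value): -mfixed-range=r4-r6,r13-r13
   makes decode_reg_name return the numbers for r4 and r6 (and r13 twice),
   and the loop at the end marks r4, r5, r6 and r13 as both fixed and
   call-used, keeping the register allocator away from them.  */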
8427 i = strlen (const_str);
8428 str = (char *) alloca (i + 1);
8429 memcpy (str, const_str, i + 1);
8433 dash = strchr (str, '-');
8436 warning (0, "value of -mfixed-range must have form REG1-REG2");
8440 comma = strchr (dash + 1, ',');
8444 first = decode_reg_name (str);
8447 warning (0, "unknown register name: %s", str);
8451 last = decode_reg_name (dash + 1);
8454 warning (0, "unknown register name: %s", dash + 1);
8462 warning (0, "%s-%s is an empty range", str, dash + 1);
8466 for (i = first; i <= last; ++i)
8467 fixed_regs[i] = call_used_regs[i] = 1;
8477 /* Insert any deferred function attributes from earlier pragmas. */
8479 sh_insert_attributes (tree node, tree *attributes)
8483 if (TREE_CODE (node) != FUNCTION_DECL)
8486 /* We are only interested in fields. */
8490 /* Append the attributes to the deferred attributes. */
8491 *sh_deferred_function_attributes_tail = *attributes;
8492 attrs = sh_deferred_function_attributes;
8496 /* Some attributes imply or require the interrupt attribute. */
8497 if (!lookup_attribute ("interrupt_handler", attrs)
8498 && !lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (node)))
8500 /* If we have a trapa_handler, but no interrupt_handler attribute,
8501 insert an interrupt_handler attribute. */
8502 if (lookup_attribute ("trapa_handler", attrs) != NULL_TREE)
8503 /* We can't use sh_pr_interrupt here because that's not in the
8506 = tree_cons (get_identifier ("interrupt_handler"), NULL_TREE, attrs);
8507 /* However, for sp_switch, trap_exit, nosave_low_regs and resbank,
8508 if the interrupt attribute is missing, we ignore the attribute and warn. */
8510 else if (lookup_attribute ("sp_switch", attrs)
8511 || lookup_attribute ("trap_exit", attrs)
8512 || lookup_attribute ("nosave_low_regs", attrs)
8513 || lookup_attribute ("resbank", attrs))
8517 for (tail = attributes; attrs; attrs = TREE_CHAIN (attrs))
8519 if (is_attribute_p ("sp_switch", TREE_PURPOSE (attrs))
8520 || is_attribute_p ("trap_exit", TREE_PURPOSE (attrs))
8521 || is_attribute_p ("nosave_low_regs", TREE_PURPOSE (attrs))
8522 || is_attribute_p ("resbank", TREE_PURPOSE (attrs)))
8523 warning (OPT_Wattributes,
8524 "%qE attribute only applies to interrupt functions",
8525 TREE_PURPOSE (attrs));
8528 *tail = tree_cons (TREE_PURPOSE (attrs), NULL_TREE,
8530 tail = &TREE_CHAIN (*tail);
8533 attrs = *attributes;
8537 /* Install the processed list. */
8538 *attributes = attrs;
8540 /* Clear deferred attributes. */
8541 sh_deferred_function_attributes = NULL_TREE;
8542 sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
8547 /* Supported attributes:
8549 interrupt_handler -- specifies this function is an interrupt handler.
8551 trapa_handler -- like above, but don't save all registers.
8553 sp_switch -- specifies an alternate stack for an interrupt handler to run on.
8556 trap_exit -- use a trapa to exit an interrupt function instead of an rte instruction.
8559 nosave_low_regs -- don't save r0..r7 in an interrupt handler.
8560 This is useful on the SH3 and upwards,
8561 which have a separate set of low regs for User and Supervisor modes.
8562 This should only be used for the lowest level of interrupts. Higher levels
8563 of interrupts must save the registers in case they themselves are interrupted.
8566 renesas -- use Renesas calling/layout conventions (functions and structures).
8569 resbank -- in case of an ISR, use a register bank to save registers
8570 R0-R14, MACH, MACL, GBR and PR. This is useful only on SH2A targets. */
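/* Editor's usage illustration (the function names and vector/trap
   numbers are hypothetical; the attributes are the ones documented
   above):

     void __attribute__ ((interrupt_handler, nosave_low_regs))
     timer_isr (void);

     void __attribute__ ((interrupt_handler, sp_switch ("alt_stack"),
                          trap_exit (12)))
     dma_isr (void);

   Here "alt_stack" must name a variable holding the address of the
   stack to switch to, as checked by sh_handle_sp_switch_attribute
   below.  */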
8573 /* Handle a 'resbank' attribute. */
8575 sh_handle_resbank_handler_attribute (tree * node, tree name,
8576 tree args ATTRIBUTE_UNUSED,
8577 int flags ATTRIBUTE_UNUSED,
8578 bool * no_add_attrs)
8582 warning (OPT_Wattributes, "%qE attribute is supported only for SH2A",
8584 *no_add_attrs = true;
8586 if (TREE_CODE (*node) != FUNCTION_DECL)
8588 warning (OPT_Wattributes, "%qE attribute only applies to functions",
8590 *no_add_attrs = true;
8596 /* Handle an "interrupt_handler" attribute; arguments as in
8597 struct attribute_spec.handler. */
8599 sh_handle_interrupt_handler_attribute (tree *node, tree name,
8600 tree args ATTRIBUTE_UNUSED,
8601 int flags ATTRIBUTE_UNUSED,
8604 if (TREE_CODE (*node) != FUNCTION_DECL)
8606 warning (OPT_Wattributes, "%qE attribute only applies to functions",
8608 *no_add_attrs = true;
8610 else if (TARGET_SHCOMPACT)
8612 error ("attribute interrupt_handler is not compatible with -m5-compact");
8613 *no_add_attrs = true;
8619 /* Handle a 'function_vector' attribute; arguments as in
8620 struct attribute_spec.handler. */
8622 sh2a_handle_function_vector_handler_attribute (tree * node, tree name,
8623 tree args ATTRIBUTE_UNUSED,
8624 int flags ATTRIBUTE_UNUSED,
8625 bool * no_add_attrs)
8629 warning (OPT_Wattributes, "%qE attribute only applies to SH2A",
8631 *no_add_attrs = true;
8633 else if (TREE_CODE (*node) != FUNCTION_DECL)
8635 warning (OPT_Wattributes, "%qE attribute only applies to functions",
8637 *no_add_attrs = true;
8639 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
8641 /* The argument must be a constant integer. */
8642 warning (OPT_Wattributes,
8643 "%qE attribute argument not an integer constant",
8645 *no_add_attrs = true;
8647 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
8649 /* The argument value must be between 0 and 255. */
8650 warning (OPT_Wattributes,
8651 "%qE attribute argument should be between 0 and 255",
8653 *no_add_attrs = true;
8658 /* Returns 1 if the function referenced by the symbol X has been
8659 assigned the attribute 'function_vector'. */
8661 sh2a_is_function_vector_call (rtx x)
8663 if (GET_CODE (x) == SYMBOL_REF
8664 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
8666 tree tr = SYMBOL_REF_DECL (x);
8668 if (sh2a_function_vector_p (tr))
8675 /* Returns the function vector number, if the attribute
8676 'function_vector' is assigned, otherwise returns zero. */
8678 sh2a_get_function_vector_number (rtx x)
8683 if ((GET_CODE (x) == SYMBOL_REF)
8684 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
8686 t = SYMBOL_REF_DECL (x);
8688 if (TREE_CODE (t) != FUNCTION_DECL)
8691 list = SH_ATTRIBUTES (t);
8694 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
8696 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
8700 list = TREE_CHAIN (list);
8709 /* Handle an "sp_switch" attribute; arguments as in
8710 struct attribute_spec.handler. */
8712 sh_handle_sp_switch_attribute (tree *node, tree name, tree args,
8713 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
8715 if (TREE_CODE (*node) != FUNCTION_DECL)
8717 warning (OPT_Wattributes, "%qE attribute only applies to functions",
8719 *no_add_attrs = true;
8721 else if (TREE_CODE (TREE_VALUE (args)) != STRING_CST)
8723 /* The argument must be a constant string. */
8724 warning (OPT_Wattributes, "%qE attribute argument not a string constant",
8726 *no_add_attrs = true;
8732 /* Handle a "trap_exit" attribute; arguments as in
8733 struct attribute_spec.handler. */
8735 sh_handle_trap_exit_attribute (tree *node, tree name, tree args,
8736 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
8738 if (TREE_CODE (*node) != FUNCTION_DECL)
8740 warning (OPT_Wattributes, "%qE attribute only applies to functions",
8742 *no_add_attrs = true;
8744 /* The argument specifies a trap number to be used in a trapa instruction
8745 at function exit (instead of an rte instruction). */
8746 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
8748 /* The argument must be a constant integer. */
8749 warning (OPT_Wattributes, "%qE attribute argument not an "
8750 "integer constant", name);
8751 *no_add_attrs = true;
8758 sh_handle_renesas_attribute (tree *node ATTRIBUTE_UNUSED,
8759 tree name ATTRIBUTE_UNUSED,
8760 tree args ATTRIBUTE_UNUSED,
8761 int flags ATTRIBUTE_UNUSED,
8762 bool *no_add_attrs ATTRIBUTE_UNUSED)
8767 /* True if __attribute__((renesas)) or -mrenesas. */
8769 sh_attr_renesas_p (const_tree td)
8776 td = TREE_TYPE (td);
8777 if (td == error_mark_node)
8779 return (lookup_attribute ("renesas", TYPE_ATTRIBUTES (td))
8783 /* True if __attribute__((renesas)) or -mrenesas, for the current
8784 function.  */
8786 sh_cfun_attr_renesas_p (void)
8788 return sh_attr_renesas_p (current_function_decl);
8792 sh_cfun_interrupt_handler_p (void)
8794 return (lookup_attribute ("interrupt_handler",
8795 DECL_ATTRIBUTES (current_function_decl))
8799 /* Returns 1 if FUNC has been assigned the attribute
8800 "function_vector". */
8802 sh2a_function_vector_p (tree func)
8805 if (TREE_CODE (func) != FUNCTION_DECL)
8808 list = SH_ATTRIBUTES (func);
8811 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
8814 list = TREE_CHAIN (list);
8819 /* Returns TRUE if the current function has the "resbank" attribute.  */
8822 sh_cfun_resbank_handler_p (void)
8824 return ((lookup_attribute ("resbank",
8825 DECL_ATTRIBUTES (current_function_decl))
8827 && (lookup_attribute ("interrupt_handler",
8828 DECL_ATTRIBUTES (current_function_decl))
8829 != NULL_TREE) && TARGET_SH2A);
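/* Illustrative use (a sketch, not taken from this file): as checked
   above, "resbank" only takes effect together with "interrupt_handler"
   on SH2A, where the handler can rely on the register banks for
   saving registers:

     void isr (void) __attribute__ ((interrupt_handler, resbank));  */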
8832 /* Implement TARGET_CHECK_PCH_TARGET_FLAGS. */
8835 sh_check_pch_target_flags (int old_flags)
8837 if ((old_flags ^ target_flags) & (MASK_SH1 | MASK_SH2 | MASK_SH3
8838 | MASK_SH_E | MASK_HARD_SH4
8839 | MASK_FPU_SINGLE | MASK_SH4))
8840 return _("created and used with different architectures / ABIs");
8841 if ((old_flags ^ target_flags) & MASK_HITACHI)
8842 return _("created and used with different ABIs");
8843 if ((old_flags ^ target_flags) & MASK_LITTLE_ENDIAN)
8844 return _("created and used with different endianness");
8848 /* Predicates used by the templates. */
8850 /* Returns 1 if OP is MACL, MACH or PR. The input must be a REG rtx.
8851 Used only in general_movsrc_operand. */
8854 system_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8866 /* Nonzero if OP is a floating point value with value 0.0. */
8869 fp_zero_operand (rtx op)
8873 if (GET_MODE (op) != SFmode)
8876 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
8877 return REAL_VALUES_EQUAL (r, dconst0) && ! REAL_VALUE_MINUS_ZERO (r);
8880 /* Nonzero if OP is a floating point value with value 1.0. */
8883 fp_one_operand (rtx op)
8887 if (GET_MODE (op) != SFmode)
8890 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
8891 return REAL_VALUES_EQUAL (r, dconst1);
8894 /* In general mode switching is used. If we are
8895 compiling without -mfmovd, movsf_ie isn't taken into account for
8896 mode switching. We could check in machine_dependent_reorg for
8897 cases where we know we are in single precision mode, but there is
8898 no interface to find that out during reload, so we must avoid
8899 choosing an fldi alternative during reload and thus failing to
8900 allocate a scratch register for the constant loading. */
8908 tertiary_reload_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8910 enum rtx_code code = GET_CODE (op);
8911 return code == MEM || (TARGET_SH4 && code == CONST_DOUBLE);
8914 /* Return the TLS type for TLS symbols, 0 otherwise.  */
8916 tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8918 if (GET_CODE (op) != SYMBOL_REF)
8919 return TLS_MODEL_NONE;
8920 return SYMBOL_REF_TLS_MODEL (op);
8923 /* Return the destination address of a branch. */
8926 branch_dest (rtx branch)
8928 rtx dest = SET_SRC (PATTERN (branch));
8931 if (GET_CODE (dest) == IF_THEN_ELSE)
8932 dest = XEXP (dest, 1);
8933 dest = XEXP (dest, 0);
8934 dest_uid = INSN_UID (dest);
8935 return INSN_ADDRESSES (dest_uid);
8938 /* Return nonzero if REG is not used after INSN.
8939 We assume REG is a reload reg, and therefore does
8940 not live past labels. It may live past calls or jumps though. */
8942 reg_unused_after (rtx reg, rtx insn)
8947 /* If the reg is set by this instruction, then it is safe for our
8948 case. Disregard the case where this is a store to memory, since
8949 we are checking a register used in the store address. */
8950 set = single_set (insn);
8951 if (set && !MEM_P (SET_DEST (set))
8952 && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8955 while ((insn = NEXT_INSN (insn)))
8961 code = GET_CODE (insn);
8964 /* If this is a label that existed before reload, then the register
8965 is dead here.  However, if this is a label added by reorg, then
8966 the register may still be live here. We can't tell the difference,
8967 so we just ignore labels completely. */
8968 if (code == CODE_LABEL)
8973 if (code == JUMP_INSN)
8976 /* If this is a sequence, we must handle them all at once.
8977 We could have for instance a call that sets the target register,
8978 and an insn in a delay slot that uses the register. In this case,
8979 we must return 0. */
8980 else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
8985 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8987 rtx this_insn = XVECEXP (PATTERN (insn), 0, i);
8988 rtx set = single_set (this_insn);
8990 if (CALL_P (this_insn))
8992 else if (JUMP_P (this_insn))
8994 if (INSN_ANNULLED_BRANCH_P (this_insn))
8999 if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
9001 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
9003 if (!MEM_P (SET_DEST (set)))
9009 && reg_overlap_mentioned_p (reg, PATTERN (this_insn)))
9014 else if (code == JUMP_INSN)
9018 set = single_set (insn);
9019 if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
9021 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
9022 return !MEM_P (SET_DEST (set));
9023 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
9026 if (code == CALL_INSN && call_really_used_regs[REGNO (reg)])
9034 static GTY(()) rtx fpscr_rtx;
9036 get_fpscr_rtx (void)
9040 fpscr_rtx = gen_rtx_REG (PSImode, FPSCR_REG);
9041 REG_USERVAR_P (fpscr_rtx) = 1;
9042 mark_user_reg (fpscr_rtx);
9044 if (! reload_completed || mdep_reorg_phase != SH_AFTER_MDEP_REORG)
9045 mark_user_reg (fpscr_rtx);
9049 static GTY(()) tree fpscr_values;
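/* Presumably (an assumption about the runtime, not stated here):
   __fpscr_values is a two-element array supplied by the runtime
   library (the decl built below is external), holding the FPSCR
   settings for the two floating-point precision modes, and INDEX
   selects which element emit_fpu_switch loads.  */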
9052 emit_fpu_switch (rtx scratch, int index)
9056 if (fpscr_values == NULL)
9060 t = build_index_type (integer_one_node);
9061 t = build_array_type (integer_type_node, t);
9062 t = build_decl (BUILTINS_LOCATION,
9063 VAR_DECL, get_identifier ("__fpscr_values"), t);
9064 DECL_ARTIFICIAL (t) = 1;
9065 DECL_IGNORED_P (t) = 1;
9066 DECL_EXTERNAL (t) = 1;
9067 TREE_STATIC (t) = 1;
9068 TREE_PUBLIC (t) = 1;
9074 src = DECL_RTL (fpscr_values);
9075 if (!can_create_pseudo_p ())
9077 emit_move_insn (scratch, XEXP (src, 0));
9079 emit_insn (gen_addsi3 (scratch, scratch, GEN_INT (index * 4)));
9080 src = adjust_automodify_address (src, PSImode, scratch, index * 4);
9083 src = adjust_address (src, PSImode, index * 4);
9085 dst = get_fpscr_rtx ();
9086 emit_move_insn (dst, src);
9090 emit_sf_insn (rtx pat)
9096 emit_df_insn (rtx pat)
9102 expand_sf_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
9104 emit_sf_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
9108 expand_sf_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
9110 emit_sf_insn ((*fun) (operands[0], operands[1], operands[2],
9115 expand_df_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
9117 emit_df_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
9121 expand_df_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
9123 emit_df_insn ((*fun) (operands[0], operands[1], operands[2],
9127 static rtx get_free_reg (HARD_REG_SET);
9129 /* This function returns a register to use for loading the address from
9130 which to load the fpscr.  Currently it always returns r1 or r7, but when we are
9131 able to use pseudo registers after combine, or have a better mechanism
9132 for choosing a register, it should be done here. */
9133 /* REGS_LIVE is the liveness information for the point for which we
9134 need this allocation. In some bare-bones exit blocks, r1 is live at the
9135 start. We can even have all of r0..r3 being live:
9136 __complex__ long long f (double d) { if (d == 0) return 2; else return 3; }
9137 The INSN before which new insns are placed will clobber the register
9138 we return.  If a basic block consists only of setting the return value
9139 register to a pseudo and using that register, the return value is not
9140 live before or after this block, yet we'll insert our insns right in
9141 the middle.  */
9144 get_free_reg (HARD_REG_SET regs_live)
9146 if (! TEST_HARD_REG_BIT (regs_live, 1))
9147 return gen_rtx_REG (Pmode, 1);
9149 /* Hard reg 1 is live; since this is a SMALL_REGISTER_CLASSES target,
9150 there shouldn't be anything but a jump before the function end. */
9151 gcc_assert (!TEST_HARD_REG_BIT (regs_live, 7));
9152 return gen_rtx_REG (Pmode, 7);
9155 /* This function will set the fpscr from memory.
9156 MODE is the mode we are setting it to. */
9158 fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
9160 enum attr_fp_mode fp_mode = (enum attr_fp_mode) mode;
9161 enum attr_fp_mode norm_mode = ACTUAL_NORMAL_MODE (FP_MODE);
9164 addr_reg = !can_create_pseudo_p () ? get_free_reg (regs_live) : NULL_RTX;
9165 emit_fpu_switch (addr_reg, fp_mode == norm_mode);
9168 /* Is the given character a logical line separator for the assembler? */
9169 #ifndef IS_ASM_LOGICAL_LINE_SEPARATOR
9170 #define IS_ASM_LOGICAL_LINE_SEPARATOR(C, STR) ((C) == ';')
9174 sh_insn_length_adjustment (rtx insn)
9176 /* Instructions with unfilled delay slots take up an extra two bytes for
9177 the nop in the delay slot. */
9178 if (((NONJUMP_INSN_P (insn)
9179 && GET_CODE (PATTERN (insn)) != USE
9180 && GET_CODE (PATTERN (insn)) != CLOBBER)
9183 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
9184 && GET_CODE (PATTERN (insn)) != ADDR_VEC))
9185 && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE
9186 && get_attr_needs_delay_slot (insn) == NEEDS_DELAY_SLOT_YES)
9189 /* SH2e has a bug that prevents the use of annulled branches, so if
9190 the delay slot is not filled, we'll have to put a NOP in it. */
9191 if (sh_cpu_attr == CPU_SH2E
9193 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
9194 && GET_CODE (PATTERN (insn)) != ADDR_VEC
9195 && get_attr_type (insn) == TYPE_CBRANCH
9196 && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE)
9199 /* sh-dsp parallel processing insns take four bytes instead of two.  */
9201 if (NONJUMP_INSN_P (insn))
9204 rtx body = PATTERN (insn);
9207 int maybe_label = 1;
9209 if (GET_CODE (body) == ASM_INPUT)
9210 templ = XSTR (body, 0);
9211 else if (asm_noperands (body) >= 0)
9213 = decode_asm_operands (body, NULL, NULL, NULL, NULL, NULL);
9222 while (c == ' ' || c == '\t');
9223 /* all sh-dsp parallel-processing insns start with p.
9224 The only non-ppi sh insn starting with p is pref.
9225 The only ppi starting with pr is prnd. */
9226 if ((c == 'p' || c == 'P') && strncasecmp ("re", templ, 2))
9228 /* The repeat pseudo-insn expands to three insns, a total of
9229 six bytes in size. */
9230 else if ((c == 'r' || c == 'R')
9231 && ! strncasecmp ("epeat", templ, 5))
9233 while (c && c != '\n'
9234 && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c, templ))
9236 /* If this is a label, it is obviously not a ppi insn. */
9237 if (c == ':' && maybe_label)
9242 else if (c == '\'' || c == '"')
9247 maybe_label = c != ':';
9255 /* Return TRUE for a valid displacement for the REG+disp addressing
9256 with MODE.  */
9258 /* ??? The SH2e does not have the REG+disp addressing mode when loading values
9259 into the FRx registers. We implement this by setting the maximum offset
9260 to zero when the value is SFmode. This also restricts loading of SFmode
9261 values into the integer registers, but that can't be helped. */
9263 /* The SH allows a displacement in a QI or HI mode address, but only
9264 when the other operand is R0.  GCC doesn't handle this very well, so
9265 we forgo all traces of QI or HI mode displacement addressing.
9267 A legitimate index for a QI or HI is 0, SI can be any number 0..63,
9268 DI can be any number 0..60. */
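/* For instance (illustrative): under the non-SHmedia checks below, an
   SImode access @(8,Rn) is accepted (8 < 64 and 8 & 3 == 0), while
   @(6,Rn) is rejected as misaligned and, leaving aside SH2A's longer
   displacements, @(64,Rn) is rejected as out of range.  */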
9271 sh_legitimate_index_p (enum machine_mode mode, rtx op)
9273 if (CONST_INT_P (op))
9279 /* Check if this is the address of an unaligned load / store.  */
9280 if (mode == VOIDmode)
9281 return CONST_OK_FOR_I06 (INTVAL (op));
9283 size = GET_MODE_SIZE (mode);
9284 return (!(INTVAL (op) & (size - 1))
9285 && INTVAL (op) >= -512 * size
9286 && INTVAL (op) < 512 * size);
9291 if (GET_MODE_SIZE (mode) == 1
9292 && (unsigned) INTVAL (op) < 4096)
9296 if ((GET_MODE_SIZE (mode) == 4
9297 && (unsigned) INTVAL (op) < 64
9298 && !(INTVAL (op) & 3)
9299 && !(TARGET_SH2E && mode == SFmode))
9300 || (GET_MODE_SIZE (mode) == 4
9301 && (unsigned) INTVAL (op) < 16383
9302 && !(INTVAL (op) & 3) && TARGET_SH2A))
9305 if ((GET_MODE_SIZE (mode) == 8
9306 && (unsigned) INTVAL (op) < 60
9307 && !(INTVAL (op) & 3)
9308 && !((TARGET_SH4 || TARGET_SH2A) && mode == DFmode))
9309 || ((GET_MODE_SIZE (mode)==8)
9310 && (unsigned) INTVAL (op) < 8192
9311 && !(INTVAL (op) & (TARGET_SH2A_DOUBLE ? 7 : 3))
9312 && (TARGET_SH2A && mode == DFmode)))
9319 /* Recognize an RTL expression that is a valid memory address for
9320 an instruction.
9321 The MODE argument is the machine mode for the MEM expression
9322 that wants to use this address.
9330 sh_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
9332 if (MAYBE_BASE_REGISTER_RTX_P (x, strict))
9334 else if ((GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
9336 && MAYBE_BASE_REGISTER_RTX_P (XEXP (x, 0), strict))
9338 else if (GET_CODE (x) == PLUS
9339 && (mode != PSImode || reload_completed))
9341 rtx xop0 = XEXP (x, 0);
9342 rtx xop1 = XEXP (x, 1);
9344 if (GET_MODE_SIZE (mode) <= 8
9345 && MAYBE_BASE_REGISTER_RTX_P (xop0, strict)
9346 && sh_legitimate_index_p (mode, xop1))
9349 if ((ALLOW_INDEXED_ADDRESS || GET_MODE (x) == DImode
9350 || ((xop0 == stack_pointer_rtx
9351 || xop0 == hard_frame_pointer_rtx)
9352 && REG_P (xop1) && REGNO (xop1) == R0_REG)
9353 || ((xop1 == stack_pointer_rtx
9354 || xop1 == hard_frame_pointer_rtx)
9355 && REG_P (xop0) && REGNO (xop0) == R0_REG))
9356 && ((!TARGET_SHMEDIA && GET_MODE_SIZE (mode) <= 4)
9357 || (TARGET_SHMEDIA && GET_MODE_SIZE (mode) <= 8)
9358 || ((TARGET_SH4 || TARGET_SH2A_DOUBLE)
9359 && TARGET_FMOVD && mode == DFmode)))
9361 if (MAYBE_BASE_REGISTER_RTX_P (xop1, strict)
9362 && MAYBE_INDEX_REGISTER_RTX_P (xop0, strict))
9364 if (MAYBE_INDEX_REGISTER_RTX_P (xop1, strict)
9365 && MAYBE_BASE_REGISTER_RTX_P (xop0, strict))
9373 /* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
9374 isn't protected by a PIC unspec. */
9376 nonpic_symbol_mentioned_p (rtx x)
9378 register const char *fmt;
9381 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF
9382 || GET_CODE (x) == PC)
9385 /* We don't want to look into the possible MEM location of a
9386 CONST_DOUBLE, since we're not going to use it, in general. */
9387 if (GET_CODE (x) == CONST_DOUBLE)
9390 if (GET_CODE (x) == UNSPEC
9391 && (XINT (x, 1) == UNSPEC_PIC
9392 || XINT (x, 1) == UNSPEC_GOT
9393 || XINT (x, 1) == UNSPEC_GOTOFF
9394 || XINT (x, 1) == UNSPEC_GOTPLT
9395 || XINT (x, 1) == UNSPEC_GOTTPOFF
9396 || XINT (x, 1) == UNSPEC_DTPOFF
9397 || XINT (x, 1) == UNSPEC_PLT
9398 || XINT (x, 1) == UNSPEC_SYMOFF
9399 || XINT (x, 1) == UNSPEC_PCREL_SYMOFF))
9402 fmt = GET_RTX_FORMAT (GET_CODE (x));
9403 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9409 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9410 if (nonpic_symbol_mentioned_p (XVECEXP (x, i, j)))
9413 else if (fmt[i] == 'e' && nonpic_symbol_mentioned_p (XEXP (x, i)))
9420 /* Convert a non-PIC address in `orig' to a PIC address using @GOT or
9421 @GOTOFF in `reg'. */
9423 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
9426 if (tls_symbolic_operand (orig, Pmode) != TLS_MODEL_NONE)
9429 if (GET_CODE (orig) == LABEL_REF
9430 || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (orig)))
9433 reg = gen_reg_rtx (Pmode);
9435 emit_insn (gen_symGOTOFF2reg (reg, orig));
9438 else if (GET_CODE (orig) == SYMBOL_REF)
9441 reg = gen_reg_rtx (Pmode);
9443 emit_insn (gen_symGOT2reg (reg, orig));
9449 /* Try machine-dependent ways of modifying an illegitimate address
9450 to be legitimate. If we find one, return the new, valid address.
9451 Otherwise, return X.
9453 For the SH, if X is almost suitable for indexing, but the offset is
9454 out of range, convert it into a normal form so that CSE has a chance
9455 of reducing the number of address registers used. */
9458 sh_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
9461 x = legitimize_pic_address (oldx, mode, NULL_RTX);
9463 if (GET_CODE (x) == PLUS
9464 && (GET_MODE_SIZE (mode) == 4
9465 || GET_MODE_SIZE (mode) == 8)
9466 && CONST_INT_P (XEXP (x, 1))
9467 && BASE_REGISTER_RTX_P (XEXP (x, 0))
9469 && ! ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && mode == DFmode)
9470 && ! (TARGET_SH2E && mode == SFmode))
9472 rtx index_rtx = XEXP (x, 1);
9473 HOST_WIDE_INT offset = INTVAL (index_rtx), offset_base;
9476 /* On rare occasions, we might get an unaligned pointer
9477 that is indexed in a way to give an aligned address.
9478 Therefore, keep the lower two bits in offset_base. */
9479 /* Instead of offset_base 128..131 use 124..127, so that
9480 simple add suffices. */
9482 offset_base = ((offset + 4) & ~60) - 4;
9484 offset_base = offset & ~60;
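/* Worked example (illustrative): offset == 100 gives
   offset_base == (100 & ~60) == 64 and a residual displacement of 36,
   which is aligned and fits the REG+disp range; the adjusted form
   above maps offset == 130 to ((130 + 4) & ~60) - 4 == 126, keeping
   the low two bits (both 130 and 126 are 2 mod 4) while staying
   below 128.  */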
9486 /* Sometimes the normal form does not suit DImode. We
9487 could avoid that by using smaller ranges, but that
9488 would give less optimized code when SImode is
9489 prevalent.  */
9490 if (GET_MODE_SIZE (mode) + offset - offset_base <= 64)
9492 sum = expand_binop (Pmode, add_optab, XEXP (x, 0),
9493 GEN_INT (offset_base), NULL_RTX, 0,
9496 return gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - offset_base));
9503 /* Mark the use of a constant in the literal table. If the constant
9504 has multiple labels, make it unique. */
9506 mark_constant_pool_use (rtx x)
9508 rtx insn, lab, pattern;
9513 switch (GET_CODE (x))
9523 /* Get the first label in the list of labels for the same constant
9524 and delete the other labels in the list.  */
9526 for (insn = PREV_INSN (x); insn; insn = PREV_INSN (insn))
9529 || LABEL_REFS (insn) != NEXT_INSN (insn))
9534 for (insn = LABEL_REFS (lab); insn; insn = LABEL_REFS (insn))
9535 INSN_DELETED_P (insn) = 1;
9537 /* Mark constants in a window. */
9538 for (insn = NEXT_INSN (x); insn; insn = NEXT_INSN (insn))
9540 if (!NONJUMP_INSN_P (insn))
9543 pattern = PATTERN (insn);
9544 if (GET_CODE (pattern) != UNSPEC_VOLATILE)
9547 switch (XINT (pattern, 1))
9549 case UNSPECV_CONST2:
9550 case UNSPECV_CONST4:
9551 case UNSPECV_CONST8:
9552 XVECEXP (pattern, 0, 1) = const1_rtx;
9554 case UNSPECV_WINDOW_END:
9555 if (XVECEXP (pattern, 0, 0) == x)
9558 case UNSPECV_CONST_END:
9568 /* Return true if it's possible to redirect BRANCH1 to the destination
9569 of an unconditional jump BRANCH2. We only want to do this if the
9570 resulting branch will have a short displacement. */
9572 sh_can_redirect_branch (rtx branch1, rtx branch2)
9574 if (flag_expensive_optimizations && simplejump_p (branch2))
9576 rtx dest = XEXP (SET_SRC (single_set (branch2)), 0);
9580 for (distance = 0, insn = PREV_INSN (branch1);
9581 insn && distance < 256;
9582 insn = PREV_INSN (insn))
9587 distance += get_attr_length (insn);
9589 for (distance = 0, insn = NEXT_INSN (branch1);
9590 insn && distance < 256;
9591 insn = NEXT_INSN (insn))
9596 distance += get_attr_length (insn);
9602 /* Return nonzero if register old_reg can be renamed to register new_reg. */
9604 sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
9605 unsigned int new_reg)
9607 /* Interrupt functions can only use registers that have already been
9608 saved by the prologue, even if they would normally be
9609 call-clobbered.  */
9611 if (sh_cfun_interrupt_handler_p () && !df_regs_ever_live_p (new_reg))
9617 /* Function to update the integer COST
9618 based on the relationship between INSN that is dependent on
9619 DEP_INSN through the dependence LINK. The default is to make no
9620 adjustment to COST. This can be used for example to specify to
9621 the scheduler that an output- or anti-dependence does not incur
9622 the same cost as a data-dependence. The return value should be
9623 the new value for COST. */
9625 sh_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx dep_insn, int cost)
9631 /* On SHmedia, if the dependence is an anti-dependence or
9632 output-dependence, there is no cost. */
9633 if (REG_NOTE_KIND (link) != 0)
9635 /* However, dependencies between target register loads and
9636 uses of the register in a subsequent block that are separated
9637 by a conditional branch are not modelled - we have to make do with
9638 the anti-dependency between the target register load and the
9639 conditional branch that ends the current block. */
9640 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
9641 && GET_CODE (PATTERN (dep_insn)) == SET
9642 && (get_attr_type (dep_insn) == TYPE_PT_MEDIA
9643 || get_attr_type (dep_insn) == TYPE_PTABS_MEDIA)
9644 && get_attr_type (insn) == TYPE_CBRANCH_MEDIA)
9646 int orig_cost = cost;
9647 rtx note = find_reg_note (insn, REG_BR_PROB, 0);
9648 rtx target = ((! note
9649 || INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
9650 ? insn : JUMP_LABEL (insn));
9651 /* On the likely path, the branch costs 1, on the unlikely path,
9652 it costs 3.  */
9655 target = next_active_insn (target);
9656 while (target && ! flow_dependent_p (target, dep_insn)
9658 /* If two branches are executed in immediate succession, with the
9659 first branch properly predicted, this causes a stall at the
9660 second branch, hence we won't need the target for the
9661 second branch for two cycles after the launch of the first
9663 if (cost > orig_cost - 2)
9664 cost = orig_cost - 2;
9670 else if (get_attr_is_mac_media (insn)
9671 && get_attr_is_mac_media (dep_insn))
9674 else if (! reload_completed
9675 && GET_CODE (PATTERN (insn)) == SET
9676 && GET_CODE (SET_SRC (PATTERN (insn))) == FLOAT
9677 && GET_CODE (PATTERN (dep_insn)) == SET
9678 && fp_arith_reg_operand (SET_SRC (PATTERN (dep_insn)), VOIDmode)
9681 /* Schedule the ptabs for a casesi_jump_media in preference to stuff
9682 that is needed at the target. */
9683 else if (get_attr_type (insn) == TYPE_JUMP_MEDIA
9684 && ! flow_dependent_p (insn, dep_insn))
9687 else if (REG_NOTE_KIND (link) == 0)
9689 enum attr_type type;
9692 if (recog_memoized (insn) < 0
9693 || recog_memoized (dep_insn) < 0)
9696 dep_set = single_set (dep_insn);
9698 /* The latency that we specify in the scheduling description refers
9699 to the actual output, not to an auto-increment register; for that,
9700 the latency is one. */
9701 if (dep_set && MEM_P (SET_SRC (dep_set)) && cost > 1)
9703 rtx set = single_set (insn);
9706 && !reg_mentioned_p (SET_DEST (dep_set), SET_SRC (set))
9707 && (!MEM_P (SET_DEST (set))
9708 || !reg_mentioned_p (SET_DEST (dep_set),
9709 XEXP (SET_DEST (set), 0))))
9712 /* The only input for a call that is timing-critical is the
9713 function's address. */
9716 rtx call = PATTERN (insn);
9718 if (GET_CODE (call) == PARALLEL)
9719 call = XVECEXP (call, 0, 0);
9720 if (GET_CODE (call) == SET)
9721 call = SET_SRC (call);
9722 if (GET_CODE (call) == CALL && MEM_P (XEXP (call, 0))
9723 /* sibcalli_thunk uses a symbol_ref in an unspec. */
9724 && (GET_CODE (XEXP (XEXP (call, 0), 0)) == UNSPEC
9725 || ! reg_set_p (XEXP (XEXP (call, 0), 0), dep_insn)))
9726 cost -= TARGET_SH4_300 ? 3 : 6;
9728 /* Likewise, the most timing-critical input for an sfunc call
9729 is the function address. However, sfuncs typically start
9730 using their arguments pretty quickly.
9731 Assume a four cycle delay for SH4 before they are needed.
9732 Cached ST40-300 calls are quicker, so assume only a one
9733 cycle delay.
9734 ??? Maybe we should encode the delays till input registers
9735 are needed by sfuncs into the sfunc call insn. */
9736 /* All sfunc calls are parallels with at least four components.
9737 Exploit this to avoid unnecessary calls to sfunc_uses_reg. */
9738 else if (GET_CODE (PATTERN (insn)) == PARALLEL
9739 && XVECLEN (PATTERN (insn), 0) >= 4
9740 && (reg = sfunc_uses_reg (insn)))
9742 if (! reg_set_p (reg, dep_insn))
9743 cost -= TARGET_SH4_300 ? 1 : 4;
9745 if (TARGET_HARD_SH4 && !TARGET_SH4_300)
9747 enum attr_type dep_type = get_attr_type (dep_insn);
9749 if (dep_type == TYPE_FLOAD || dep_type == TYPE_PCFLOAD)
9751 else if ((dep_type == TYPE_LOAD_SI || dep_type == TYPE_PCLOAD_SI)
9752 && (type = get_attr_type (insn)) != TYPE_CALL
9753 && type != TYPE_SFUNC)
9755 /* When the preceding instruction loads the shift amount of
9756 the following SHAD/SHLD, the latency of the load is increased
9757 by 1 cycle.  */
9758 if (get_attr_type (insn) == TYPE_DYN_SHIFT
9759 && get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
9760 && reg_overlap_mentioned_p (SET_DEST (dep_set),
9761 XEXP (SET_SRC (single_set (insn)),
9764 /* When an LS group instruction with a latency of less than
9765 3 cycles is followed by a double-precision floating-point
9766 instruction, FIPR, or FTRV, the latency of the first
9767 instruction is increased to 3 cycles. */
9769 && get_attr_insn_class (dep_insn) == INSN_CLASS_LS_GROUP
9770 && get_attr_dfp_comp (insn) == DFP_COMP_YES)
9772 /* The lsw register of a double-precision computation is ready one
9773 cycle earlier.  */
9774 else if (reload_completed
9775 && get_attr_dfp_comp (dep_insn) == DFP_COMP_YES
9776 && (use_pat = single_set (insn))
9777 && ! regno_use_in (REGNO (SET_DEST (single_set (dep_insn))),
9781 if (get_attr_any_fp_comp (dep_insn) == ANY_FP_COMP_YES
9782 && get_attr_late_fp_use (insn) == LATE_FP_USE_YES)
9785 else if (TARGET_SH4_300)
9787 /* Stores need their input register two cycles later. */
9788 if (dep_set && cost >= 1
9789 && ((type = get_attr_type (insn)) == TYPE_STORE
9790 || type == TYPE_PSTORE
9791 || type == TYPE_FSTORE || type == TYPE_MAC_MEM))
9793 rtx set = single_set (insn);
9795 if (!reg_mentioned_p (SET_SRC (set), XEXP (SET_DEST (set), 0))
9796 && rtx_equal_p (SET_SRC (set), SET_DEST (dep_set)))
9799 /* But don't reduce the cost below 1 if the address depends
9800 on a side effect of dep_insn. */
9802 && modified_in_p (XEXP (SET_DEST (set), 0), dep_insn))
9808 /* An anti-dependence penalty of two applies if the first insn is a double
9809 precision fadd / fsub / fmul. */
9810 else if (!TARGET_SH4_300
9811 && REG_NOTE_KIND (link) == REG_DEP_ANTI
9812 && recog_memoized (dep_insn) >= 0
9813 && (get_attr_type (dep_insn) == TYPE_DFP_ARITH
9814 || get_attr_type (dep_insn) == TYPE_DFP_MUL)
9815 /* A lot of alleged anti-flow dependences are fake,
9816 so check this one is real. */
9817 && flow_dependent_p (dep_insn, insn))
9823 /* Check if INSN is flow-dependent on DEP_INSN. Can also be used to check
9824 if DEP_INSN is anti-flow dependent on INSN. */
9826 flow_dependent_p (rtx insn, rtx dep_insn)
9828 rtx tmp = PATTERN (insn);
9830 note_stores (PATTERN (dep_insn), flow_dependent_p_1, &tmp);
9831 return tmp == NULL_RTX;
9834 /* A helper function for flow_dependent_p called through note_stores. */
9836 flow_dependent_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
9838 rtx * pinsn = (rtx *) data;
9840 if (*pinsn && reg_referenced_p (x, *pinsn))
9844 /* For use by sh_allocate_initial_value. Note that sh.md contains some
9845 'special function' patterns (type sfunc) that clobber pr, but that
9846 do not look like function calls to leaf_function_p. Hence we must
9847 do this extra check. */
9851 return DF_REG_DEF_COUNT (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
9854 /* Return where to allocate pseudo for a given hard register initial
9855 value.  */
9857 sh_allocate_initial_value (rtx hard_reg)
9861 if (REGNO (hard_reg) == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG))
9863 if (current_function_is_leaf
9864 && ! sh_pr_n_sets ()
9865 && ! (TARGET_SHCOMPACT
9866 && ((crtl->args.info.call_cookie
9867 & ~ CALL_COOKIE_RET_TRAMP (1))
9868 || crtl->saves_all_registers)))
9871 x = gen_frame_mem (Pmode, return_address_pointer_rtx);
9879 /* This function returns "2" to indicate dual issue for the SH4
9880 processor. To be used by the DFA pipeline description. */
9882 sh_issue_rate (void)
9884 if (TARGET_SUPERSCALAR)
9890 /* Functions for ready queue reordering for sched1. */
9892 /* Get weight for mode for a set x. */
9894 find_set_regmode_weight (rtx x, enum machine_mode mode)
9896 if (GET_CODE (x) == CLOBBER && register_operand (SET_DEST (x), mode))
9898 if (GET_CODE (x) == SET && register_operand (SET_DEST (x), mode))
9900 if (REG_P (SET_DEST (x)))
9902 if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
9912 /* Get regmode weight for insn. */
9914 find_insn_regmode_weight (rtx insn, enum machine_mode mode)
9916 short reg_weight = 0;
9919 /* Increment weight for each register born here. */
9921 reg_weight += find_set_regmode_weight (x, mode);
9922 if (GET_CODE (x) == PARALLEL)
9925 for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
9927 x = XVECEXP (PATTERN (insn), 0, j);
9928 reg_weight += find_set_regmode_weight (x, mode);
9931 /* Decrement weight for each register that dies here. */
9932 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
9934 if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED)
9936 rtx note = XEXP (x, 0);
9937 if (REG_P (note) && GET_MODE (note) == mode)
9944 /* Calculate regmode weights for all insns of a basic block. */
9946 find_regmode_weight (basic_block b, enum machine_mode mode)
9948 rtx insn, next_tail, head, tail;
9950 get_ebb_head_tail (b, b, &head, &tail);
9951 next_tail = NEXT_INSN (tail);
9953 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
9955 /* Handle register life information. */
9960 INSN_REGMODE_WEIGHT (insn, mode) =
9961 find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DFmode);
9962 else if (mode == SImode)
9963 INSN_REGMODE_WEIGHT (insn, mode) =
9964 find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DImode);
9968 /* Comparison function for ready queue sorting. */
9970 rank_for_reorder (const void *x, const void *y)
9972 rtx tmp = *(const rtx *) y;
9973 rtx tmp2 = *(const rtx *) x;
9975 /* The insn in a schedule group should be issued first.  */
9976 if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
9977 return SCHED_GROUP_P (tmp2) ? 1 : -1;
9979 /* If insns are equally good, sort by INSN_LUID (original insn order); this
9980 minimizes instruction movement, thus minimizing sched's effect on
9981 register pressure. */
9982 return INSN_LUID (tmp) - INSN_LUID (tmp2);
9985 /* Resort the array A in which only element at index N may be out of order. */
9987 swap_reorder (rtx *a, int n)
9989 rtx insn = a[n - 1];
9992 while (i >= 0 && rank_for_reorder (a + i, &insn) >= 0)
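/* The SCHED_REORDER macro below resorts the ready list using
   rank_for_reorder: a two-entry list needs only the single
   swap_reorder pass, while longer lists fall back to qsort.  */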
10000 #define SCHED_REORDER(READY, N_READY) \
10003 if ((N_READY) == 2) \
10004 swap_reorder (READY, N_READY); \
10005 else if ((N_READY) > 2) \
10006 qsort (READY, N_READY, sizeof (rtx), rank_for_reorder); \
10010 /* Sort the ready list READY by ascending priority, using the SCHED_REORDER
10011 macro.  */
10013 ready_reorder (rtx *ready, int nready)
10015 SCHED_REORDER (ready, nready);
10018 /* Count life regions of r0 for a block. */
10020 find_r0_life_regions (basic_block b)
10029 if (REGNO_REG_SET_P (df_get_live_in (b), R0_REG))
10040 insn = BB_HEAD (b);
10042 r0_reg = gen_rtx_REG (SImode, R0_REG);
10047 if (find_regno_note (insn, REG_DEAD, R0_REG))
10053 && (pset = single_set (insn))
10054 && reg_overlap_mentioned_p (r0_reg, SET_DEST (pset))
10055 && !find_regno_note (insn, REG_UNUSED, R0_REG))
10063 insn = NEXT_INSN (insn);
10065 return set - death;
10068 /* Calculate regmode weights for all insns of all basic blocks.  */
10070 sh_md_init_global (FILE *dump ATTRIBUTE_UNUSED,
10071 int verbose ATTRIBUTE_UNUSED,
10076 regmode_weight[0] = (short *) xcalloc (old_max_uid, sizeof (short));
10077 regmode_weight[1] = (short *) xcalloc (old_max_uid, sizeof (short));
10078 r0_life_regions = 0;
10080 FOR_EACH_BB_REVERSE (b)
10082 find_regmode_weight (b, SImode);
10083 find_regmode_weight (b, SFmode);
10084 if (!reload_completed)
10085 r0_life_regions += find_r0_life_regions (b);
10088 CURR_REGMODE_PRESSURE (SImode) = 0;
10089 CURR_REGMODE_PRESSURE (SFmode) = 0;
10095 sh_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
10096 int verbose ATTRIBUTE_UNUSED)
10098 if (regmode_weight[0])
10100 free (regmode_weight[0]);
10101 regmode_weight[0] = NULL;
10103 if (regmode_weight[1])
10105 free (regmode_weight[1]);
10106 regmode_weight[1] = NULL;
10110 /* The set of scalar modes supported differs from the default only in
10111 TImode for 32-bit SHMEDIA.  */
10113 sh_scalar_mode_supported_p (enum machine_mode mode)
10115 if (TARGET_SHMEDIA32 && mode == TImode)
10118 return default_scalar_mode_supported_p (mode);
10121 /* Cache the can_issue_more so that we can return it from reorder2. Also,
10122 keep count of register pressures on SImode and SFmode. */
10124 sh_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
10125 int sched_verbose ATTRIBUTE_UNUSED,
10127 int can_issue_more)
10129 if (GET_CODE (PATTERN (insn)) != USE
10130 && GET_CODE (PATTERN (insn)) != CLOBBER)
10131 cached_can_issue_more = can_issue_more - 1;
10133 cached_can_issue_more = can_issue_more;
10135 if (reload_completed)
10136 return cached_can_issue_more;
10138 CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
10139 CURR_REGMODE_PRESSURE (SFmode) += INSN_REGMODE_WEIGHT (insn, SFmode);
10141 return cached_can_issue_more;
10145 sh_md_init (FILE *dump ATTRIBUTE_UNUSED,
10146 int verbose ATTRIBUTE_UNUSED,
10147 int veclen ATTRIBUTE_UNUSED)
10149 CURR_REGMODE_PRESSURE (SImode) = 0;
10150 CURR_REGMODE_PRESSURE (SFmode) = 0;
10153 /* Some magic numbers. */
10154 /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
10155 functions that already have high pressure on r0. */
10156 #define R0_MAX_LIFE_REGIONS 2
10157 /* Register Pressure thresholds for SImode and SFmode registers. */
10158 #define SIMODE_MAX_WEIGHT 5
10159 #define SFMODE_MAX_WEIGHT 10
10161 /* Return true if the pressure is high for MODE. */
10163 high_pressure (enum machine_mode mode)
10165 /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
10166 functions that already have high pressure on r0. */
10167 if (r0_life_regions >= R0_MAX_LIFE_REGIONS)
10170 if (mode == SFmode)
10171 return (CURR_REGMODE_PRESSURE (SFmode) > SFMODE_MAX_WEIGHT);
10173 return (CURR_REGMODE_PRESSURE (SImode) > SIMODE_MAX_WEIGHT);
10176 /* Reorder ready queue if register pressure is high. */
10178 sh_reorder (FILE *dump ATTRIBUTE_UNUSED,
10179 int sched_verbose ATTRIBUTE_UNUSED,
10182 int clock_var ATTRIBUTE_UNUSED)
10184 if (reload_completed)
10185 return sh_issue_rate ();
10187 if (high_pressure (SFmode) || high_pressure (SImode))
10189 ready_reorder (ready, *n_readyp);
10192 return sh_issue_rate ();
10195 /* Skip cycles if the current register pressure is high. */
10197 sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
10198 int sched_verbose ATTRIBUTE_UNUSED,
10199 rtx *ready ATTRIBUTE_UNUSED,
10200 int *n_readyp ATTRIBUTE_UNUSED,
10201 int clock_var ATTRIBUTE_UNUSED)
10203 if (reload_completed)
10204 return cached_can_issue_more;
10206 if (high_pressure(SFmode) || high_pressure (SImode))
10209 return cached_can_issue_more;
10212 /* Skip cycles without sorting the ready queue.  This will move insns from
10213 Q->R.  If this is the last cycle we are skipping, allow sorting of the
10214 ready queue by sh_reorder.  */
10216 /* Generally, skipping this many cycles is sufficient for all insns to move
10218 #define MAX_SKIPS 8
10221 sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED,
10222 int sched_verbose ATTRIBUTE_UNUSED,
10223 rtx insn ATTRIBUTE_UNUSED,
10224 int last_clock_var,
10228 if (reload_completed)
10233 if ((clock_var - last_clock_var) < MAX_SKIPS)
10238 /* If this is the last cycle we are skipping, allow reordering of R. */
10239 if ((clock_var - last_clock_var) == MAX_SKIPS)
10251 /* SHmedia requires registers for branches, so we can't generate new
10252 branches past reload. */
10254 sh_cannot_modify_jumps_p (void)
10256 return (TARGET_SHMEDIA && (reload_in_progress || reload_completed));
10259 static enum reg_class
10260 sh_target_reg_class (void)
10262 return TARGET_SHMEDIA ? TARGET_REGS : NO_REGS;
10266 sh_optimize_target_register_callee_saved (bool after_prologue_epilogue_gen)
10268 HARD_REG_SET dummy;
10273 if (! shmedia_space_reserved_for_target_registers)
10275 if (after_prologue_epilogue_gen && ! TARGET_SAVE_ALL_TARGET_REGS)
10277 if (calc_live_regs (&dummy) >= 6 * 8)
10283 sh_ms_bitfield_layout_p (const_tree record_type ATTRIBUTE_UNUSED)
10285 return (TARGET_SH5 || TARGET_HITACHI || sh_attr_renesas_p (record_type));
10289 On the SH1..SH4, the trampoline looks like
10290 2 0002 D202 mov.l l2,r2
10291 1 0000 D301 mov.l l1,r3
10292 3 0004 422B jmp @r2
10293 4 0006 0009 nop
10294 5 0008 00000000 l1: .long area
10295 6 000c 00000000 l2: .long function
10297 SH5 (compact) uses r1 instead of r3 for the static chain. */
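/* For reference (derived from the layout above): in the SH1..4 case
   further down, the SImode word 0xd301d202 / 0xd202d301 packs the two
   mov.l instructions and 0x0009422b / 0x422b0009 packs "jmp @r2" with
   a "nop" in its delay slot; which halfword comes first depends on
   the endianness.  */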
10300 /* Emit RTL insns to initialize the variable parts of a trampoline.
10301 FNADDR is an RTX for the address of the function's pure code.
10302 CXT is an RTX for the static chain value for the function. */
10305 sh_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
10307 rtx tramp_mem = gen_frame_mem (BLKmode, tramp);
10309 if (TARGET_SHMEDIA64)
10314 rtx movi1 = GEN_INT (0xcc000010);
10315 rtx shori1 = GEN_INT (0xc8000010);
10318 /* The following trampoline works within a +- 128 KB range for cxt:
10319 ptb/u cxt,tr1; movi fnaddr >> 48,r0; shori fnaddr >> 32,r0;
10320 shori fnaddr >> 16,r0; shori fnaddr,r0; ptabs/l r0,tr0
10321 gettr tr1,r1; blink tr0,r63 */
10322 /* Address rounding makes it hard to compute the exact bounds of the
10323 offset for this trampoline, but we have a rather generous offset
10324 range, so frame_offset should do fine as an upper bound. */
10325 if (cxt == virtual_stack_vars_rtx && frame_offset < 0x20000)
10327 /* ??? could optimize this trampoline initialization
10328 by writing DImode words with two insns each. */
10329 rtx mask = force_reg (DImode, GEN_INT (0x3fffc00));
10330 rtx insn = gen_rtx_MINUS (DImode, cxt, tramp);
10331 insn = gen_rtx_ASHIFT (DImode, insn, GEN_INT (10-2));
10332 insn = gen_rtx_AND (DImode, insn, mask);
10333 /* Or in ptb/u .,tr1 pattern */
10334 insn = gen_rtx_IOR (DImode, insn, gen_int_mode (0xec000010, SImode));
10335 insn = force_operand (insn, NULL_RTX);
10336 insn = gen_lowpart (SImode, insn);
10337 emit_move_insn (change_address (tramp_mem, SImode, NULL_RTX), insn);
10338 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (38));
10339 insn = gen_rtx_AND (DImode, insn, mask);
10340 insn = force_operand (gen_rtx_IOR (DImode, movi1, insn), NULL_RTX);
10341 insn = gen_lowpart (SImode, insn);
10342 emit_move_insn (adjust_address (tramp_mem, SImode, 4), insn);
10343 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (22));
10344 insn = gen_rtx_AND (DImode, insn, mask);
10345 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
10346 insn = gen_lowpart (SImode, insn);
10347 emit_move_insn (adjust_address (tramp_mem, SImode, 8), insn);
10348 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (6));
10349 insn = gen_rtx_AND (DImode, insn, mask);
10350 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
10351 insn = gen_lowpart (SImode, insn);
10352 emit_move_insn (adjust_address (tramp_mem, SImode, 12), insn);
10353 insn = gen_rtx_ASHIFT (DImode, fnaddr, GEN_INT (10));
10354 insn = gen_rtx_AND (DImode, insn, mask);
10355 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
10356 insn = gen_lowpart (SImode, insn);
10357 emit_move_insn (adjust_address (tramp_mem, SImode, 16), insn);
10358 emit_move_insn (adjust_address (tramp_mem, SImode, 20),
10359 GEN_INT (0x6bf10600));
10360 emit_move_insn (adjust_address (tramp_mem, SImode, 24),
10361 GEN_INT (0x4415fc10));
10362 emit_move_insn (adjust_address (tramp_mem, SImode, 28),
10363 GEN_INT (0x4401fff0));
10364 emit_insn (gen_ic_invalidate_line (tramp));
10367 tramp_templ = gen_rtx_SYMBOL_REF (Pmode, "__GCC_nested_trampoline");
10368 fixed_len = TRAMPOLINE_SIZE - 2 * GET_MODE_SIZE (Pmode);
10370 tramp_templ = gen_datalabel_ref (tramp_templ);
10372 src = gen_const_mem (BLKmode, tramp_templ);
10373 set_mem_align (dst, 256);
10374 set_mem_align (src, 64);
10375 emit_block_move (dst, src, GEN_INT (fixed_len), BLOCK_OP_NORMAL);
10377 emit_move_insn (adjust_address (tramp_mem, Pmode, fixed_len), fnaddr);
10378 emit_move_insn (adjust_address (tramp_mem, Pmode,
10379 fixed_len + GET_MODE_SIZE (Pmode)),
10381 emit_insn (gen_ic_invalidate_line (tramp));
10384 else if (TARGET_SHMEDIA)
10386 /* movi fnaddr >> 16,r1; shori fnaddr,r1; ptabs/l r1,tr0
10387 movi cxt >> 16,r1; shori cxt,r1; blink tr0,r63 */
10388 rtx quad0 = gen_reg_rtx (DImode), cxtload = gen_reg_rtx (DImode);
10389 rtx quad1 = gen_reg_rtx (DImode), quad2 = gen_reg_rtx (DImode);
10390 /* movi 0,r1: 0xcc000010 shori 0,r1: c8000010 concatenated,
10391 rotated 10 right, and the higher 16 bits of every 32 selected.  */
10393 = force_reg (V2HImode, (simplify_gen_subreg
10394 (V2HImode, GEN_INT (0x4330432), SImode, 0)));
10395 rtx ptabs = force_reg (DImode, GEN_INT (0x6bf10600));
10396 rtx blink = force_reg (DImode, GEN_INT (0x4401fff0));
10398 tramp = force_reg (Pmode, tramp);
10399 fnaddr = force_reg (SImode, fnaddr);
10400 cxt = force_reg (SImode, cxt);
10401 emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, quad0, 0),
10402 gen_rtx_SUBREG (V2HImode, fnaddr, 0),
10404 emit_insn (gen_rotrdi3_mextr (quad0, quad0,
10405 GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
10406 emit_insn (gen_ashldi3_media (quad0, quad0, const2_rtx));
10407 emit_move_insn (change_address (tramp_mem, DImode, NULL_RTX), quad0);
10408 emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, cxtload, 0),
10409 gen_rtx_SUBREG (V2HImode, cxt, 0),
10411 emit_insn (gen_rotrdi3_mextr (cxtload, cxtload,
10412 GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
10413 emit_insn (gen_ashldi3_media (cxtload, cxtload, const2_rtx));
10414 if (TARGET_LITTLE_ENDIAN)
10416 emit_insn (gen_mshflo_l_di (quad1, ptabs, cxtload));
10417 emit_insn (gen_mextr4 (quad2, cxtload, blink));
10421 emit_insn (gen_mextr4 (quad1, cxtload, ptabs));
10422 emit_insn (gen_mshflo_l_di (quad2, blink, cxtload));
10424 emit_move_insn (adjust_address (tramp_mem, DImode, 8), quad1);
10425 emit_move_insn (adjust_address (tramp_mem, DImode, 16), quad2);
10426 emit_insn (gen_ic_invalidate_line (tramp));
10429 else if (TARGET_SHCOMPACT)
10431 emit_insn (gen_initialize_trampoline (tramp, cxt, fnaddr));
10434 emit_move_insn (change_address (tramp_mem, SImode, NULL_RTX),
10435 gen_int_mode (TARGET_LITTLE_ENDIAN ? 0xd301d202 : 0xd202d301,
10437 emit_move_insn (adjust_address (tramp_mem, SImode, 4),
10438 gen_int_mode (TARGET_LITTLE_ENDIAN ? 0x0009422b : 0x422b0009,
10440 emit_move_insn (adjust_address (tramp_mem, SImode, 8), cxt);
10441 emit_move_insn (adjust_address (tramp_mem, SImode, 12), fnaddr);
10442 if (TARGET_HARVARD)
10444 if (!TARGET_INLINE_IC_INVALIDATE
10445 || (!(TARGET_SH4A_ARCH || TARGET_SH4_300) && TARGET_USERMODE))
10446 emit_library_call (function_symbol (NULL, "__ic_invalidate",
10447 FUNCTION_ORDINARY),
10448 LCT_NORMAL, VOIDmode, 1, tramp, SImode);
10450 emit_insn (gen_ic_invalidate_line (tramp));
10454 /* FIXME: This is overly conservative. A SHcompact function that
10455 receives arguments ``by reference'' will have them stored in its
10456 own stack frame, so it must not pass pointers or references to
10457 these arguments to other functions by means of sibling calls. */
10458 /* If PIC, we cannot make sibling calls to global functions
10459 because the PLT requires r12 to be live. */
10461 sh_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
10464 && (! TARGET_SHCOMPACT
10465 || crtl->args.info.stack_regs == 0)
10466 && ! sh_cfun_interrupt_handler_p ()
10468 || (decl && ! TREE_PUBLIC (decl))
10469 || (decl && DECL_VISIBILITY (decl) != VISIBILITY_DEFAULT)));
10472 /* Machine specific built-in functions. */
10474 struct builtin_description
10476 const enum insn_code icode;
10477 const char *const name;
10481 /* describe number and signedness of arguments; arg[0] == result
10482 (1: unsigned, 2: signed, 4: don't care, 8: pointer, 0: no argument)
10483 /* 9: 64-bit pointer, 10: 32-bit pointer */
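/* For example (illustrative; the real rows are selected by the
   SH_BLTIN_* indices defined below): a row { 2, 2, 2, 0 } would
   describe a builtin with a signed result and two signed arguments.  */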
10484 static const char signature_args[][4] =
10486 #define SH_BLTIN_V2SI2 0
10488 #define SH_BLTIN_V4HI2 1
10490 #define SH_BLTIN_V2SI3 2
10492 #define SH_BLTIN_V4HI3 3
10494 #define SH_BLTIN_V8QI3 4
10496 #define SH_BLTIN_MAC_HISI 5
10498 #define SH_BLTIN_SH_HI 6
10500 #define SH_BLTIN_SH_SI 7
10502 #define SH_BLTIN_V4HI2V2SI 8
10504 #define SH_BLTIN_V4HI2V8QI 9
10506 #define SH_BLTIN_SISF 10
10508 #define SH_BLTIN_LDUA_L 11
10510 #define SH_BLTIN_LDUA_Q 12
10512 #define SH_BLTIN_STUA_L 13
10514 #define SH_BLTIN_STUA_Q 14
10516 #define SH_BLTIN_LDUA_L64 15
10518 #define SH_BLTIN_LDUA_Q64 16
10520 #define SH_BLTIN_STUA_L64 17
10522 #define SH_BLTIN_STUA_Q64 18
10524 #define SH_BLTIN_NUM_SHARED_SIGNATURES 19
10525 #define SH_BLTIN_2 19
10526 #define SH_BLTIN_SU 19
10528 #define SH_BLTIN_3 20
10529 #define SH_BLTIN_SUS 20
10531 #define SH_BLTIN_PSSV 21
10533 #define SH_BLTIN_XXUU 22
10534 #define SH_BLTIN_UUUU 22
10536 #define SH_BLTIN_PV 23
10539 /* mcmv: operands considered unsigned. */
10540 /* mmulsum_wq, msad_ubq: result considered unsigned long long. */
10541 /* mperm: control value considered unsigned int. */
10542 /* mshalds, mshard, mshards, mshlld, mshlrd: shift count is unsigned int. */
10543 /* mshards_q: returns signed short. */
10544 /* nsb: takes long long arg, returns unsigned char. */
10545 static const struct builtin_description bdesc[] =
10547 { CODE_FOR_absv2si2, "__builtin_absv2si2", SH_BLTIN_V2SI2 },
10548 { CODE_FOR_absv4hi2, "__builtin_absv4hi2", SH_BLTIN_V4HI2 },
10549 { CODE_FOR_addv2si3, "__builtin_addv2si3", SH_BLTIN_V2SI3 },
10550 { CODE_FOR_addv4hi3, "__builtin_addv4hi3", SH_BLTIN_V4HI3 },
10551 { CODE_FOR_ssaddv2si3,"__builtin_ssaddv2si3", SH_BLTIN_V2SI3 },
10552 { CODE_FOR_usaddv8qi3,"__builtin_usaddv8qi3", SH_BLTIN_V8QI3 },
10553 { CODE_FOR_ssaddv4hi3,"__builtin_ssaddv4hi3", SH_BLTIN_V4HI3 },
10554 { CODE_FOR_alloco_i, "__builtin_sh_media_ALLOCO", SH_BLTIN_PV },
10555 { CODE_FOR_negcmpeqv8qi,"__builtin_sh_media_MCMPEQ_B", SH_BLTIN_V8QI3 },
10556 { CODE_FOR_negcmpeqv2si,"__builtin_sh_media_MCMPEQ_L", SH_BLTIN_V2SI3 },
10557 { CODE_FOR_negcmpeqv4hi,"__builtin_sh_media_MCMPEQ_W", SH_BLTIN_V4HI3 },
10558 { CODE_FOR_negcmpgtuv8qi,"__builtin_sh_media_MCMPGT_UB", SH_BLTIN_V8QI3 },
10559 { CODE_FOR_negcmpgtv2si,"__builtin_sh_media_MCMPGT_L", SH_BLTIN_V2SI3 },
10560 { CODE_FOR_negcmpgtv4hi,"__builtin_sh_media_MCMPGT_W", SH_BLTIN_V4HI3 },
10561 { CODE_FOR_mcmv, "__builtin_sh_media_MCMV", SH_BLTIN_UUUU },
10562 { CODE_FOR_mcnvs_lw, "__builtin_sh_media_MCNVS_LW", SH_BLTIN_3 },
10563 { CODE_FOR_mcnvs_wb, "__builtin_sh_media_MCNVS_WB", SH_BLTIN_V4HI2V8QI },
10564 { CODE_FOR_mcnvs_wub, "__builtin_sh_media_MCNVS_WUB", SH_BLTIN_V4HI2V8QI },
10565 { CODE_FOR_mextr1, "__builtin_sh_media_MEXTR1", SH_BLTIN_V8QI3 },
10566 { CODE_FOR_mextr2, "__builtin_sh_media_MEXTR2", SH_BLTIN_V8QI3 },
10567 { CODE_FOR_mextr3, "__builtin_sh_media_MEXTR3", SH_BLTIN_V8QI3 },
10568 { CODE_FOR_mextr4, "__builtin_sh_media_MEXTR4", SH_BLTIN_V8QI3 },
10569 { CODE_FOR_mextr5, "__builtin_sh_media_MEXTR5", SH_BLTIN_V8QI3 },
10570 { CODE_FOR_mextr6, "__builtin_sh_media_MEXTR6", SH_BLTIN_V8QI3 },
10571 { CODE_FOR_mextr7, "__builtin_sh_media_MEXTR7", SH_BLTIN_V8QI3 },
10572 { CODE_FOR_mmacfx_wl, "__builtin_sh_media_MMACFX_WL", SH_BLTIN_MAC_HISI },
10573 { CODE_FOR_mmacnfx_wl,"__builtin_sh_media_MMACNFX_WL", SH_BLTIN_MAC_HISI },
10574 { CODE_FOR_mulv2si3, "__builtin_mulv2si3", SH_BLTIN_V2SI3, },
10575 { CODE_FOR_mulv4hi3, "__builtin_mulv4hi3", SH_BLTIN_V4HI3 },
10576 { CODE_FOR_mmulfx_l, "__builtin_sh_media_MMULFX_L", SH_BLTIN_V2SI3 },
10577 { CODE_FOR_mmulfx_w, "__builtin_sh_media_MMULFX_W", SH_BLTIN_V4HI3 },
10578 { CODE_FOR_mmulfxrp_w,"__builtin_sh_media_MMULFXRP_W", SH_BLTIN_V4HI3 },
10579 { CODE_FOR_mmulhi_wl, "__builtin_sh_media_MMULHI_WL", SH_BLTIN_V4HI2V2SI },
10580 { CODE_FOR_mmullo_wl, "__builtin_sh_media_MMULLO_WL", SH_BLTIN_V4HI2V2SI },
10581 { CODE_FOR_mmulsum_wq,"__builtin_sh_media_MMULSUM_WQ", SH_BLTIN_XXUU },
10582 { CODE_FOR_mperm_w, "__builtin_sh_media_MPERM_W", SH_BLTIN_SH_HI },
10583 { CODE_FOR_msad_ubq, "__builtin_sh_media_MSAD_UBQ", SH_BLTIN_XXUU },
10584 { CODE_FOR_mshalds_l, "__builtin_sh_media_MSHALDS_L", SH_BLTIN_SH_SI },
10585 { CODE_FOR_mshalds_w, "__builtin_sh_media_MSHALDS_W", SH_BLTIN_SH_HI },
10586 { CODE_FOR_ashrv2si3, "__builtin_ashrv2si3", SH_BLTIN_SH_SI },
10587 { CODE_FOR_ashrv4hi3, "__builtin_ashrv4hi3", SH_BLTIN_SH_HI },
10588 { CODE_FOR_mshards_q, "__builtin_sh_media_MSHARDS_Q", SH_BLTIN_SUS },
10589 { CODE_FOR_mshfhi_b, "__builtin_sh_media_MSHFHI_B", SH_BLTIN_V8QI3 },
10590 { CODE_FOR_mshfhi_l, "__builtin_sh_media_MSHFHI_L", SH_BLTIN_V2SI3 },
10591 { CODE_FOR_mshfhi_w, "__builtin_sh_media_MSHFHI_W", SH_BLTIN_V4HI3 },
10592 { CODE_FOR_mshflo_b, "__builtin_sh_media_MSHFLO_B", SH_BLTIN_V8QI3 },
10593 { CODE_FOR_mshflo_l, "__builtin_sh_media_MSHFLO_L", SH_BLTIN_V2SI3 },
10594 { CODE_FOR_mshflo_w, "__builtin_sh_media_MSHFLO_W", SH_BLTIN_V4HI3 },
10595 { CODE_FOR_ashlv2si3, "__builtin_ashlv2si3", SH_BLTIN_SH_SI },
10596 { CODE_FOR_ashlv4hi3, "__builtin_ashlv4hi3", SH_BLTIN_SH_HI },
10597 { CODE_FOR_lshrv2si3, "__builtin_lshrv2si3", SH_BLTIN_SH_SI },
10598 { CODE_FOR_lshrv4hi3, "__builtin_lshrv4hi3", SH_BLTIN_SH_HI },
10599 { CODE_FOR_subv2si3, "__builtin_subv2si3", SH_BLTIN_V2SI3 },
10600 { CODE_FOR_subv4hi3, "__builtin_subv4hi3", SH_BLTIN_V4HI3 },
10601 { CODE_FOR_sssubv2si3,"__builtin_sssubv2si3", SH_BLTIN_V2SI3 },
10602 { CODE_FOR_ussubv8qi3,"__builtin_ussubv8qi3", SH_BLTIN_V8QI3 },
10603 { CODE_FOR_sssubv4hi3,"__builtin_sssubv4hi3", SH_BLTIN_V4HI3 },
10604 { CODE_FOR_fcosa_s, "__builtin_sh_media_FCOSA_S", SH_BLTIN_SISF },
10605 { CODE_FOR_fsina_s, "__builtin_sh_media_FSINA_S", SH_BLTIN_SISF },
10606 { CODE_FOR_fipr, "__builtin_sh_media_FIPR_S", SH_BLTIN_3 },
10607 { CODE_FOR_ftrv, "__builtin_sh_media_FTRV_S", SH_BLTIN_3 },
10608 { CODE_FOR_mac_media, "__builtin_sh_media_FMAC_S", SH_BLTIN_3 },
10609 { CODE_FOR_sqrtdf2, "__builtin_sh_media_FSQRT_D", SH_BLTIN_2 },
10610 { CODE_FOR_sqrtsf2, "__builtin_sh_media_FSQRT_S", SH_BLTIN_2 },
10611 { CODE_FOR_fsrra_s, "__builtin_sh_media_FSRRA_S", SH_BLTIN_2 },
10612 { CODE_FOR_ldhi_l, "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L },
10613 { CODE_FOR_ldhi_q, "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q },
10614 { CODE_FOR_ldlo_l, "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L },
10615 { CODE_FOR_ldlo_q, "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q },
10616 { CODE_FOR_sthi_l, "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L },
10617 { CODE_FOR_sthi_q, "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q },
10618 { CODE_FOR_stlo_l, "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L },
10619 { CODE_FOR_stlo_q, "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q },
10620 { CODE_FOR_ldhi_l64, "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L64 },
10621 { CODE_FOR_ldhi_q64, "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q64 },
10622 { CODE_FOR_ldlo_l64, "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L64 },
10623 { CODE_FOR_ldlo_q64, "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q64 },
10624 { CODE_FOR_sthi_l64, "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L64 },
10625 { CODE_FOR_sthi_q64, "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q64 },
10626 { CODE_FOR_stlo_l64, "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L64 },
10627 { CODE_FOR_stlo_q64, "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q64 },
10628 { CODE_FOR_nsb, "__builtin_sh_media_NSB", SH_BLTIN_SU },
10629 { CODE_FOR_byterev, "__builtin_sh_media_BYTEREV", SH_BLTIN_2 },
10630 { CODE_FOR_prefetch, "__builtin_sh_media_PREFO", SH_BLTIN_PSSV },
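/* A usage sketch (the builtin name comes from the table above; the
   vector typedef is an illustrative assumption about the expected
   argument type):

     typedef short v4hi __attribute__ ((vector_size (8)));

     v4hi shuffle_lo (v4hi a, v4hi b)
     {
       return __builtin_sh_media_MSHFLO_W (a, b);
     }  */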
10634 sh_media_init_builtins (void)
10636 tree shared[SH_BLTIN_NUM_SHARED_SIGNATURES];
10637 const struct builtin_description *d;
10639 memset (shared, 0, sizeof shared);
10640 for (d = bdesc; d - bdesc < (int) ARRAY_SIZE (bdesc); d++)
10642 tree type, arg_type = 0;
10643 int signature = d->signature;
10646 if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES && shared[signature])
10647 type = shared[signature];
10650 int has_result = signature_args[signature][0] != 0;
10652 if ((signature_args[signature][1] & 8)
10653 && (((signature_args[signature][1] & 1) && TARGET_SHMEDIA32)
10654 || ((signature_args[signature][1] & 2) && TARGET_SHMEDIA64)))
10656 if (! TARGET_FPU_ANY
10657 && FLOAT_MODE_P (insn_data[d->icode].operand[0].mode))
10659 type = void_list_node;
10662 int arg = signature_args[signature][i];
10663 int opno = i - 1 + has_result;
10666 arg_type = ptr_type_node;
10668 arg_type = (*lang_hooks.types.type_for_mode)
10669 (insn_data[d->icode].operand[opno].mode,
10674 arg_type = void_type_node;
10677 type = tree_cons (NULL_TREE, arg_type, type);
10679 type = build_function_type (arg_type, type);
10680 if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES)
10681 shared[signature] = type;
10683 add_builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
10688 /* Implements target hook vector_mode_supported_p. */
10690 sh_vector_mode_supported_p (enum machine_mode mode)
10693 && ((mode == V2SFmode)
10694 || (mode == V4SFmode)
10695 || (mode == V16SFmode)))
10698 else if (TARGET_SHMEDIA
10699 && ((mode == V8QImode)
10700 || (mode == V2HImode)
10701 || (mode == V4HImode)
10702 || (mode == V2SImode)))
10708 /* Implements target hook dwarf_calling_convention. Return an enum
10709 of dwarf_calling_convention. */
10711 sh_dwarf_calling_convention (const_tree func)
10713 if (sh_attr_renesas_p (func))
10714 return DW_CC_GNU_renesas_sh;
10716 return DW_CC_normal;
10720 sh_init_builtins (void)
10722 if (TARGET_SHMEDIA)
10723 sh_media_init_builtins ();
10726 /* Expand an expression EXP that calls a built-in function,
10727 with result going to TARGET if that's convenient
10728 (and in mode MODE if that's convenient).
10729 SUBTARGET may be used as the target for computing one of EXP's operands.
10730 IGNORE is nonzero if the value is to be ignored. */
10733 sh_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10734 enum machine_mode mode ATTRIBUTE_UNUSED, int ignore)
10736 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10737 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10738 const struct builtin_description *d = &bdesc[fcode];
10739 enum insn_code icode = d->icode;
10740 int signature = d->signature;
10741 enum machine_mode tmode = VOIDmode;
10746 if (signature_args[signature][0])
10751 tmode = insn_data[icode].operand[0].mode;
10753 || GET_MODE (target) != tmode
10754 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10755 target = gen_reg_rtx (tmode);
10756 op[nop++] = target;
10761 for (i = 1; i <= 3; i++, nop++)
10764 enum machine_mode opmode, argmode;
10767 if (! signature_args[signature][i])
10769 arg = CALL_EXPR_ARG (exp, i - 1);
10770 if (arg == error_mark_node)
10772 if (signature_args[signature][i] & 8)
10775 optype = ptr_type_node;
10779 opmode = insn_data[icode].operand[nop].mode;
10780 optype = (*lang_hooks.types.type_for_mode) (opmode, 0);
10782 argmode = TYPE_MODE (TREE_TYPE (arg));
10783 if (argmode != opmode)
10784 arg = build1 (NOP_EXPR, optype, arg);
10785 op[nop] = expand_expr (arg, NULL_RTX, opmode, EXPAND_NORMAL);
10786 if (! (*insn_data[icode].operand[nop].predicate) (op[nop], opmode))
10787 op[nop] = copy_to_mode_reg (opmode, op[nop]);
10793 pat = (*insn_data[d->icode].genfun) (op[0]);
10796 pat = (*insn_data[d->icode].genfun) (op[0], op[1]);
10799 pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2]);
10802 pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2], op[3]);
10805 gcc_unreachable ();
10814 sh_expand_unop_v2sf (enum rtx_code code, rtx op0, rtx op1)
10816 rtx sel0 = const0_rtx;
10817 rtx sel1 = const1_rtx;
10818 rtx (*fn) (rtx, rtx, rtx, rtx, rtx) = gen_unary_sf_op;
10819 rtx op = gen_rtx_fmt_e (code, SFmode, op1);
10821 emit_insn ((*fn) (op0, op1, op, sel0, sel0));
10822 emit_insn ((*fn) (op0, op1, op, sel1, sel1));
10826 sh_expand_binop_v2sf (enum rtx_code code, rtx op0, rtx op1, rtx op2)
10828 rtx op = gen_rtx_fmt_ee (code, SFmode, op1, op2);
10830 emit_insn (gen_binary_sf_op0 (op0, op1, op2, op));
10831 emit_insn (gen_binary_sf_op1 (op0, op1, op2, op));
10834 /* Return true if hard register REGNO can hold a value of machine-mode MODE.
10835 We can allow any mode in any general register. The special registers
10836 only allow SImode. Don't allow any mode in the PR.
10838 We cannot hold DCmode values in the XD registers because alter_reg
10839 handles subregs of them incorrectly. We could work around this by
10840 spacing the XD registers like the DR registers, but this would require
10841 additional memory in every compilation to hold larger register vectors.
10842 We could hold SFmode / SCmode values in XD registers, but that
10843 would require a tertiary reload when reloading from / to memory,
10844 and a secondary reload to reload from / to general regs; that
10845 seems to be a losing proposition.
10847 We want to allow TImode FP regs so that when V4SFmode is loaded as TImode,
10848 it won't be ferried through GP registers first. */
int
sh_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
{
  if (SPECIAL_REGISTER_P (regno))
    return mode == SImode;

  if (regno == FPUL_REG)
    return (mode == SImode || mode == SFmode);

  if (FP_REGISTER_P (regno) && mode == SFmode)
    return 1;

  if (mode == V2SFmode)
    {
      if (((FP_REGISTER_P (regno) && (regno - FIRST_FP_REG) % 2 == 0)
	   || GENERAL_REGISTER_P (regno)))
	return 1;
      else
	return 0;
    }

  if (mode == V4SFmode)
    {
      if ((FP_REGISTER_P (regno) && (regno - FIRST_FP_REG) % 4 == 0)
	  || GENERAL_REGISTER_P (regno))
	return 1;
      else
	return 0;
    }

  if (mode == V16SFmode)
    {
      if (TARGET_SHMEDIA)
	{
	  if (FP_REGISTER_P (regno) && (regno - FIRST_FP_REG) % 16 == 0)
	    return 1;
	  else
	    return 0;
	}
      else
	return regno == FIRST_XD_REG;
    }

  if (FP_REGISTER_P (regno))
    {
      if (mode == SFmode
	  || mode == SImode
	  || ((TARGET_SH2E || TARGET_SHMEDIA) && mode == SCmode)
	  || ((((TARGET_SH4 || TARGET_SH2A_DOUBLE) && mode == DFmode)
	       || mode == DCmode
	       || (TARGET_SHMEDIA
		   && (mode == DFmode || mode == DImode
		       || mode == V2SFmode || mode == TImode)))
	      && ((regno - FIRST_FP_REG) & 1) == 0)
	  || ((TARGET_SH4 || TARGET_SHMEDIA) && mode == TImode
	      && ((regno - FIRST_FP_REG) & 3) == 0))
	return 1;
      else
	return 0;
    }

  if (XD_REGISTER_P (regno))
    return mode == DFmode;

  if (TARGET_REGISTER_P (regno))
    return (mode == DImode || mode == SImode || mode == PDImode);

  if (regno == PR_REG)
    return mode == SImode;

  if (regno == FPSCR_REG)
    return mode == PSImode;

  /* FIXME.  This works around PR target/37633 for -O0.  */
  if (!optimize && TARGET_SHMEDIA32 && GET_MODE_SIZE (mode) > 4)
    {
      unsigned int n = GET_MODE_SIZE (mode) / 8;

      if (regno >= FIRST_GENERAL_REG + 10 - n + 1
	  && regno <= FIRST_GENERAL_REG + 14)
	return 0;
    }

  return 1;
}
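/* Worked examples (added commentary, not from the original sources):

     sh_hard_regno_mode_ok (FIRST_FP_REG + 2, DFmode)   -> 1 on SH4
     sh_hard_regno_mode_ok (FIRST_FP_REG + 3, DFmode)   -> 0 (odd offset)
     sh_hard_regno_mode_ok (FIRST_FP_REG + 4, TImode)   -> 1 on SH4/SHmedia

   DFmode occupies two FP registers and TImode four, hence the
   (regno - FIRST_FP_REG) alignment tests above.  */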
/* Return the class of registers for which a mode change from FROM to TO
   is invalid.  */
bool
sh_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
			     enum reg_class rclass)
{
  /* We want to enable the use of SUBREGs as a means to
     VEC_SELECT a single element of a vector.  */
  if (to == SFmode && VECTOR_MODE_P (from) && GET_MODE_INNER (from) == SFmode)
    return (reg_classes_intersect_p (GENERAL_REGS, rclass));

  if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
    {
      if (TARGET_LITTLE_ENDIAN)
	{
	  if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
	    return reg_classes_intersect_p (DF_REGS, rclass);
	}
      else
	{
	  if (GET_MODE_SIZE (from) < 8)
	    return reg_classes_intersect_p (DF_HI_REGS, rclass);
	}
    }
  return 0;
}
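/* Illustrative consequence (added commentary, not from the original
   sources): a DFmode value lives in a pair of FP registers, so a
   size-changing subreg of a value in the DF bank could address the
   wrong half of the pair.  Reporting the mode change as invalid for
   DF_REGS (or DF_HI_REGS on big-endian) makes reload route such
   accesses through general registers instead.  */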
/* If ADDRESS refers to a CODE_LABEL, add NUSES to the number of times
   that label is used.  */

void
sh_mark_label (rtx address, int nuses)
{
  if (GOTOFF_P (address))
    {
      /* Extract the label or symbol.  */
      address = XEXP (address, 0);
      if (GET_CODE (address) == PLUS)
	address = XEXP (address, 0);
      address = XVECEXP (address, 0, 0);
    }
  if (GET_CODE (address) == LABEL_REF
      && LABEL_P (XEXP (address, 0)))
    LABEL_NUSES (XEXP (address, 0)) += nuses;
}
/* Compute extra cost of moving data between one register class
   and another.  */

/* If SECONDARY*_RELOAD_CLASS says something about the src/dst pair, regclass
   uses this information.  Hence, the general register <-> floating point
   register information here is not used for SFmode.  */

int
sh_register_move_cost (enum machine_mode mode,
		       enum reg_class srcclass, enum reg_class dstclass)
{
  if (dstclass == T_REGS || dstclass == PR_REGS)
    return 10;

  if (dstclass == MAC_REGS && srcclass == MAC_REGS)
    return 4;

  if (mode == SImode && ! TARGET_SHMEDIA && TARGET_FMOVD
      && REGCLASS_HAS_FP_REG (srcclass)
      && REGCLASS_HAS_FP_REG (dstclass))
    return 4;

  if (REGCLASS_HAS_FP_REG (dstclass) && srcclass == T_REGS)
    return ((TARGET_HARD_SH4 && !optimize_size) ? 10 : 7);

  if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
      || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
    return 9;

  if ((REGCLASS_HAS_FP_REG (dstclass)
       && REGCLASS_HAS_GENERAL_REG (srcclass))
      || (REGCLASS_HAS_GENERAL_REG (dstclass)
	  && REGCLASS_HAS_FP_REG (srcclass)))
    return ((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12)
	    * ((GET_MODE_SIZE (mode) + 7) / 8U));

  if ((dstclass == FPUL_REGS
       && REGCLASS_HAS_GENERAL_REG (srcclass))
      || (srcclass == FPUL_REGS
	  && REGCLASS_HAS_GENERAL_REG (dstclass)))
    return 5;

  if ((dstclass == FPUL_REGS
       && (srcclass == PR_REGS || srcclass == MAC_REGS || srcclass == T_REGS))
      || (srcclass == FPUL_REGS
	  && (dstclass == PR_REGS || dstclass == MAC_REGS)))
    return 7;

  if ((srcclass == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
      || ((dstclass) == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
    return 20;

  /* ??? ptabs faults on (value & 0x3) == 0x3  */
  if (TARGET_SHMEDIA
      && ((srcclass) == TARGET_REGS || (srcclass) == SIBCALL_REGS))
    {
      if (sh_gettrcost >= 0)
	return sh_gettrcost;
      else if (!TARGET_PT_FIXED)
	return 100;
    }

  if ((srcclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
      || (dstclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
    return 4;

  if (TARGET_SHMEDIA
      || (TARGET_FMOVD
	  && ! REGCLASS_HAS_GENERAL_REG (srcclass)
	  && ! REGCLASS_HAS_GENERAL_REG (dstclass)))
    return 2 * ((GET_MODE_SIZE (mode) + 7) / 8U);

  return 2 * ((GET_MODE_SIZE (mode) + 3) / 4U);
}
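/* Worked example (added commentary, not from the original sources):
   moving a DFmode value (8 bytes) between general and FP registers
   costs 8 * ((8 + 7) / 8) = 8 with TARGET_FMOVD, 12 without it, and
   4 per 8-byte chunk on SHmedia, which is why reload prefers to keep
   double-precision values in one bank whenever possible.  */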
static rtx emit_load_ptr (rtx, rtx);

static rtx
emit_load_ptr (rtx reg, rtx addr)
{
  rtx mem = gen_const_mem (ptr_mode, addr);

  if (Pmode != ptr_mode)
    mem = gen_rtx_SIGN_EXTEND (Pmode, mem);
  return emit_move_insn (reg, mem);
}
static void
sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
		    HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		    tree function)
{
  CUMULATIVE_ARGS cum;
  int structure_value_byref = 0;
  rtx this_rtx, this_value, sibcall, insns, funexp;
  tree funtype = TREE_TYPE (function);
  int simple_add = CONST_OK_FOR_ADD (delta);
  int did_load = 0;
  rtx scratch0, scratch1, scratch2;
  unsigned i;

  reload_completed = 1;
  epilogue_completed = 1;
  current_function_uses_only_leaf_regs = 1;

  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  We have such a wide range of ABIs for the
     SH that it's best to do this completely machine independently.
     "this" is passed as first argument, unless a structure return pointer
     comes first, in which case "this" comes second.  */
  INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
#ifndef PCC_STATIC_STRUCT_RETURN
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    structure_value_byref = 1;
#endif /* not PCC_STATIC_STRUCT_RETURN */
  if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
    {
      tree ptype = build_pointer_type (TREE_TYPE (funtype));

      FUNCTION_ARG_ADVANCE (cum, Pmode, ptype, 1);
    }
  this_rtx = FUNCTION_ARG (cum, Pmode, ptr_type_node, 1);

  /* For SHcompact, we only have r0 for a scratch register: r1 is the
     static chain pointer (even if you can't have nested virtual functions
     right now, someone might implement them sometime), and the rest of the
     registers are used for argument passing, are callee-saved, or reserved.  */
  /* We need to check call_used_regs / fixed_regs in case -fcall_saved-reg /
     -ffixed-reg has been used.  */
  if (! call_used_regs[0] || fixed_regs[0])
    error ("r0 needs to be available as a call-clobbered register");
  scratch0 = scratch1 = scratch2 = gen_rtx_REG (Pmode, 0);
  if (! TARGET_SH5)
    {
      if (call_used_regs[1] && ! fixed_regs[1])
	scratch1 = gen_rtx_REG (ptr_mode, 1);
      /* N.B., if not TARGET_HITACHI, register 2 is used to pass the pointer
	 pointing where to return struct values.  */
      if (call_used_regs[3] && ! fixed_regs[3])
	scratch2 = gen_rtx_REG (Pmode, 3);
    }
  else if (TARGET_SHMEDIA)
    {
      for (i = FIRST_GENERAL_REG; i <= LAST_GENERAL_REG; i++)
	if (i != REGNO (scratch0) &&
	    call_used_regs[i] && ! fixed_regs[i] && ! FUNCTION_ARG_REGNO_P (i))
	  {
	    scratch1 = gen_rtx_REG (ptr_mode, i);
	    break;
	  }
      if (scratch1 == scratch0)
	error ("Need a second call-clobbered general purpose register");
      for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
	if (call_used_regs[i] && ! fixed_regs[i])
	  {
	    scratch2 = gen_rtx_REG (Pmode, i);
	    break;
	  }
      if (scratch2 == scratch0)
	error ("Need a call-clobbered target register");
    }

  this_value = plus_constant (this_rtx, delta);
  if (vcall_offset
      && (simple_add || scratch0 != scratch1)
      && strict_memory_address_p (ptr_mode, this_value))
    {
      emit_load_ptr (scratch0, this_value);
      did_load = 1;
    }

  if (!delta)
    ; /* Do nothing.  */
  else if (simple_add)
    emit_move_insn (this_rtx, this_value);
  else
    {
      emit_move_insn (scratch1, GEN_INT (delta));
      emit_insn (gen_add2_insn (this_rtx, scratch1));
    }

  if (vcall_offset)
    {
      rtx offset_addr;

      if (!did_load)
	emit_load_ptr (scratch0, this_rtx);

      offset_addr = plus_constant (scratch0, vcall_offset);
      if (strict_memory_address_p (ptr_mode, offset_addr))
	; /* Do nothing.  */
      else if (! TARGET_SH5 && scratch0 != scratch1)
	{
	  /* scratch0 != scratch1, and we have indexed loads.  Get better
	     schedule by loading the offset into r1 and using an indexed
	     load - then the load of r1 can issue before the load from
	     (this_rtx + delta) finishes.  */
	  emit_move_insn (scratch1, GEN_INT (vcall_offset));
	  offset_addr = gen_rtx_PLUS (Pmode, scratch0, scratch1);
	}
      else if (CONST_OK_FOR_ADD (vcall_offset))
	{
	  emit_insn (gen_add2_insn (scratch0, GEN_INT (vcall_offset)));
	  offset_addr = scratch0;
	}
      else if (scratch0 != scratch1)
	{
	  emit_move_insn (scratch1, GEN_INT (vcall_offset));
	  emit_insn (gen_add2_insn (scratch0, scratch1));
	  offset_addr = scratch0;
	}
      else
	gcc_unreachable (); /* FIXME */
      emit_load_ptr (scratch0, offset_addr);

      if (Pmode != ptr_mode)
	scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
      emit_insn (gen_add2_insn (this_rtx, scratch0));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  /* If the function is overridden, so is the thunk, hence we don't
     need GOT addressing even if this is a public symbol.  */
#if 0
  if (TARGET_SH1 && ! flag_weak)
    sibcall = gen_sibcalli_thunk (funexp, const0_rtx);
  else
#endif
  if (TARGET_SH2 && flag_pic)
    {
      sibcall = gen_sibcall_pcrel (funexp, const0_rtx);
      XEXP (XVECEXP (sibcall, 0, 2), 0) = scratch2;
    }
  else
    {
      if (TARGET_SHMEDIA && flag_pic)
	{
	  funexp = gen_sym2PIC (funexp);
	  PUT_MODE (funexp, Pmode);
	}
      emit_move_insn (scratch2, funexp);
      funexp = gen_rtx_MEM (FUNCTION_MODE, scratch2);
      sibcall = gen_sibcall (funexp, const0_rtx, NULL_RTX);
    }
  sibcall = emit_call_insn (sibcall);
  SIBLING_CALL_P (sibcall) = 1;
  use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this_rtx);
  emit_barrier ();

  /* Run just enough of rest_of_compilation to do scheduling and get
     the insns emitted.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */

  insn_locators_alloc ();
  insns = get_insns ();

  if (optimize > 0)
    {
      if (! cfun->cfg)
	init_flow (cfun);
      split_all_insns_noflow ();
    }

  sh_reorg ();

  if (optimize > 0 && flag_delayed_branch)
    dbr_schedule (insns);

  shorten_branches (insns);
  final_start_function (insns, file, 1);
  final (insns, file, 1);
  final_end_function ();
  free_after_compilation (cfun);

  reload_completed = 0;
  epilogue_completed = 0;
}
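/* Illustrative sketch (added commentary, not from the original
   sources): for a C++ thunk with DELTA == 8 and VCALL_OFFSET == -16,
   the code emitted above is equivalent to

     this += 8;                  ... constant adjustment
     this += *(*this + -16);     ... adjustment loaded from the vtable
     goto *function;             ... emitted as a sibcall

   with the loads going through emit_load_ptr so that the ptr_mode vs.
   Pmode distinction on SH5 is handled in one place.  */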
rtx
function_symbol (rtx target, const char *name, enum sh_function_kind kind)
{
  rtx sym;

  /* If this is not an ordinary function, the name usually comes from a
     string literal or an sprintf buffer.  Make sure we use the same
     string consistently, so that cse will be able to unify address loads.  */
  if (kind != FUNCTION_ORDINARY)
    name = IDENTIFIER_POINTER (get_identifier (name));
  sym = gen_rtx_SYMBOL_REF (Pmode, name);
  SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_FUNCTION;
  if (flag_pic)
    switch (kind)
      {
      case FUNCTION_ORDINARY:
	break;
      case SFUNC_GOT:
	{
	  rtx reg = target ? target : gen_reg_rtx (Pmode);

	  emit_insn (gen_symGOT2reg (reg, sym));
	  sym = reg;
	  break;
	}
      case SFUNC_STATIC:
	{
	  /* ??? To allow cse to work, we use GOTOFF relocations.
	     We could add combiner patterns to transform this into
	     straight pc-relative calls with sym2PIC / bsrf when
	     label load and function call are still 1:1 and in the
	     same basic block during combine.  */
	  rtx reg = target ? target : gen_reg_rtx (Pmode);

	  emit_insn (gen_symGOTOFF2reg (reg, sym));
	  sym = reg;
	  break;
	}
      }
  if (target && sym != target)
    {
      emit_move_insn (target, sym);
      return target;
    }
  return sym;
}
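/* Usage sketch (added commentary; the routine name here is
   hypothetical): support-library helpers are typically reached as

     rtx addr = function_symbol (NULL_RTX, "__udivsi3_i4", SFUNC_GOT);

   Because non-ordinary names are canonicalized through get_identifier,
   repeated loads of the same sfunc address share one SYMBOL_REF and
   can be unified by cse.  */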
/* Find the number of a general purpose register in S.  */
static int
scavenge_reg (HARD_REG_SET *s)
{
  int r;

  for (r = FIRST_GENERAL_REG; r <= LAST_GENERAL_REG; r++)
    if (TEST_HARD_REG_BIT (*s, r))
      return r;
  return -1;
}
rtx
sh_get_pr_initial_val (void)
{
  rtx val;

  /* ??? Unfortunately, get_hard_reg_initial_val doesn't always work for the
     PR register on SHcompact, because it might be clobbered by the prologue.
     We check first if that is known to be the case.  */
  if (TARGET_SHCOMPACT
      && ((crtl->args.info.call_cookie
	   & ~ CALL_COOKIE_RET_TRAMP (1))
	  || crtl->saves_all_registers))
    return gen_frame_mem (SImode, return_address_pointer_rtx);

  /* If we haven't finished rtl generation, there might be a nonlocal label
     that we haven't seen yet.
     ??? get_hard_reg_initial_val fails if it is called after register
     allocation has started, unless it has been called before for the
     same register.  And even then, we end in trouble if we didn't use
     the register in the same basic block before.  So call
     get_hard_reg_initial_val now and wrap it in an unspec if we might
     need to replace it.  */
  /* ??? We also must do this for TARGET_SH1 in general, because otherwise
     combine can put the pseudo returned by get_hard_reg_initial_val into
     instructions that need a general purpose register, which will fail to
     be recognized when the pseudo becomes allocated to PR.  */
  val
    = get_hard_reg_initial_val (Pmode, TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
  if (TARGET_SH1)
    return gen_rtx_UNSPEC (SImode, gen_rtvec (1, val), UNSPEC_RA);
  return val;
}
int
sh_expand_t_scc (rtx operands[])
{
  enum rtx_code code = GET_CODE (operands[1]);
  rtx target = operands[0];
  rtx op0 = operands[2];
  rtx op1 = operands[3];
  rtx result = target;
  HOST_WIDE_INT val;

  if (!REG_P (op0) || REGNO (op0) != T_REG
      || !CONST_INT_P (op1))
    return 0;
  if (!REG_P (result))
    result = gen_reg_rtx (SImode);
  val = INTVAL (op1);
  if ((code == EQ && val == 1) || (code == NE && val == 0))
    emit_insn (gen_movt (result));
  else if (TARGET_SH2A && ((code == EQ && val == 0)
			   || (code == NE && val == 1)))
    emit_insn (gen_xorsi3_movrt (result));
  else if ((code == EQ && val == 0) || (code == NE && val == 1))
    {
      emit_clobber (result);
      emit_insn (gen_subc (result, result, result));
      emit_insn (gen_addsi3 (result, result, const1_rtx));
    }
  else if (code == EQ || code == NE)
    emit_insn (gen_move_insn (result, GEN_INT (code == NE)));
  else
    return 0;
  if (result != target)
    emit_move_insn (target, result);
  return 1;
}
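/* Illustrative mapping (added commentary, not from the original
   sources): for operands describing Rn = (T == 1), a single

     movt  Rn

   is emitted.  The inverted test Rn = (T == 0) uses movrt on SH2A;
   otherwise subc computes Rn = -T, and adding 1 turns that into
   1 - T, i.e. the logical negation of the T bit.  */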
/* INSN is an sfunc; return the rtx that describes the address used.  */
static rtx
extract_sfunc_addr (rtx insn)
{
  rtx pattern, part = NULL_RTX;
  int len, i;

  pattern = PATTERN (insn);
  len = XVECLEN (pattern, 0);
  for (i = 0; i < len; i++)
    {
      part = XVECEXP (pattern, 0, i);
      if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == Pmode
	  && GENERAL_REGISTER_P (true_regnum (XEXP (part, 0))))
	return XEXP (part, 0);
    }
  gcc_assert (GET_CODE (XVECEXP (pattern, 0, 0)) == UNSPEC_VOLATILE);
  return XVECEXP (XVECEXP (pattern, 0, 0), 0, 1);
}
/* Verify that the register in use_sfunc_addr still agrees with the address
   used in the sfunc.  This prevents fill_slots_from_thread from changing
   use_sfunc_addr.
   INSN is the use_sfunc_addr instruction, and REG is the register it
   guards.  */
int
check_use_sfunc_addr (rtx insn, rtx reg)
{
  /* Search for the sfunc.  It should really come right after INSN.  */
  while ((insn = NEXT_INSN (insn)))
    {
      if (LABEL_P (insn) || JUMP_P (insn))
	break;
      if (! INSN_P (insn))
	continue;

      if (GET_CODE (PATTERN (insn)) == SEQUENCE)
	insn = XVECEXP (PATTERN (insn), 0, 0);
      if (GET_CODE (PATTERN (insn)) != PARALLEL
	  || get_attr_type (insn) != TYPE_SFUNC)
	continue;
      return rtx_equal_p (extract_sfunc_addr (insn), reg);
    }
  gcc_unreachable ();
}
/* This function returns a constant rtx that represents 2**15 / pi in
   SFmode.  It's used to scale SFmode angles, in radians, to a
   fixed-point signed 16.16-bit fraction of a full circle, i.e., 2*pi
   maps to 0x10000.  */

static GTY(()) rtx sh_fsca_sf2int_rtx;

rtx
sh_fsca_sf2int (void)
{
  if (! sh_fsca_sf2int_rtx)
    {
      REAL_VALUE_TYPE rv;

      real_from_string (&rv, "10430.378350470453");
      sh_fsca_sf2int_rtx = const_double_from_real_value (rv, SFmode);
    }

  return sh_fsca_sf2int_rtx;
}

/* This function returns a constant rtx that represents 2**15 / pi in
   DFmode.  It's used to scale DFmode angles, in radians, to a
   fixed-point signed 16.16-bit fraction of a full circle, i.e., 2*pi
   maps to 0x10000.  */

static GTY(()) rtx sh_fsca_df2int_rtx;

rtx
sh_fsca_df2int (void)
{
  if (! sh_fsca_df2int_rtx)
    {
      REAL_VALUE_TYPE rv;

      real_from_string (&rv, "10430.378350470453");
      sh_fsca_df2int_rtx = const_double_from_real_value (rv, DFmode);
    }

  return sh_fsca_df2int_rtx;
}

/* This function returns a constant rtx that represents pi / 2**15 in
   SFmode.  It's used to scale a fixed-point signed 16.16-bit fraction
   of a full circle back to an SFmode value, i.e., 0x10000 maps to
   2*pi.  */

static GTY(()) rtx sh_fsca_int2sf_rtx;

rtx
sh_fsca_int2sf (void)
{
  if (! sh_fsca_int2sf_rtx)
    {
      REAL_VALUE_TYPE rv;

      real_from_string (&rv, "9.587379924285257e-5");
      sh_fsca_int2sf_rtx = const_double_from_real_value (rv, SFmode);
    }

  return sh_fsca_int2sf_rtx;
}
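/* Worked numbers (added commentary, not from the original sources):
   2**15 / pi = 32768 / 3.14159... = 10430.378..., so an angle of pi/2
   radians scales to 1.5708 * 10430.378 = 16384 = 0x4000, a quarter of
   the fixed-point circle consumed by fsca.  The inverse constant
   9.5874e-5 = pi / 2**15 maps such fractions back to radians.  */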
/* Initialize the CUMULATIVE_ARGS structure.  */

void
sh_init_cumulative_args (CUMULATIVE_ARGS * pcum,
			 tree fntype,
			 rtx libname ATTRIBUTE_UNUSED,
			 tree fndecl,
			 signed int n_named_args,
			 enum machine_mode mode)
{
  pcum->arg_count [(int) SH_ARG_FLOAT] = 0;
  pcum->free_single_fp_reg = 0;
  pcum->stack_regs = 0;
  pcum->byref_regs = 0;
  pcum->byref = 0;
  pcum->outgoing = (n_named_args == -1) ? 0 : 1;

  /* XXX - Should we check TARGET_HITACHI here ???  */
  pcum->renesas_abi = sh_attr_renesas_p (fntype) ? 1 : 0;

  if (fntype)
    {
      pcum->force_mem = ((TARGET_HITACHI || pcum->renesas_abi)
			 && aggregate_value_p (TREE_TYPE (fntype), fndecl));
      pcum->prototype_p = TYPE_ARG_TYPES (fntype) ? TRUE : FALSE;
      pcum->arg_count [(int) SH_ARG_INT]
	= TARGET_SH5 && aggregate_value_p (TREE_TYPE (fntype), fndecl);

      pcum->call_cookie
	= CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
				 && pcum->arg_count [(int) SH_ARG_INT] == 0
				 && (TYPE_MODE (TREE_TYPE (fntype)) == BLKmode
				     ? int_size_in_bytes (TREE_TYPE (fntype))
				     : GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (fntype)))) > 4
				 && (BASE_RETURN_VALUE_REG (TYPE_MODE (TREE_TYPE (fntype)))
				     == FIRST_RET_REG));
    }
  else
    {
      pcum->arg_count [(int) SH_ARG_INT] = 0;
      pcum->prototype_p = FALSE;
      if (mode != VOIDmode)
	{
	  pcum->call_cookie =
	    CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
				   && GET_MODE_SIZE (mode) > 4
				   && BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG);

	  /* If the default ABI is the Renesas ABI then all library
	     calls must assume that the library will be using the
	     Renesas ABI.  So if the function would return its result
	     in memory then we must force the address of this memory
	     block onto the stack.  Ideally we would like to call
	     targetm.calls.return_in_memory() here but we do not have
	     the TYPE or the FNDECL available so we synthesize the
	     contents of that function as best we can.  */
	  pcum->force_mem =
	    (TARGET_DEFAULT & MASK_HITACHI)
	    && (mode == BLKmode
		|| (GET_MODE_SIZE (mode) > 4
		    && !(mode == DFmode
			 && TARGET_FPU_DOUBLE)));
	}
      else
	{
	  pcum->call_cookie = 0;
	  pcum->force_mem = FALSE;
	}
    }
}
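/* Illustrative reading (added commentary, not from the original
   sources): on SHcompact, a call whose result is wider than 4 bytes
   and comes back in FIRST_RET_REG records CALL_COOKIE_RET_TRAMP (1) in
   pcum->call_cookie, indicating that the call may need a return
   trampoline for interworking with SHmedia code.  */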
/* Replace any occurrence of FROM(n) in X with TO(n).  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.

   This is like replace_rtx, except that we operate on N_REPLACEMENTS
   replacements simultaneously - FROM(n) is replacements[n*2] and to(n) is
   replacements[n*2+1] - and that we take mode changes into account.

   If a replacement is ambiguous, return NULL_RTX.

   If MODIFY is zero, don't modify any rtl in place,
   just return zero or nonzero for failure / success.  */

rtx
replace_n_hard_rtx (rtx x, rtx *replacements, int n_replacements, int modify)
{
  int i, j;
  const char *fmt;

  /* The following prevents loops from occurring when we change MEM in
     CONST_DOUBLE onto the same CONST_DOUBLE.  */
  if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
    return x;

  for (i = n_replacements - 1; i >= 0 ; i--)
    if (x == replacements[i*2] && GET_MODE (x) == GET_MODE (replacements[i*2+1]))
      return replacements[i*2+1];

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_n_hard_rtx (SUBREG_REG (x), replacements,
					n_replacements, modify);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_subreg (GET_MODE (x), new_rtx,
			       GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
	  if (! x)
	    abort ();
	}
      else if (modify)
	SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (REG_P (x))
    {
      unsigned regno = REGNO (x);
      unsigned nregs = (regno < FIRST_PSEUDO_REGISTER
			? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
      rtx result = NULL_RTX;

      for (i = n_replacements - 1; i >= 0; i--)
	{
	  rtx from = replacements[i*2];
	  rtx to = replacements[i*2+1];
	  unsigned from_regno, from_nregs, to_regno, new_regno;

	  if (!REG_P (from))
	    continue;
	  from_regno = REGNO (from);
	  from_nregs = (from_regno < FIRST_PSEUDO_REGISTER
			? HARD_REGNO_NREGS (from_regno, GET_MODE (from)) : 1);
	  if (regno < from_regno + from_nregs && regno + nregs > from_regno)
	    {
	      if (regno < from_regno
		  || regno + nregs > from_regno + nregs
		  || !REG_P (to)
		  || result)
		return NULL_RTX;
	      to_regno = REGNO (to);
	      if (to_regno < FIRST_PSEUDO_REGISTER)
		{
		  new_regno = regno + to_regno - from_regno;
		  if ((unsigned) HARD_REGNO_NREGS (new_regno, GET_MODE (x))
		      != nregs)
		    return NULL_RTX;
		  result = gen_rtx_REG (GET_MODE (x), new_regno);
		}
	      else if (GET_MODE (x) <= GET_MODE (to))
		result = gen_lowpart_common (GET_MODE (x), to);
	      else
		result = gen_lowpart_SUBREG (GET_MODE (x), to);
	    }
	}
      return result ? result : x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_n_hard_rtx (XEXP (x, 0), replacements,
					n_replacements, modify);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					new_rtx, GET_MODE (XEXP (x, 0)));
	  if (! x)
	    abort ();
	}
      else if (modify)
	XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      rtx new_rtx;

      if (fmt[i] == 'e')
	{
	  new_rtx = replace_n_hard_rtx (XEXP (x, i), replacements,
					n_replacements, modify);
	  if (!new_rtx)
	    return NULL_RTX;
	  if (modify)
	    XEXP (x, i) = new_rtx;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  {
	    new_rtx = replace_n_hard_rtx (XVECEXP (x, i, j), replacements,
					  n_replacements, modify);
	    if (!new_rtx)
	      return NULL_RTX;
	    if (modify)
	      XVECEXP (x, i, j) = new_rtx;
	  }
    }

  return x;
}
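/* Usage sketch (added commentary; the variable names are
   hypothetical): the FROM/TO pairs are laid out flat, so replacing two
   hard registers at once looks like

     rtx repl[4];
     repl[0] = from0;  repl[1] = to0;
     repl[2] = from1;  repl[3] = to1;
     if (replace_n_hard_rtx (pat, repl, 2, 0))
       replace_n_hard_rtx (pat, repl, 2, 1);

   The first call with MODIFY == 0 only checks that every replacement
   is unambiguous; the second edits PAT in place.  */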
rtx
sh_gen_truncate (enum machine_mode mode, rtx x, int need_sign_ext)
{
  enum rtx_code code = TRUNCATE;

  if (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
    {
      rtx inner = XEXP (x, 0);
      enum machine_mode inner_mode = GET_MODE (inner);

      if (inner_mode == mode)
	return inner;
      else if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
	x = inner;
      else if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode)
	       && (! need_sign_ext || GET_CODE (x) == SIGN_EXTEND))
	{
	  code = GET_CODE (x);
	  x = inner;
	}
    }
  return gen_rtx_fmt_e (code, mode, x);
}
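/* Illustrative example (added commentary, not from the original
   sources): truncating (sign_extend:DI (reg:HI r)) to SImode with
   NEED_SIGN_EXT set folds to (sign_extend:SI (reg:HI r)) instead of
   stacking a TRUNCATE on top of the extension, since the inner value
   is narrower than the requested mode and already sign-extended.  */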
/* Called via for_each_rtx after reload, to clean up truncates of
   registers that span multiple actual hard registers.  */
int
shmedia_cleanup_truncate (rtx *p, void *n_changes)
{
  rtx x = *p, reg;

  if (GET_CODE (x) != TRUNCATE)
    return 0;
  reg = XEXP (x, 0);
  if (GET_MODE_SIZE (GET_MODE (reg)) > 8 && REG_P (reg))
    {
      enum machine_mode reg_mode = GET_MODE (reg);
      XEXP (x, 0) = simplify_subreg (DImode, reg, reg_mode,
				     subreg_lowpart_offset (DImode, reg_mode));
      *(int*) n_changes += 1;
      return -1;
    }
  return 0;
}
/* Load and store depend on the highpart of the address.  However,
   set_attr_alternative does not give well-defined results before reload,
   so we must look at the rtl ourselves to see if any of the feeding
   registers is used in a memref.  */

/* Called by sh_contains_memref_p via for_each_rtx.  */
static int
sh_contains_memref_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  return (MEM_P (*loc));
}

/* Return nonzero iff INSN contains a MEM.  */
int
sh_contains_memref_p (rtx insn)
{
  return for_each_rtx (&PATTERN (insn), &sh_contains_memref_p_1, NULL);
}

/* Return nonzero iff INSN loads a banked register.  */
int
sh_loads_bankedreg_p (rtx insn)
{
  if (GET_CODE (PATTERN (insn)) == SET)
    {
      rtx op = SET_DEST (PATTERN (insn));
      if (REG_P (op) && BANKED_REGISTER_P (REGNO (op)))
	return 1;
    }

  return 0;
}
/* FNADDR is the MEM expression from a call expander.  Return an address
   to use in an SHmedia insn pattern.  */
rtx
shmedia_prepare_call_address (rtx fnaddr, int is_sibcall)
{
  int is_sym;

  fnaddr = XEXP (fnaddr, 0);
  is_sym = GET_CODE (fnaddr) == SYMBOL_REF;
  if (flag_pic && is_sym)
    {
      if (! SYMBOL_REF_LOCAL_P (fnaddr))
	{
	  rtx reg = gen_reg_rtx (Pmode);

	  /* We must not use GOTPLT for sibcalls, because PIC_REG
	     must be restored before the PLT code gets to run.  */
	  if (is_sibcall)
	    emit_insn (gen_symGOT2reg (reg, fnaddr));
	  else
	    emit_insn (gen_symGOTPLT2reg (reg, fnaddr));
	  fnaddr = reg;
	}
      else
	{
	  fnaddr = gen_sym2PIC (fnaddr);
	  PUT_MODE (fnaddr, Pmode);
	}
    }
  /* If ptabs might trap, make this visible to the rest of the compiler.
     We generally assume that symbols pertain to valid locations, but
     it is possible to generate invalid symbols with asm or linker tricks.
     In a list of functions where each returns its successor, an invalid
     symbol might denote an empty list.  */
  if (!TARGET_PT_FIXED
      && (!is_sym || TARGET_INVALID_SYMBOLS)
      && (!REG_P (fnaddr) || ! TARGET_REGISTER_P (REGNO (fnaddr))))
    {
      rtx tr = gen_reg_rtx (PDImode);

      emit_insn (gen_ptabs (tr, fnaddr));
      fnaddr = tr;
    }
  else if (! target_reg_operand (fnaddr, Pmode))
    fnaddr = copy_to_mode_reg (Pmode, fnaddr);
  return fnaddr;
}
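/* Illustrative flow (added commentary, not from the original sources):
   a PIC call to a non-local symbol is routed through the GOT, using
   symGOT2reg for sibcalls (GOTPLT would lose because PIC_REG must be
   restored before the PLT code runs) and symGOTPLT2reg otherwise.
   Any address that ptabs might fault on is materialized in a PDImode
   target register here, so the potentially trapping ptabs is visible
   to the rest of the compiler.  */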
static enum reg_class
sh_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
		     enum machine_mode mode, secondary_reload_info *sri)
{
  if (in_p)
    {
      if (REGCLASS_HAS_FP_REG (rclass)
	  && ! TARGET_SHMEDIA
	  && immediate_operand ((x), mode)
	  && ! ((fp_zero_operand (x) || fp_one_operand (x))
		&& mode == SFmode && fldi_ok ()))
	switch (mode)
	  {
	  case SFmode:
	    sri->icode = CODE_FOR_reload_insf__frn;
	    return NO_REGS;
	  case DFmode:
	    sri->icode = CODE_FOR_reload_indf__frn;
	    return NO_REGS;
	  case SImode:
	    /* ??? If we knew that we are in the appropriate mode -
	       single precision - we could use a reload pattern directly.  */
	    return FPUL_REGS;
	  default:
	    abort ();
	  }
      if (rclass == FPUL_REGS
	  && ((REG_P (x)
	       && (REGNO (x) == MACL_REG || REGNO (x) == MACH_REG
		   || REGNO (x) == T_REG))
	      || GET_CODE (x) == PLUS))
	return GENERAL_REGS;
      if (rclass == FPUL_REGS && immediate_operand (x, mode))
	{
	  if (satisfies_constraint_I08 (x) || fp_zero_operand (x))
	    return GENERAL_REGS;
	  else if (mode == SFmode)
	    return FP_REGS;
	  sri->icode = CODE_FOR_reload_insi__i_fpul;
	  return NO_REGS;
	}
      if (rclass == FPSCR_REGS
	  && ((REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
	      || (MEM_P (x) && GET_CODE (XEXP (x, 0)) == PLUS)))
	return GENERAL_REGS;
      if (REGCLASS_HAS_FP_REG (rclass)
	  && TARGET_SHMEDIA
	  && immediate_operand (x, mode)
	  && x != CONST0_RTX (GET_MODE (x))
	  && GET_MODE (x) != V4SFmode)
	return GENERAL_REGS;
      if ((mode == QImode || mode == HImode)
	  && TARGET_SHMEDIA && inqhi_operand (x, mode))
	{
	  sri->icode = ((mode == QImode)
			? CODE_FOR_reload_inqi : CODE_FOR_reload_inhi);
	  return NO_REGS;
	}
      if (TARGET_SHMEDIA && rclass == GENERAL_REGS
	  && (GET_CODE (x) == LABEL_REF || PIC_ADDR_P (x)))
	return TARGET_REGS;
    } /* end of input-only processing.  */

  if (((REGCLASS_HAS_FP_REG (rclass)
	&& (REG_P (x)
	    && (GENERAL_OR_AP_REGISTER_P (REGNO (x))
		|| (FP_REGISTER_P (REGNO (x)) && mode == SImode
		    && TARGET_FMOVD))))
       || (REGCLASS_HAS_GENERAL_REG (rclass)
	   && REG_P (x)
	   && FP_REGISTER_P (REGNO (x))))
      && ! TARGET_SHMEDIA
      && (mode == SFmode || mode == SImode))
    return FPUL_REGS;
  if ((rclass == FPUL_REGS
       || (REGCLASS_HAS_FP_REG (rclass)
	   && ! TARGET_SHMEDIA && mode == SImode))
      && (MEM_P (x)
	  || (REG_P (x)
	      && (REGNO (x) >= FIRST_PSEUDO_REGISTER
		  || REGNO (x) == T_REG
		  || system_reg_operand (x, VOIDmode)))))
    {
      if (rclass == FPUL_REGS)
	return GENERAL_REGS;
      return FPUL_REGS;
    }
  if ((rclass == TARGET_REGS
       || (TARGET_SHMEDIA && rclass == SIBCALL_REGS))
      && !satisfies_constraint_Csy (x)
      && (!REG_P (x) || ! GENERAL_REGISTER_P (REGNO (x))))
    return GENERAL_REGS;
  if ((rclass == MAC_REGS || rclass == PR_REGS)
      && REG_P (x) && ! GENERAL_REGISTER_P (REGNO (x))
      && rclass != REGNO_REG_CLASS (REGNO (x)))
    return GENERAL_REGS;
  if (rclass != GENERAL_REGS && REG_P (x)
      && TARGET_REGISTER_P (REGNO (x)))
    return GENERAL_REGS;
  return NO_REGS;
}
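/* Illustrative consequence (added commentary, not from the original
   sources): reloading the SImode constant 42 into FPUL_REGS satisfies
   constraint I08, so GENERAL_REGS is returned and the value travels
   through a general register; a wider constant such as 0x12345 is
   instead handled by the reload_insi__i_fpul pattern selected via
   sri->icode.  */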
enum sh_divide_strategy_e sh_div_strategy = SH_DIV_STRATEGY_DEFAULT;