1 /* Output routines for GCC for Renesas / SuperH SH.
2 Copyright (C) 1993, 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004 Free Software Foundation, Inc.
4 Contributed by Steve Chamberlain (sac@cygnus.com).
5 Improved by Jim Wilson (wilson@cygnus.com).
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
12 any later version.
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to
21 the Free Software Foundation, 59 Temple Place - Suite 330,
22 Boston, MA 02111-1307, USA. */
26 #include "coretypes.h"
28 #include "insn-config.h"
36 #include "hard-reg-set.h"
38 #include "insn-attr.h"
42 #include "integrate.h"
46 #include "target-def.h"
48 #include "langhooks.h"
49 #include "basic-block.h"
51 #include "cfglayout.h"
53 #include "sched-int.h"
55 #include "tree-gimple.h"
58 int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;
60 #define MSW (TARGET_LITTLE_ENDIAN ? 1 : 0)
61 #define LSW (TARGET_LITTLE_ENDIAN ? 0 : 1)
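/* Illustrative note (not part of the original sources): MSW/LSW give the
   word number of the most/least significant 32-bit word of a multi-word
   value.  For a DImode value held in the register pair r4/r5 on a
   little-endian target, word 0 (r4) holds the low word, so LSW == 0 and
   MSW == 1; on a big-endian target the roles are swapped.  */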
63 /* These are some macros to abstract register modes. */
64 #define CONST_OK_FOR_ADD(size) \
65 (TARGET_SHMEDIA ? CONST_OK_FOR_I10 (size) : CONST_OK_FOR_I08 (size))
66 #define GEN_MOV (*(TARGET_SHMEDIA64 ? gen_movdi : gen_movsi))
67 #define GEN_ADD3 (*(TARGET_SHMEDIA64 ? gen_adddi3 : gen_addsi3))
68 #define GEN_SUB3 (*(TARGET_SHMEDIA64 ? gen_subdi3 : gen_subsi3))
70 /* Set to 1 by expand_prologue() when the function is an interrupt handler. */
71 int current_function_interrupt;
73 /* ??? The pragma interrupt support will not work for SH3. */
74 /* This is set by #pragma interrupt and #pragma trapa, and causes gcc to
75 output code for the next function appropriate for an interrupt handler. */
78 /* This is set by the trap_exit attribute for functions. It specifies
79 a trap number to be used in a trapa instruction at function exit
80 (instead of an rte instruction). */
83 /* This is used by the sp_switch attribute for functions. It specifies
84 a variable holding the address of the stack the interrupt function
85 should switch to/from at entry/exit. */
88 /* This is set by #pragma trapa, and is similar to the above, except that
89 the compiler doesn't emit code to preserve all registers. */
90 static int pragma_trapa;
92 /* This is set by #pragma nosave_low_regs. This is useful on the SH3,
93 which has a separate set of low regs for User and Supervisor modes.
94 This should only be used for the lowest level of interrupts. Higher levels
95 of interrupts must save the registers in case they themselves are
96 interrupted. */
97 int pragma_nosave_low_regs;
99 /* This is used for communication between TARGET_SETUP_INCOMING_VARARGS and
100 sh_expand_prologue. */
101 int current_function_anonymous_args;
103 /* Global variables for machine-dependent things. */
105 /* Which cpu are we scheduling for. */
106 enum processor_type sh_cpu;
108 /* Definitions used in ready queue reordering for first scheduling pass. */
110 /* Reg weights arrays for modes SFmode and SImode, indexed by insn LUID. */
111 static short *regmode_weight[2];
113 /* Total SFmode and SImode weights of scheduled insns. */
114 static int curr_regmode_pressure[2];
116 /* If true, skip cycles for Q -> R movement. */
117 static int skip_cycles = 0;
119 /* Cached value of can_issue_more. This is cached in sh_variable_issue hook
120 and returned from sh_reorder2. */
121 static short cached_can_issue_more;
123 /* Saved operands from the last compare to use when we generate an scc
124 or bcc insn. */
129 /* Provides the class number of the smallest class containing
130 reg number. */
132 enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
134 R0_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
135 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
136 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
137 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
138 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
139 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
140 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
141 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
142 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
143 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
144 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
145 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
146 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
147 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
148 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
149 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
150 FP0_REGS,FP_REGS, FP_REGS, FP_REGS,
151 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
152 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
153 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
154 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
155 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
156 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
157 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
158 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
159 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
160 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
161 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
162 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
163 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
164 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
165 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
166 TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
167 TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
168 DF_REGS, DF_REGS, DF_REGS, DF_REGS,
169 DF_REGS, DF_REGS, DF_REGS, DF_REGS,
170 NO_REGS, GENERAL_REGS, PR_REGS, T_REGS,
171 MAC_REGS, MAC_REGS, FPUL_REGS, FPSCR_REGS,
175 char sh_register_names[FIRST_PSEUDO_REGISTER] \
176 [MAX_REGISTER_NAME_LENGTH + 1] = SH_REGISTER_NAMES_INITIALIZER;
178 char sh_additional_register_names[ADDREGNAMES_SIZE] \
179 [MAX_ADDITIONAL_REGISTER_NAME_LENGTH + 1]
180 = SH_ADDITIONAL_REGISTER_NAMES_INITIALIZER;
182 /* Provide reg_class from a letter such as appears in the machine
183 description. *: target independently reserved letter.
184 reg_class_from_letter['e' - 'a'] is set to NO_REGS for TARGET_FMOVD. */
186 enum reg_class reg_class_from_letter[] =
188 /* a */ ALL_REGS, /* b */ TARGET_REGS, /* c */ FPSCR_REGS, /* d */ DF_REGS,
189 /* e */ FP_REGS, /* f */ FP_REGS, /* g **/ NO_REGS, /* h */ NO_REGS,
190 /* i **/ NO_REGS, /* j */ NO_REGS, /* k */ SIBCALL_REGS, /* l */ PR_REGS,
191 /* m **/ NO_REGS, /* n **/ NO_REGS, /* o **/ NO_REGS, /* p **/ NO_REGS,
192 /* q */ NO_REGS, /* r **/ NO_REGS, /* s **/ NO_REGS, /* t */ T_REGS,
193 /* u */ NO_REGS, /* v */ NO_REGS, /* w */ FP0_REGS, /* x */ MAC_REGS,
194 /* y */ FPUL_REGS, /* z */ R0_REGS
197 int assembler_dialect;
199 static bool shmedia_space_reserved_for_target_registers;
201 static void split_branches (rtx);
202 static int branch_dest (rtx);
203 static void force_into (rtx, rtx);
204 static void print_slot (rtx);
205 static rtx add_constant (rtx, enum machine_mode, rtx);
206 static void dump_table (rtx, rtx);
207 static int hi_const (rtx);
208 static int broken_move (rtx);
209 static int mova_p (rtx);
210 static rtx find_barrier (int, rtx, rtx);
211 static int noncall_uses_reg (rtx, rtx, rtx *);
212 static rtx gen_block_redirect (rtx, int, int);
213 static void sh_reorg (void);
214 static void output_stack_adjust (int, rtx, int, HARD_REG_SET *);
215 static rtx frame_insn (rtx);
216 static rtx push (int);
217 static void pop (int);
218 static void push_regs (HARD_REG_SET *, int);
219 static int calc_live_regs (HARD_REG_SET *);
220 static void mark_use (rtx, rtx *);
221 static HOST_WIDE_INT rounded_frame_size (int);
222 static rtx mark_constant_pool_use (rtx);
223 const struct attribute_spec sh_attribute_table[];
224 static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree, int, bool *);
225 static tree sh_handle_sp_switch_attribute (tree *, tree, tree, int, bool *);
226 static tree sh_handle_trap_exit_attribute (tree *, tree, tree, int, bool *);
227 static tree sh_handle_renesas_attribute (tree *, tree, tree, int, bool *);
228 static void sh_output_function_epilogue (FILE *, HOST_WIDE_INT);
229 static void sh_insert_attributes (tree, tree *);
230 static int sh_adjust_cost (rtx, rtx, rtx, int);
231 static int sh_issue_rate (void);
232 static int sh_dfa_new_cycle (FILE *, int, rtx, int, int, int *sort_p);
233 static short find_set_regmode_weight (rtx, enum machine_mode);
234 static short find_insn_regmode_weight (rtx, enum machine_mode);
235 static void find_regmode_weight (int, enum machine_mode);
236 static void sh_md_init_global (FILE *, int, int);
237 static void sh_md_finish_global (FILE *, int);
238 static int rank_for_reorder (const void *, const void *);
239 static void swap_reorder (rtx *, int);
240 static void ready_reorder (rtx *, int);
241 static short high_pressure (enum machine_mode);
242 static int sh_reorder (FILE *, int, rtx *, int *, int);
243 static int sh_reorder2 (FILE *, int, rtx *, int *, int);
244 static void sh_md_init (FILE *, int, int);
245 static int sh_variable_issue (FILE *, int, rtx, int);
247 static bool sh_function_ok_for_sibcall (tree, tree);
249 static bool sh_cannot_modify_jumps_p (void);
250 static int sh_target_reg_class (void);
251 static bool sh_optimize_target_register_callee_saved (bool);
252 static bool sh_ms_bitfield_layout_p (tree);
254 static void sh_init_builtins (void);
255 static void sh_media_init_builtins (void);
256 static rtx sh_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
257 static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
258 static void sh_file_start (void);
259 static int flow_dependent_p (rtx, rtx);
260 static void flow_dependent_p_1 (rtx, rtx, void *);
261 static int shiftcosts (rtx);
262 static int andcosts (rtx);
263 static int addsubcosts (rtx);
264 static int multcosts (rtx);
265 static bool unspec_caller_rtx_p (rtx);
266 static bool sh_cannot_copy_insn_p (rtx);
267 static bool sh_rtx_costs (rtx, int, int, int *);
268 static int sh_address_cost (rtx);
269 static int shmedia_target_regs_stack_space (HARD_REG_SET *);
270 static int shmedia_reserve_space_for_target_registers_p (int, HARD_REG_SET *);
271 static int shmedia_target_regs_stack_adjust (HARD_REG_SET *);
272 static int scavenge_reg (HARD_REG_SET *s);
273 struct save_schedule_s;
274 static struct save_entry_s *sh5_schedule_saves (HARD_REG_SET *,
275 struct save_schedule_s *, int);
277 static rtx sh_struct_value_rtx (tree, int);
278 static bool sh_return_in_memory (tree, tree);
279 static rtx sh_builtin_saveregs (void);
280 static void sh_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
281 static bool sh_strict_argument_naming (CUMULATIVE_ARGS *);
282 static bool sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *);
283 static tree sh_build_builtin_va_list (void);
284 static tree sh_gimplify_va_arg_expr (tree, tree, tree *, tree *);
285 static bool sh_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
287 static bool sh_callee_copies (CUMULATIVE_ARGS *, enum machine_mode,
289 static int sh_dwarf_calling_convention (tree);
292 /* Initialize the GCC target structure. */
293 #undef TARGET_ATTRIBUTE_TABLE
294 #define TARGET_ATTRIBUTE_TABLE sh_attribute_table
296 /* The next two are used for debug info when compiling with -gdwarf. */
297 #undef TARGET_ASM_UNALIGNED_HI_OP
298 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uaword\t"
299 #undef TARGET_ASM_UNALIGNED_SI_OP
300 #define TARGET_ASM_UNALIGNED_SI_OP "\t.ualong\t"
302 /* These are NULLed out on non-SH5 in OVERRIDE_OPTIONS. */
303 #undef TARGET_ASM_UNALIGNED_DI_OP
304 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaquad\t"
305 #undef TARGET_ASM_ALIGNED_DI_OP
306 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
308 #undef TARGET_ASM_FUNCTION_EPILOGUE
309 #define TARGET_ASM_FUNCTION_EPILOGUE sh_output_function_epilogue
311 #undef TARGET_ASM_OUTPUT_MI_THUNK
312 #define TARGET_ASM_OUTPUT_MI_THUNK sh_output_mi_thunk
314 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
315 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
317 #undef TARGET_ASM_FILE_START
318 #define TARGET_ASM_FILE_START sh_file_start
319 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
320 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
322 #undef TARGET_INSERT_ATTRIBUTES
323 #define TARGET_INSERT_ATTRIBUTES sh_insert_attributes
325 #undef TARGET_SCHED_ADJUST_COST
326 #define TARGET_SCHED_ADJUST_COST sh_adjust_cost
328 #undef TARGET_SCHED_ISSUE_RATE
329 #define TARGET_SCHED_ISSUE_RATE sh_issue_rate
331 /* The next 5 hooks have been implemented for reenabling sched1. With the
332 help of these macros we are limiting the movement of insns in sched1 to
333 reduce the register pressure. The overall idea is to keep count of SImode
334 and SFmode regs required by already scheduled insns. When these counts
335 cross some threshold values, give priority to insns that free registers.
336 The insn that frees registers is most likely to be the insn with lowest
337 LUID (original insn order); but such an insn might be there in the stalled
338 queue (Q) instead of the ready queue (R). To solve this, we skip cycles
339 up to a max of 8 cycles so that such insns may move from Q -> R.
341 The descriptions of the hooks are as below:
343 TARGET_SCHED_INIT_GLOBAL: Added a new target hook in the generic
344 scheduler; it is called inside the sched_init function just after
345 find_insn_reg_weights function call. It is used to calculate the SImode
346 and SFmode weights of insns of basic blocks, much like what
347 find_insn_reg_weights does.
348 TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.
350 TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
351 indicated by TARGET_SCHED_REORDER2; doing this may move insns from
352 Q -> R.
354 TARGET_SCHED_REORDER: If the register pressure for SImode or SFmode is
355 high; reorder the ready queue so that the insn with lowest LUID will be
356 issued first.
358 TARGET_SCHED_REORDER2: If the register pressure is high, indicate to
359 TARGET_SCHED_DFA_NEW_CYCLE to skip cycles.
361 TARGET_SCHED_VARIABLE_ISSUE: Cache the value of can_issue_more so that it
362 can be returned from TARGET_SCHED_REORDER2.
364 TARGET_SCHED_INIT: Reset the register pressure counting variables. */
366 #undef TARGET_SCHED_DFA_NEW_CYCLE
367 #define TARGET_SCHED_DFA_NEW_CYCLE sh_dfa_new_cycle
369 #undef TARGET_SCHED_INIT_GLOBAL
370 #define TARGET_SCHED_INIT_GLOBAL sh_md_init_global
372 #undef TARGET_SCHED_FINISH_GLOBAL
373 #define TARGET_SCHED_FINISH_GLOBAL sh_md_finish_global
375 #undef TARGET_SCHED_VARIABLE_ISSUE
376 #define TARGET_SCHED_VARIABLE_ISSUE sh_variable_issue
378 #undef TARGET_SCHED_REORDER
379 #define TARGET_SCHED_REORDER sh_reorder
381 #undef TARGET_SCHED_REORDER2
382 #define TARGET_SCHED_REORDER2 sh_reorder2
384 #undef TARGET_SCHED_INIT
385 #define TARGET_SCHED_INIT sh_md_init
387 #undef TARGET_CANNOT_MODIFY_JUMPS_P
388 #define TARGET_CANNOT_MODIFY_JUMPS_P sh_cannot_modify_jumps_p
389 #undef TARGET_BRANCH_TARGET_REGISTER_CLASS
390 #define TARGET_BRANCH_TARGET_REGISTER_CLASS sh_target_reg_class
391 #undef TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED
392 #define TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED \
393 sh_optimize_target_register_callee_saved
395 #undef TARGET_MS_BITFIELD_LAYOUT_P
396 #define TARGET_MS_BITFIELD_LAYOUT_P sh_ms_bitfield_layout_p
398 #undef TARGET_INIT_BUILTINS
399 #define TARGET_INIT_BUILTINS sh_init_builtins
400 #undef TARGET_EXPAND_BUILTIN
401 #define TARGET_EXPAND_BUILTIN sh_expand_builtin
403 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
404 #define TARGET_FUNCTION_OK_FOR_SIBCALL sh_function_ok_for_sibcall
406 #undef TARGET_CANNOT_COPY_INSN_P
407 #define TARGET_CANNOT_COPY_INSN_P sh_cannot_copy_insn_p
408 #undef TARGET_RTX_COSTS
409 #define TARGET_RTX_COSTS sh_rtx_costs
410 #undef TARGET_ADDRESS_COST
411 #define TARGET_ADDRESS_COST sh_address_cost
413 #undef TARGET_MACHINE_DEPENDENT_REORG
414 #define TARGET_MACHINE_DEPENDENT_REORG sh_reorg
417 #undef TARGET_HAVE_TLS
418 #define TARGET_HAVE_TLS true
421 #undef TARGET_PROMOTE_PROTOTYPES
422 #define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
423 #undef TARGET_PROMOTE_FUNCTION_ARGS
424 #define TARGET_PROMOTE_FUNCTION_ARGS sh_promote_prototypes
425 #undef TARGET_PROMOTE_FUNCTION_RETURN
426 #define TARGET_PROMOTE_FUNCTION_RETURN sh_promote_prototypes
428 #undef TARGET_STRUCT_VALUE_RTX
429 #define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
430 #undef TARGET_RETURN_IN_MEMORY
431 #define TARGET_RETURN_IN_MEMORY sh_return_in_memory
433 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
434 #define TARGET_EXPAND_BUILTIN_SAVEREGS sh_builtin_saveregs
435 #undef TARGET_SETUP_INCOMING_VARARGS
436 #define TARGET_SETUP_INCOMING_VARARGS sh_setup_incoming_varargs
437 #undef TARGET_STRICT_ARGUMENT_NAMING
438 #define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming
439 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
440 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named
441 #undef TARGET_MUST_PASS_IN_STACK
442 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
443 #undef TARGET_PASS_BY_REFERENCE
444 #define TARGET_PASS_BY_REFERENCE sh_pass_by_reference
445 #undef TARGET_CALLEE_COPIES
446 #define TARGET_CALLEE_COPIES sh_callee_copies
448 #undef TARGET_BUILD_BUILTIN_VA_LIST
449 #define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list
450 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
451 #define TARGET_GIMPLIFY_VA_ARG_EXPR sh_gimplify_va_arg_expr
453 #undef TARGET_VECTOR_MODE_SUPPORTED_P
454 #define TARGET_VECTOR_MODE_SUPPORTED_P sh_vector_mode_supported_p
456 #undef TARGET_PCH_VALID_P
457 #define TARGET_PCH_VALID_P sh_pch_valid_p
459 #undef TARGET_DWARF_CALLING_CONVENTION
460 #define TARGET_DWARF_CALLING_CONVENTION sh_dwarf_calling_convention
462 /* Return regmode weight for insn. */
463 #define INSN_REGMODE_WEIGHT(INSN, MODE) regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)]
465 /* Return current register pressure for regmode. */
466 #define CURR_REGMODE_PRESSURE(MODE) curr_regmode_pressure[((MODE) == SImode) ? 0 : 1]
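/* Illustrative sketch (not part of the original sources): the reordering
   hooks described above are expected to consult these counters roughly as

     if (CURR_REGMODE_PRESSURE (SImode) >= <SImode threshold>
         || CURR_REGMODE_PRESSURE (SFmode) >= <SFmode threshold>)
       ... give priority to insns that free registers ...

   where the threshold names are placeholders, not identifiers defined in
   this excerpt.  */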
470 #undef TARGET_ENCODE_SECTION_INFO
471 #define TARGET_ENCODE_SECTION_INFO sh_symbian_encode_section_info
472 #undef TARGET_STRIP_NAME_ENCODING
473 #define TARGET_STRIP_NAME_ENCODING sh_symbian_strip_name_encoding
474 #undef TARGET_CXX_IMPORT_EXPORT_CLASS
475 #define TARGET_CXX_IMPORT_EXPORT_CLASS symbian_import_export_class
479 struct gcc_target targetm = TARGET_INITIALIZER;
481 /* Print the operand address in x to the stream. */
484 print_operand_address (FILE *stream, rtx x)
486 switch (GET_CODE (x))
490 fprintf (stream, "@%s", reg_names[true_regnum (x)]);
495 rtx base = XEXP (x, 0);
496 rtx index = XEXP (x, 1);
498 switch (GET_CODE (index))
501 fprintf (stream, "@(%d,%s)", (int) INTVAL (index),
502 reg_names[true_regnum (base)]);
508 int base_num = true_regnum (base);
509 int index_num = true_regnum (index);
511 fprintf (stream, "@(r0,%s)",
512 reg_names[MAX (base_num, index_num)]);
524 fprintf (stream, "@-%s", reg_names[true_regnum (XEXP (x, 0))]);
528 fprintf (stream, "@%s+", reg_names[true_regnum (XEXP (x, 0))]);
532 x = mark_constant_pool_use (x);
533 output_addr_const (stream, x);
538 /* Print operand x (an rtx) in assembler syntax to file stream
539 according to modifier code.
541 '.' print a .s if insn needs delay slot
542 ',' print LOCAL_LABEL_PREFIX
543 '@' print trap, rte or rts depending upon pragma interruptness
544 '#' output a nop if there is nothing to put in the delay slot
545 ''' print likelihood suffix (/u for unlikely).
546 'O' print a constant without the #
547 'R' print the LSW of a dp value - changes if in little endian
548 'S' print the MSW of a dp value - changes if in little endian
549 'T' print the next word of a dp value - same as 'R' in big endian mode.
550 'M' print an `x' if `m' will print `base,index'.
551 'N' print 'r63' if the operand is (const_int 0).
552 'd' print a V2SF reg as dN instead of fpN.
553 'm' print a pair `base,offset' or `base,index', for LD and ST.
554 'u' prints the lowest 16 bits of CONST_INT, as an unsigned value.
555 'o' output an operator. */
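/* Illustrative example (not part of the original sources): for a
   double-word value held in the register pair r4/r5, '%S0' prints the
   register holding the most significant word and '%R0' the one holding
   the least significant word, so on a little-endian target they print r5
   and r4 respectively, and the other way round on big-endian.  */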
558 print_operand (FILE *stream, rtx x, int code)
564 && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
565 && get_attr_length (XVECEXP (final_sequence, 0, 1)))
566 fprintf (stream, ASSEMBLER_DIALECT ? "/s" : ".s");
569 fprintf (stream, "%s", LOCAL_LABEL_PREFIX);
573 fprintf (stream, "trapa #%d", trap_exit);
574 else if (sh_cfun_interrupt_handler_p ())
575 fprintf (stream, "rte");
577 fprintf (stream, "rts");
580 /* Output a nop if there's nothing in the delay slot. */
581 if (dbr_sequence_length () == 0)
582 fprintf (stream, "\n\tnop");
586 rtx note = find_reg_note (current_output_insn, REG_BR_PROB, 0);
588 if (note && INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
589 fputs ("/u", stream);
593 x = mark_constant_pool_use (x);
594 output_addr_const (stream, x);
597 fputs (reg_names[REGNO (x) + LSW], (stream));
600 fputs (reg_names[REGNO (x) + MSW], (stream));
603 /* Next word of a double. */
604 switch (GET_CODE (x))
607 fputs (reg_names[REGNO (x) + 1], (stream));
610 if (GET_CODE (XEXP (x, 0)) != PRE_DEC
611 && GET_CODE (XEXP (x, 0)) != POST_INC)
612 x = adjust_address (x, SImode, 4);
613 print_operand_address (stream, XEXP (x, 0));
620 switch (GET_CODE (x))
622 case PLUS: fputs ("add", stream); break;
623 case MINUS: fputs ("sub", stream); break;
624 case MULT: fputs ("mul", stream); break;
625 case DIV: fputs ("div", stream); break;
626 case EQ: fputs ("eq", stream); break;
627 case NE: fputs ("ne", stream); break;
628 case GT: case LT: fputs ("gt", stream); break;
629 case GE: case LE: fputs ("ge", stream); break;
630 case GTU: case LTU: fputs ("gtu", stream); break;
631 case GEU: case LEU: fputs ("geu", stream); break;
637 if (GET_CODE (x) == MEM
638 && GET_CODE (XEXP (x, 0)) == PLUS
639 && (GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
640 || GET_CODE (XEXP (XEXP (x, 0), 1)) == SUBREG))
645 if (GET_CODE (x) != MEM)
648 switch (GET_CODE (x))
652 print_operand (stream, x, 0);
653 fputs (", 0", stream);
657 print_operand (stream, XEXP (x, 0), 0);
658 fputs (", ", stream);
659 print_operand (stream, XEXP (x, 1), 0);
668 if (GET_CODE (x) != REG || GET_MODE (x) != V2SFmode)
671 fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
675 if (x == CONST0_RTX (GET_MODE (x)))
677 fprintf ((stream), "r63");
682 if (GET_CODE (x) == CONST_INT)
684 fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1));
691 switch (GET_CODE (x))
693 /* FIXME: We need this on SHmedia32 because reload generates
694 some sign-extended HI or QI loads into DImode registers
695 but, because Pmode is SImode, the address ends up with a
696 subreg:SI of the DImode register. Maybe reload should be
697 fixed so as to apply alter_subreg to such loads? */
699 if (SUBREG_BYTE (x) != 0
700 || GET_CODE (SUBREG_REG (x)) != REG)
707 if (FP_REGISTER_P (REGNO (x))
708 && GET_MODE (x) == V16SFmode)
709 fprintf ((stream), "mtrx%s", reg_names[REGNO (x)] + 2);
710 else if (FP_REGISTER_P (REGNO (x))
711 && GET_MODE (x) == V4SFmode)
712 fprintf ((stream), "fv%s", reg_names[REGNO (x)] + 2);
713 else if (GET_CODE (x) == REG
714 && GET_MODE (x) == V2SFmode)
715 fprintf ((stream), "fp%s", reg_names[REGNO (x)] + 2);
716 else if (FP_REGISTER_P (REGNO (x))
717 && GET_MODE_SIZE (GET_MODE (x)) > 4)
718 fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
720 fputs (reg_names[REGNO (x)], (stream));
724 output_address (XEXP (x, 0));
729 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
730 && GET_MODE (XEXP (x, 0)) == DImode
731 && GET_CODE (XEXP (XEXP (x, 0), 0)) == TRUNCATE
732 && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode)
734 rtx val = XEXP (XEXP (XEXP (x, 0), 0), 0);
737 if (GET_CODE (val) == ASHIFTRT)
740 if (GET_CODE (XEXP (val, 0)) == CONST)
742 output_addr_const (stream, XEXP (val, 0));
743 if (GET_CODE (XEXP (val, 0)) == CONST)
745 fputs (" >> ", stream);
746 output_addr_const (stream, XEXP (val, 1));
751 if (GET_CODE (val) == CONST)
753 output_addr_const (stream, val);
754 if (GET_CODE (val) == CONST)
757 fputs (" & 65535)", stream);
765 output_addr_const (stream, x);
772 /* Like force_operand, but guarantees that VALUE ends up in TARGET. */
774 force_into (rtx value, rtx target)
776 value = force_operand (value, target);
777 if (! rtx_equal_p (value, target))
778 emit_insn (gen_move_insn (target, value));
781 /* Emit code to perform a block move. Choose the best method.
783 OPERANDS[0] is the destination.
784 OPERANDS[1] is the source.
785 OPERANDS[2] is the size.
786 OPERANDS[3] is the alignment safe to use. */
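/* Illustrative example (not part of the original sources): with a
   constant size of 12 bytes and 4-byte alignment, the bytes == 12 case
   below forces the destination address into r4 and the source address
   into r5, then calls the __movmemSI12_i4 library helper instead of
   emitting an inline copy loop.  */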
789 expand_block_move (rtx *operands)
791 int align = INTVAL (operands[3]);
792 int constp = (GET_CODE (operands[2]) == CONST_INT);
793 int bytes = (constp ? INTVAL (operands[2]) : 0);
798 /* If we could use mov.l to move words and dest is word-aligned, we
799 can use movua.l for loads and still generate a relatively short
800 and efficient sequence. */
801 if (TARGET_SH4A_ARCH && align < 4
802 && MEM_ALIGN (operands[0]) >= 32
803 && can_move_by_pieces (bytes, 32))
805 rtx dest = copy_rtx (operands[0]);
806 rtx src = copy_rtx (operands[1]);
807 /* We could use different pseudos for each copied word, but
808 since movua can only load into r0, it's kind of
809 pointless. */
810 rtx temp = gen_reg_rtx (SImode);
811 rtx src_addr = copy_addr_to_reg (XEXP (src, 0));
814 while (copied + 4 <= bytes)
816 rtx to = adjust_address (dest, SImode, copied);
817 rtx from = adjust_automodify_address (src, SImode, src_addr, copied);
819 emit_insn (gen_movua (temp, from));
820 emit_move_insn (src_addr, plus_constant (src_addr, 4));
821 emit_move_insn (to, temp);
826 move_by_pieces (adjust_address (dest, BLKmode, copied),
827 adjust_automodify_address (src, BLKmode,
829 bytes - copied, align, 0);
834 /* If it isn't a constant number of bytes, or if it doesn't have 4 byte
835 alignment, or if it isn't a multiple of 4 bytes, then fail. */
836 if (align < 4 || (bytes % 4 != 0))
843 else if (bytes == 12)
848 rtx r4 = gen_rtx_REG (SImode, 4);
849 rtx r5 = gen_rtx_REG (SImode, 5);
851 entry_name = get_identifier ("__movmemSI12_i4");
853 sym = function_symbol (IDENTIFIER_POINTER (entry_name));
854 func_addr_rtx = copy_to_mode_reg (Pmode, sym);
855 force_into (XEXP (operands[0], 0), r4);
856 force_into (XEXP (operands[1], 0), r5);
857 emit_insn (gen_block_move_real_i4 (func_addr_rtx));
860 else if (! TARGET_SMALLCODE)
866 rtx r4 = gen_rtx_REG (SImode, 4);
867 rtx r5 = gen_rtx_REG (SImode, 5);
868 rtx r6 = gen_rtx_REG (SImode, 6);
870 entry_name = get_identifier (bytes & 4
871 ? "__movmem_i4_odd"
872 : "__movmem_i4_even");
873 sym = function_symbol (IDENTIFIER_POINTER (entry_name));
874 func_addr_rtx = copy_to_mode_reg (Pmode, sym);
875 force_into (XEXP (operands[0], 0), r4);
876 force_into (XEXP (operands[1], 0), r5);
879 emit_insn (gen_move_insn (r6, GEN_INT (dwords - 1)));
880 emit_insn (gen_block_lump_real_i4 (func_addr_rtx));
892 rtx r4 = gen_rtx_REG (SImode, 4);
893 rtx r5 = gen_rtx_REG (SImode, 5);
895 sprintf (entry, "__movmemSI%d", bytes);
896 entry_name = get_identifier (entry);
897 sym = function_symbol (IDENTIFIER_POINTER (entry_name));
898 func_addr_rtx = copy_to_mode_reg (Pmode, sym);
899 force_into (XEXP (operands[0], 0), r4);
900 force_into (XEXP (operands[1], 0), r5);
901 emit_insn (gen_block_move_real (func_addr_rtx));
905 /* This is the same number of bytes as a memcpy call, but to a different
906 less common function name, so this will occasionally use more space. */
907 if (! TARGET_SMALLCODE)
912 int final_switch, while_loop;
913 rtx r4 = gen_rtx_REG (SImode, 4);
914 rtx r5 = gen_rtx_REG (SImode, 5);
915 rtx r6 = gen_rtx_REG (SImode, 6);
917 entry_name = get_identifier ("__movmem");
918 sym = function_symbol (IDENTIFIER_POINTER (entry_name));
919 func_addr_rtx = copy_to_mode_reg (Pmode, sym);
920 force_into (XEXP (operands[0], 0), r4);
921 force_into (XEXP (operands[1], 0), r5);
923 /* r6 controls the size of the move. 16 is decremented from it
924 for each 64 bytes moved. Then the negative bit left over is used
925 as an index into a list of move instructions. e.g., a 72 byte move
926 would be set up with size(r6) = 14, for one iteration through the
927 big while loop, and a switch of -2 for the last part. */
929 final_switch = 16 - ((bytes / 4) % 16);
930 while_loop = ((bytes / 4) / 16 - 1) * 16;
931 emit_insn (gen_move_insn (r6, GEN_INT (while_loop + final_switch)));
932 emit_insn (gen_block_lump_real (func_addr_rtx));
939 /* Prepare operands for a move define_expand; specifically, one of the
940 operands must be in a register. */
943 prepare_move_operands (rtx operands[], enum machine_mode mode)
945 if ((mode == SImode || mode == DImode)
947 && ! ((mode == Pmode || mode == ptr_mode)
948 && tls_symbolic_operand (operands[1], Pmode) != 0))
951 if (SYMBOLIC_CONST_P (operands[1]))
953 if (GET_CODE (operands[0]) == MEM)
954 operands[1] = force_reg (Pmode, operands[1]);
955 else if (TARGET_SHMEDIA
956 && GET_CODE (operands[1]) == LABEL_REF
957 && target_reg_operand (operands[0], mode))
961 temp = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
962 operands[1] = legitimize_pic_address (operands[1], mode, temp);
965 else if (GET_CODE (operands[1]) == CONST
966 && GET_CODE (XEXP (operands[1], 0)) == PLUS
967 && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))
969 temp = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
970 temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
972 operands[1] = expand_binop (mode, add_optab, temp,
973 XEXP (XEXP (operands[1], 0), 1),
974 no_new_pseudos ? temp
975 : gen_reg_rtx (Pmode),
980 if (! reload_in_progress && ! reload_completed)
982 /* Copy the source to a register if neither operand is a register. */
983 if (! register_operand (operands[0], mode)
984 && ! sh_register_operand (operands[1], mode))
985 operands[1] = copy_to_mode_reg (mode, operands[1]);
987 if (GET_CODE (operands[0]) == MEM && ! memory_operand (operands[0], mode))
989 /* This is like change_address_1 (operands[0], mode, 0, 1) ,
990 except that we can't use that function because it is static. */
991 rtx new = change_address (operands[0], mode, 0);
992 MEM_COPY_ATTRIBUTES (new, operands[0]);
996 /* This case can happen while generating code to move the result
997 of a library call to the target. Reject `st r0,@(rX,rY)' because
998 reload will fail to find a spill register for rX, since r0 is already
999 being used for the source. */
1000 else if (refers_to_regno_p (R0_REG, R0_REG + 1, operands[1], (rtx *)0)
1001 && GET_CODE (operands[0]) == MEM
1002 && GET_CODE (XEXP (operands[0], 0)) == PLUS
1003 && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == REG)
1004 operands[1] = copy_to_mode_reg (mode, operands[1]);
1007 if (mode == Pmode || mode == ptr_mode)
1010 enum tls_model tls_kind;
1014 if ((tls_kind = tls_symbolic_operand (op1, Pmode)))
1016 rtx tga_op1, tga_ret, tmp, tmp2;
1021 case TLS_MODEL_GLOBAL_DYNAMIC:
1022 tga_ret = gen_rtx_REG (Pmode, R0_REG);
1023 emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
1027 case TLS_MODEL_LOCAL_DYNAMIC:
1028 tga_ret = gen_rtx_REG (Pmode, R0_REG);
1029 emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));
1031 tmp = gen_reg_rtx (Pmode);
1032 emit_move_insn (tmp, tga_ret);
1034 if (register_operand (op0, Pmode))
1037 tmp2 = gen_reg_rtx (Pmode);
1039 emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp));
1043 case TLS_MODEL_INITIAL_EXEC:
1045 emit_insn (gen_GOTaddr2picreg ());
1046 tga_op1 = gen_reg_rtx (Pmode);
1047 tmp = gen_sym2GOTTPOFF (op1);
1048 emit_insn (gen_tls_initial_exec (tga_op1, tmp));
1052 case TLS_MODEL_LOCAL_EXEC:
1053 tmp2 = gen_reg_rtx (Pmode);
1054 emit_insn (gen_load_gbr (tmp2));
1055 tmp = gen_reg_rtx (Pmode);
1056 emit_insn (gen_symTPOFF2reg (tmp, op1));
1058 if (register_operand (op0, Pmode))
1061 op1 = gen_reg_rtx (Pmode);
1063 emit_insn (gen_addsi3 (op1, tmp, tmp2));
1076 /* Prepare the operands for an scc instruction; make sure that the
1077 compare has been done. */
1079 prepare_scc_operands (enum rtx_code code)
1081 rtx t_reg = gen_rtx_REG (SImode, T_REG);
1082 enum rtx_code oldcode = code;
1083 enum machine_mode mode;
1085 /* First need a compare insn. */
1089 /* It isn't possible to handle this case. */
1106 if (code != oldcode)
1108 rtx tmp = sh_compare_op0;
1109 sh_compare_op0 = sh_compare_op1;
1110 sh_compare_op1 = tmp;
1113 mode = GET_MODE (sh_compare_op0);
1114 if (mode == VOIDmode)
1115 mode = GET_MODE (sh_compare_op1);
1117 sh_compare_op0 = force_reg (mode, sh_compare_op0);
1118 if ((code != EQ && code != NE
1119 && (sh_compare_op1 != const0_rtx
1120 || code == GTU || code == GEU || code == LTU || code == LEU))
1121 || (mode == DImode && sh_compare_op1 != const0_rtx)
1122 || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
1123 sh_compare_op1 = force_reg (mode, sh_compare_op1);
1125 if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
1126 (mode == SFmode ? emit_sf_insn : emit_df_insn)
1127 (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
1128 gen_rtx_SET (VOIDmode, t_reg,
1129 gen_rtx_fmt_ee (code, SImode,
1130 sh_compare_op0, sh_compare_op1)),
1131 gen_rtx_USE (VOIDmode, get_fpscr_rtx ()))));
1133 emit_insn (gen_rtx_SET (VOIDmode, t_reg,
1134 gen_rtx_fmt_ee (code, SImode,
1135 sh_compare_op0, sh_compare_op1)));
1140 /* Called from the md file, set up the operands of a compare instruction. */
1143 from_compare (rtx *operands, int code)
1145 enum machine_mode mode = GET_MODE (sh_compare_op0);
1147 if (mode == VOIDmode)
1148 mode = GET_MODE (sh_compare_op1);
1151 || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
1153 /* Force args into regs, since we can't use constants here. */
1154 sh_compare_op0 = force_reg (mode, sh_compare_op0);
1155 if (sh_compare_op1 != const0_rtx
1156 || code == GTU || code == GEU
1157 || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
1158 sh_compare_op1 = force_reg (mode, sh_compare_op1);
1160 if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT && code == GE)
1162 from_compare (operands, GT);
1163 insn = gen_ieee_ccmpeqsf_t (sh_compare_op0, sh_compare_op1);
1166 insn = gen_rtx_SET (VOIDmode,
1167 gen_rtx_REG (SImode, T_REG),
1168 gen_rtx_fmt_ee (code, SImode,
1169 sh_compare_op0, sh_compare_op1));
1170 if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
1172 insn = gen_rtx_PARALLEL (VOIDmode,
1174 gen_rtx_USE (VOIDmode, get_fpscr_rtx ())));
1175 (mode == SFmode ? emit_sf_insn : emit_df_insn) (insn);
1181 /* Functions to output assembly code. */
1183 /* Return a sequence of instructions to perform DI or DF move.
1185 Since the SH cannot move a DI or DF in one instruction, we have
1186 to take care when we see overlapping source and dest registers. */
1189 output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
1190 enum machine_mode mode)
1192 rtx dst = operands[0];
1193 rtx src = operands[1];
1195 if (GET_CODE (dst) == MEM
1196 && GET_CODE (XEXP (dst, 0)) == PRE_DEC)
1197 return "mov.l %T1,%0\n\tmov.l %1,%0";
1199 if (register_operand (dst, mode)
1200 && register_operand (src, mode))
1202 if (REGNO (src) == MACH_REG)
1203 return "sts mach,%S0\n\tsts macl,%R0";
1205 /* When mov.d r1,r2 do r2->r3 then r1->r2;
1206 when mov.d r1,r0 do r1->r0 then r2->r1. */
1208 if (REGNO (src) + 1 == REGNO (dst))
1209 return "mov %T1,%T0\n\tmov %1,%0";
1211 return "mov %1,%0\n\tmov %T1,%T0";
1213 else if (GET_CODE (src) == CONST_INT)
1215 if (INTVAL (src) < 0)
1216 output_asm_insn ("mov #-1,%S0", operands);
1218 output_asm_insn ("mov #0,%S0", operands);
1220 return "mov %1,%R0";
1222 else if (GET_CODE (src) == MEM)
1225 int dreg = REGNO (dst);
1226 rtx inside = XEXP (src, 0);
1228 if (GET_CODE (inside) == REG)
1229 ptrreg = REGNO (inside);
1230 else if (GET_CODE (inside) == SUBREG)
1231 ptrreg = subreg_regno (inside);
1232 else if (GET_CODE (inside) == PLUS)
1234 ptrreg = REGNO (XEXP (inside, 0));
1235 /* ??? A r0+REG address shouldn't be possible here, because it isn't
1236 an offsettable address. Unfortunately, offsettable addresses use
1237 QImode to check the offset, and a QImode offsettable address
1238 requires r0 for the other operand, which is not currently
1239 supported, so we can't use the 'o' constraint.
1240 Thus we must check for and handle r0+REG addresses here.
1241 We punt for now, since this is likely very rare. */
1242 if (GET_CODE (XEXP (inside, 1)) == REG)
1245 else if (GET_CODE (inside) == LABEL_REF)
1246 return "mov.l %1,%0\n\tmov.l %1+4,%T0";
1247 else if (GET_CODE (inside) == POST_INC)
1248 return "mov.l %1,%0\n\tmov.l %1,%T0";
1252 /* Work out the safe way to copy. Copy into the second half first. */
1254 return "mov.l %T1,%T0\n\tmov.l %1,%0";
1257 return "mov.l %1,%0\n\tmov.l %T1,%T0";
1260 /* Print an instruction which would have gone into a delay slot after
1261 another instruction, but couldn't because the other instruction expanded
1262 into a sequence where putting the slot insn at the end wouldn't work. */
1265 print_slot (rtx insn)
1267 final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file, optimize, 0, 1, NULL);
1269 INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
1273 output_far_jump (rtx insn, rtx op)
1275 struct { rtx lab, reg, op; } this;
1276 rtx braf_base_lab = NULL_RTX;
1279 int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
1282 this.lab = gen_label_rtx ();
1286 && offset - get_attr_length (insn) <= 32766)
1289 jump = "mov.w %O0,%1; braf %1";
1297 jump = "mov.l %O0,%1; braf %1";
1299 jump = "mov.l r0,@-r15; mova %O0,r0; mov.l @r0,%1; add r0,%1; mov.l @r15+,r0; jmp @%1";
1302 jump = "mov.l %O0,%1; jmp @%1";
1304 /* If we have a scratch register available, use it. */
1305 if (GET_CODE ((prev = prev_nonnote_insn (insn))) == INSN
1306 && INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
1308 this.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
1309 if (REGNO (this.reg) == R0_REG && flag_pic && ! TARGET_SH2)
1310 jump = "mov.l r1,@-r15; mova %O0,r0; mov.l @r0,r1; add r1,r0; mov.l @r15+,r1; jmp @%1";
1311 output_asm_insn (jump, &this.lab);
1312 if (dbr_sequence_length ())
1313 print_slot (final_sequence);
1315 output_asm_insn ("nop", 0);
1319 /* Output the delay slot insn first if any. */
1320 if (dbr_sequence_length ())
1321 print_slot (final_sequence);
1323 this.reg = gen_rtx_REG (SImode, 13);
1324 /* We must keep the stack aligned to 8-byte boundaries on SH5.
1325 Fortunately, MACL is fixed and call-clobbered, and we never
1326 need its value across jumps, so save r13 in it instead of in
1327 the stack. */
1329 output_asm_insn ("lds r13, macl", 0);
1331 output_asm_insn ("mov.l r13,@-r15", 0);
1332 output_asm_insn (jump, &this.lab);
1334 output_asm_insn ("sts macl, r13", 0);
1336 output_asm_insn ("mov.l @r15+,r13", 0);
1338 if (far && flag_pic && TARGET_SH2)
1340 braf_base_lab = gen_label_rtx ();
1341 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1342 CODE_LABEL_NUMBER (braf_base_lab));
1345 output_asm_insn (".align 2", 0);
1346 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this.lab));
1348 if (far && flag_pic)
1351 this.lab = braf_base_lab;
1352 output_asm_insn (".long %O2-%O0", &this.lab);
1355 output_asm_insn (far ? ".long %O2" : ".word %O2-%O0", &this.lab);
1359 /* Local label counter, used for constants in the pool and inside
1360 pattern branches. */
1362 static int lf = 100;
1364 /* Output code for ordinary branches. */
1367 output_branch (int logic, rtx insn, rtx *operands)
1369 switch (get_attr_length (insn))
1372 /* This can happen if filling the delay slot has caused a forward
1373 branch to exceed its range (we could reverse it, but only
1374 when we know we won't overextend other branches; this should
1375 best be handled by relaxation).
1376 It can also happen when other condbranches hoist delay slot insn
1377 from their destination, thus leading to code size increase.
1378 But the branch will still be in the range -4092..+4098 bytes. */
1383 /* The call to print_slot will clobber the operands. */
1384 rtx op0 = operands[0];
1386 /* If the instruction in the delay slot is annulled (true), then
1387 there is no delay slot where we can put it now. The only safe
1388 place for it is after the label. final will do that by default. */
1391 && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
1392 && get_attr_length (XVECEXP (final_sequence, 0, 1)))
1394 asm_fprintf (asm_out_file, "\tb%s%ss\t%LLF%d\n", logic ? "f" : "t",
1395 ASSEMBLER_DIALECT ? "/" : ".", label);
1396 print_slot (final_sequence);
1399 asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label);
1401 output_asm_insn ("bra\t%l0", &op0);
1402 fprintf (asm_out_file, "\tnop\n");
1403 (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
1407 /* When relaxing, handle this like a short branch. The linker
1408 will fix it up if it still doesn't fit after relaxation. */
1410 return logic ? "bt%.\t%l0" : "bf%.\t%l0";
1412 /* These are for SH2e, in which we have to account for the
1413 extra nop because of the hardware bug in annulled branches. */
1420 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0)))
1422 asm_fprintf (asm_out_file, "b%s%ss\t%LLF%d\n",
1424 ASSEMBLER_DIALECT ? "/" : ".", label);
1425 fprintf (asm_out_file, "\tnop\n");
1426 output_asm_insn ("bra\t%l0", operands);
1427 fprintf (asm_out_file, "\tnop\n");
1428 (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
1432 /* When relaxing, fall through. */
1437 sprintf (buffer, "b%s%ss\t%%l0",
1439 ASSEMBLER_DIALECT ? "/" : ".");
1440 output_asm_insn (buffer, &operands[0]);
1445 /* There should be no longer branches now - that would
1446 indicate that something has destroyed the branches set
1447 up in machine_dependent_reorg. */
1453 output_branchy_insn (enum rtx_code code, const char *template,
1454 rtx insn, rtx *operands)
1456 rtx next_insn = NEXT_INSN (insn);
1458 if (next_insn && GET_CODE (next_insn) == JUMP_INSN && condjump_p (next_insn))
1460 rtx src = SET_SRC (PATTERN (next_insn));
1461 if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code)
1463 /* Following branch not taken */
1464 operands[9] = gen_label_rtx ();
1465 emit_label_after (operands[9], next_insn);
1466 INSN_ADDRESSES_NEW (operands[9],
1467 INSN_ADDRESSES (INSN_UID (next_insn))
1468 + get_attr_length (next_insn));
1473 int offset = (branch_dest (next_insn)
1474 - INSN_ADDRESSES (INSN_UID (next_insn)) + 4);
1475 if (offset >= -252 && offset <= 258)
1477 if (GET_CODE (src) == IF_THEN_ELSE)
1479 src = XEXP (src, 1);
1485 operands[9] = gen_label_rtx ();
1486 emit_label_after (operands[9], insn);
1487 INSN_ADDRESSES_NEW (operands[9],
1488 INSN_ADDRESSES (INSN_UID (insn))
1489 + get_attr_length (insn));
1494 output_ieee_ccmpeq (rtx insn, rtx *operands)
1496 return output_branchy_insn (NE, "bt\t%l9\\;fcmp/eq\t%1,%0", insn, operands);
1499 /* Output the start of the assembler file. */
1502 sh_file_start (void)
1504 default_file_start ();
1507 /* Declare the .directive section before it is used. */
1508 fputs ("\t.section .directive, \"SM\", @progbits, 1\n", asm_out_file);
1509 fputs ("\t.asciz \"#<SYMEDIT>#\\n\"\n", asm_out_file);
1513 /* We need to show the text section with the proper
1514 attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
1515 emits it without attributes, else GAS
1516 will complain. We can teach GAS specifically about the
1517 default attributes for our choice of text section, but
1518 then we would have to change GAS again if/when we change
1519 the text section name. */
1520 fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP);
1522 /* Switch to the data section so that the coffsem symbol
1523 isn't in the text section. */
1526 if (TARGET_LITTLE_ENDIAN)
1527 fputs ("\t.little\n", asm_out_file);
1531 if (TARGET_SHCOMPACT)
1532 fputs ("\t.mode\tSHcompact\n", asm_out_file);
1533 else if (TARGET_SHMEDIA)
1534 fprintf (asm_out_file, "\t.mode\tSHmedia\n\t.abi\t%i\n",
1535 TARGET_SHMEDIA64 ? 64 : 32);
1539 /* Check if PAT includes UNSPEC_CALLER unspec pattern. */
1542 unspec_caller_rtx_p (rtx pat)
1544 switch (GET_CODE (pat))
1547 return unspec_caller_rtx_p (XEXP (pat, 0));
1550 if (unspec_caller_rtx_p (XEXP (pat, 0)))
1552 return unspec_caller_rtx_p (XEXP (pat, 1));
1554 if (XINT (pat, 1) == UNSPEC_CALLER)
1563 /* Indicate that INSN cannot be duplicated. This is true for insns
1564 that generate a unique label. */
1567 sh_cannot_copy_insn_p (rtx insn)
1571 if (!reload_completed || !flag_pic)
1574 if (GET_CODE (insn) != INSN)
1576 if (asm_noperands (insn) >= 0)
1579 pat = PATTERN (insn);
1580 if (GET_CODE (pat) != SET)
1582 pat = SET_SRC (pat);
1584 if (unspec_caller_rtx_p (pat))
1590 /* Actual number of instructions used to make a shift by N. */
1591 static const char ashiftrt_insns[] =
1592 { 0,1,2,3,4,5,8,8,8,8,8,8,8,8,8,8,2,3,4,5,8,8,8,8,8,8,8,8,8,8,8,2};
1594 /* Left shift and logical right shift are the same. */
1595 static const char shift_insns[] =
1596 { 0,1,1,2,2,3,3,4,1,2,2,3,3,4,3,3,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
1598 /* Individual shift amounts needed to get the above length sequences.
1599 One bit right shifts clobber the T bit, so when possible, put one bit
1600 shifts in the middle of the sequence, so the ends are eligible for
1601 branch delay slots. */
1602 static const short shift_amounts[32][5] = {
1603 {0}, {1}, {2}, {2, 1},
1604 {2, 2}, {2, 1, 2}, {2, 2, 2}, {2, 2, 1, 2},
1605 {8}, {8, 1}, {8, 2}, {8, 1, 2},
1606 {8, 2, 2}, {8, 2, 1, 2}, {8, -2, 8}, {8, -1, 8},
1607 {16}, {16, 1}, {16, 2}, {16, 1, 2},
1608 {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
1609 {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
1610 {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
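/* Worked example (not part of the original sources): a constant shift by
   13 costs shift_insns[13] == 4 instructions, using the amounts
   shift_amounts[13] = {8, 2, 1, 2} (8 + 2 + 1 + 2 == 13); the single-bit
   step sits in the middle of the sequence, as explained above, so the
   first and last insns stay eligible for branch delay slots.  */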
1612 /* Likewise, but for shift amounts < 16, up to three highmost bits
1613 might be clobbered. This is typically used when combined with some
1614 kind of sign or zero extension. */
1616 static const char ext_shift_insns[] =
1617 { 0,1,1,2,2,3,2,2,1,2,2,3,3,3,2,2,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
1619 static const short ext_shift_amounts[32][4] = {
1620 {0}, {1}, {2}, {2, 1},
1621 {2, 2}, {2, 1, 2}, {8, -2}, {8, -1},
1622 {8}, {8, 1}, {8, 2}, {8, 1, 2},
1623 {8, 2, 2}, {16, -2, -1}, {16, -2}, {16, -1},
1624 {16}, {16, 1}, {16, 2}, {16, 1, 2},
1625 {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
1626 {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
1627 {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
1629 /* Assuming we have a value that has been sign-extended by at least one bit,
1630 can we use the ext_shift_amounts with the last shift turned to an arithmetic shift
1631 to shift it by N without data loss, and quicker than by other means? */
1632 #define EXT_SHIFT_SIGNED(n) (((n) | 8) == 15)
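/* Illustrative note (not part of the original sources): ((n) | 8) == 15
   holds exactly for n == 7 and n == 15; those are the counts whose
   ext_shift_amounts sequences end in a one-bit right shift ({8, -1} and
   {16, -1} above), which is the shift that can be turned arithmetic.  */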
1634 /* This is used in length attributes in sh.md to help compute the length
1635 of arbitrary constant shift instructions. */
1638 shift_insns_rtx (rtx insn)
1640 rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
1641 int shift_count = INTVAL (XEXP (set_src, 1));
1642 enum rtx_code shift_code = GET_CODE (set_src);
1647 return ashiftrt_insns[shift_count];
1650 return shift_insns[shift_count];
1656 /* Return the cost of a shift. */
1666 if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
1668 if (GET_MODE (x) == DImode
1669 && GET_CODE (XEXP (x, 1)) == CONST_INT
1670 && INTVAL (XEXP (x, 1)) == 1)
1673 /* Everything else is invalid, because there is no pattern for it. */
1676 /* If shifting by a non-constant, then this will be expensive. */
1677 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
1678 return SH_DYNAMIC_SHIFT_COST;
1680 value = INTVAL (XEXP (x, 1));
1682 /* Otherwise, return the true cost in instructions. */
1683 if (GET_CODE (x) == ASHIFTRT)
1685 int cost = ashiftrt_insns[value];
1686 /* If SH3, then we put the constant in a reg and use shad. */
1687 if (cost > 1 + SH_DYNAMIC_SHIFT_COST)
1688 cost = 1 + SH_DYNAMIC_SHIFT_COST;
1692 return shift_insns[value];
1695 /* Return the cost of an AND operation. */
1702 /* ANDing with a register is a single-cycle `and' instruction. */
1703 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
1706 i = INTVAL (XEXP (x, 1));
1710 if ((GET_CODE (XEXP (x, 1)) == CONST_INT
1711 && CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1))))
1712 || EXTRA_CONSTRAINT_C16 (XEXP (x, 1)))
1718 /* These constants are single cycle extu.[bw] instructions. */
1719 if (i == 0xff || i == 0xffff)
1721 /* Constants that can be used in an and immediate instruction in a single
1722 cycle, but this requires r0, so make it a little more expensive. */
1723 if (CONST_OK_FOR_K08 (i))
1725 /* Constants that can be loaded with a mov immediate and an and.
1726 This case is probably unnecessary. */
1727 if (CONST_OK_FOR_I08 (i))
1729 /* Any other constant requires a 2 cycle pc-relative load plus an and.
1730 This case is probably unnecessary. */
1734 /* Return the cost of an addition or a subtraction. */
1739 /* Adding a register is a single cycle insn. */
1740 if (GET_CODE (XEXP (x, 1)) == REG
1741 || GET_CODE (XEXP (x, 1)) == SUBREG)
1744 /* Likewise for small constants. */
1745 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1746 && CONST_OK_FOR_ADD (INTVAL (XEXP (x, 1))))
1750 switch (GET_CODE (XEXP (x, 1)))
1755 return TARGET_SHMEDIA64 ? 5 : 3;
1758 if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1))))
1760 else if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1)) >> 16))
1762 else if (CONST_OK_FOR_I16 ((INTVAL (XEXP (x, 1)) >> 16) >> 16))
1770 /* Any other constant requires a 2 cycle pc-relative load plus an
1771 addition. */
1775 /* Return the cost of a multiply. */
1777 multcosts (rtx x ATTRIBUTE_UNUSED)
1784 /* We have a mul insn, so we can never take more than the mul and the
1785 read of the mac reg, but count more because of the latency and extra
1786 reg usage. */
1787 if (TARGET_SMALLCODE)
1792 /* If we're aiming at small code, then just count the number of
1793 insns in a multiply call sequence. */
1794 if (TARGET_SMALLCODE)
1797 /* Otherwise count all the insns in the routine we'd be calling too. */
1801 /* Compute a (partial) cost for rtx X. Return true if the complete
1802 cost has been computed, and false if subexpressions should be
1803 scanned. In either case, *TOTAL contains the cost result. */
1806 sh_rtx_costs (rtx x, int code, int outer_code, int *total)
1813 if (INTVAL (x) == 0)
1815 else if (outer_code == AND && and_operand ((x), DImode))
1817 else if ((outer_code == IOR || outer_code == XOR
1818 || outer_code == PLUS)
1819 && CONST_OK_FOR_I10 (INTVAL (x)))
1821 else if (CONST_OK_FOR_I16 (INTVAL (x)))
1822 *total = COSTS_N_INSNS (outer_code != SET);
1823 else if (CONST_OK_FOR_I16 (INTVAL (x) >> 16))
1824 *total = COSTS_N_INSNS (2);
1825 else if (CONST_OK_FOR_I16 ((INTVAL (x) >> 16) >> 16))
1826 *total = COSTS_N_INSNS (3);
1828 *total = COSTS_N_INSNS (4);
1831 if (CONST_OK_FOR_I08 (INTVAL (x)))
1833 else if ((outer_code == AND || outer_code == IOR || outer_code == XOR)
1834 && CONST_OK_FOR_K08 (INTVAL (x)))
1843 if (TARGET_SHMEDIA64)
1844 *total = COSTS_N_INSNS (4);
1845 else if (TARGET_SHMEDIA32)
1846 *total = COSTS_N_INSNS (2);
1853 *total = COSTS_N_INSNS (4);
1859 *total = COSTS_N_INSNS (addsubcosts (x));
1863 *total = COSTS_N_INSNS (andcosts (x));
1867 *total = COSTS_N_INSNS (multcosts (x));
1873 *total = COSTS_N_INSNS (shiftcosts (x));
1880 *total = COSTS_N_INSNS (20);
1893 /* Compute the cost of an address. For the SH, all valid addresses are
1894 the same cost. Use a slightly higher cost for reg + reg addressing,
1895 since it increases pressure on r0. */
1898 sh_address_cost (rtx X)
1900 return (GET_CODE (X) == PLUS
1901 && ! CONSTANT_P (XEXP (X, 1))
1902 && ! TARGET_SHMEDIA ? 1 : 0);
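/* Illustrative examples (not part of the original sources): on a
   non-SHmedia target, @(4,r4) -- (plus (reg r4) (const_int 4)) -- costs 0
   because the second operand is a constant, while the reg+reg form
   @(r0,r4) costs 1, reflecting the extra pressure it puts on r0.  */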
1905 /* Code to expand a shift. */
1908 gen_ashift (int type, int n, rtx reg)
1910 /* Negative values here come from the shift_amounts array. */
1923 emit_insn (gen_ashrsi3_k (reg, reg, GEN_INT (n)));
1927 emit_insn (gen_lshrsi3_m (reg, reg, GEN_INT (n)));
1929 emit_insn (gen_lshrsi3_k (reg, reg, GEN_INT (n)));
1932 emit_insn (gen_ashlsi3_std (reg, reg, GEN_INT (n)));
1937 /* Same for HImode */
1940 gen_ashift_hi (int type, int n, rtx reg)
1942 /* Negative values here come from the shift_amounts array. */
1956 /* We don't have HImode right shift operations because using the
1957 ordinary 32 bit shift instructions for that doesn't generate proper
1958 zero/sign extension.
1959 gen_ashift_hi is only called in contexts where we know that the
1960 sign extension works out correctly. */
1963 if (GET_CODE (reg) == SUBREG)
1965 offset = SUBREG_BYTE (reg);
1966 reg = SUBREG_REG (reg);
1968 gen_ashift (type, n, gen_rtx_SUBREG (SImode, reg, offset));
1972 emit_insn (gen_ashlhi3_k (reg, reg, GEN_INT (n)));
1977 /* Output RTL to split a constant shift into its component SH constant
1978 shift instructions. */
1981 gen_shifty_op (int code, rtx *operands)
1983 int value = INTVAL (operands[2]);
1986 /* Truncate the shift count in case it is out of bounds. */
1987 value = value & 0x1f;
1991 if (code == LSHIFTRT)
1993 emit_insn (gen_rotlsi3_1 (operands[0], operands[0]));
1994 emit_insn (gen_movt (operands[0]));
1997 else if (code == ASHIFT)
1999 /* There is a two instruction sequence for 31 bit left shifts,
2000 but it requires r0. */
2001 if (GET_CODE (operands[0]) == REG && REGNO (operands[0]) == 0)
2003 emit_insn (gen_andsi3 (operands[0], operands[0], const1_rtx));
2004 emit_insn (gen_rotlsi3_31 (operands[0], operands[0]));
2009 else if (value == 0)
2011 /* This can happen when not optimizing. We must output something here
2012 to prevent the compiler from aborting in final.c after the try_split
2013 call. */
2014 emit_insn (gen_nop ());
2018 max = shift_insns[value];
2019 for (i = 0; i < max; i++)
2020 gen_ashift (code, shift_amounts[value][i], operands[0]);
2023 /* Same as above, but optimized for values where the topmost bits don't
2024 matter. */
2027 gen_shifty_hi_op (int code, rtx *operands)
2029 int value = INTVAL (operands[2]);
2031 void (*gen_fun) (int, int, rtx);
2033 /* This operation is used by and_shl for SImode values with a few
2034 high bits known to be cleared. */
2038 emit_insn (gen_nop ());
2042 gen_fun = GET_MODE (operands[0]) == HImode ? gen_ashift_hi : gen_ashift;
2045 max = ext_shift_insns[value];
2046 for (i = 0; i < max; i++)
2047 gen_fun (code, ext_shift_amounts[value][i], operands[0]);
2050 /* When shifting right, emit the shifts in reverse order, so that
2051 solitary negative values come first. */
2052 for (i = ext_shift_insns[value] - 1; i >= 0; i--)
2053 gen_fun (code, ext_shift_amounts[value][i], operands[0]);
2056 /* Output RTL for an arithmetic right shift. */
2058 /* ??? Rewrite to use super-optimizer sequences. */
2061 expand_ashiftrt (rtx *operands)
2071 if (GET_CODE (operands[2]) != CONST_INT)
2073 rtx count = copy_to_mode_reg (SImode, operands[2]);
2074 emit_insn (gen_negsi2 (count, count));
2075 emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
2078 else if (ashiftrt_insns[INTVAL (operands[2]) & 31]
2079 > 1 + SH_DYNAMIC_SHIFT_COST)
2082 = force_reg (SImode, GEN_INT (- (INTVAL (operands[2]) & 31)));
2083 emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
2087 if (GET_CODE (operands[2]) != CONST_INT)
2090 value = INTVAL (operands[2]) & 31;
2094 emit_insn (gen_ashrsi2_31 (operands[0], operands[1]));
2097 else if (value >= 16 && value <= 19)
2099 wrk = gen_reg_rtx (SImode);
2100 emit_insn (gen_ashrsi2_16 (wrk, operands[1]));
2103 gen_ashift (ASHIFTRT, 1, wrk);
2104 emit_move_insn (operands[0], wrk);
2107 /* Expand a short sequence inline; for longer ones, call a magic routine. */
2108 else if (value <= 5)
2110 wrk = gen_reg_rtx (SImode);
2111 emit_move_insn (wrk, operands[1]);
2113 gen_ashift (ASHIFTRT, 1, wrk);
2114 emit_move_insn (operands[0], wrk);
2118 wrk = gen_reg_rtx (Pmode);
2120 /* Load the value into an arg reg and call a helper. */
2121 emit_move_insn (gen_rtx_REG (SImode, 4), operands[1]);
2122 sprintf (func, "__ashiftrt_r4_%d", value);
2123 func_name = get_identifier (func);
2124 sym = function_symbol (IDENTIFIER_POINTER (func_name));
2125 emit_move_insn (wrk, sym);
2126 emit_insn (gen_ashrsi3_n (GEN_INT (value), wrk));
2127 emit_move_insn (operands[0], gen_rtx_REG (SImode, 4));
2132 sh_dynamicalize_shift_p (rtx count)
2134 return shift_insns[INTVAL (count)] > 1 + SH_DYNAMIC_SHIFT_COST;
2137 /* Try to find a good way to implement the combiner pattern
2138 [(set (match_operand:SI 0 "register_operand" "r")
2139 (and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
2140 (match_operand:SI 2 "const_int_operand" "n"))
2141 (match_operand:SI 3 "const_int_operand" "n"))) .
2142 LEFT_RTX is operand 2 in the above pattern, and MASK_RTX is operand 3.
2143 return 0 for simple right / left or left/right shift combination.
2144 return 1 for a combination of shifts with zero_extend.
2145 return 2 for a combination of shifts with an AND that needs r0.
2146 return 3 for a combination of shifts with an AND that needs an extra
2147 scratch register, when the three highmost bits of the AND mask are clear.
2148 return 4 for a combination of shifts with an AND that needs an extra
2149 scratch register, when any of the three highmost bits of the AND mask is set.
2151 If ATTRP is set, store an initial right shift width in ATTRP[0],
2152 and the instruction length in ATTRP[1]. These values are not valid when returning 0.
2154 When ATTRP is set and returning 1, ATTRP[2] gets set to the index into
2155 shift_amounts for the last shift value that is to be used before the sign extend. */
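/* As an illustration, (r1 << 2) & 0x3fc keeps exactly the low byte of r1
   (moved up by two bits), so it can be synthesized as a zero-extend from
   QImode followed by a left shift by 2 - the kind of sequence described by
   return value 1 above. */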
2158 shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
2160 unsigned HOST_WIDE_INT mask, lsb, mask2, lsb2;
2161 int left = INTVAL (left_rtx), right;
2163 int cost, best_cost = 10000;
2164 int best_right = 0, best_len = 0;
2168 if (left < 0 || left > 31)
2170 if (GET_CODE (mask_rtx) == CONST_INT)
2171 mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
2173 mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
2174 /* Can this be expressed as a right shift / left shift pair? */
2175 lsb = ((mask ^ (mask - 1)) >> 1) + 1;
2176 right = exact_log2 (lsb);
2177 mask2 = ~(mask + lsb - 1);
2178 lsb2 = ((mask2 ^ (mask2 - 1)) >> 1) + 1;
2179 /* mask has no zeroes except trailing zeroes <==> ! mask2 */
2180 if (! mask2)
2181 best_cost = shift_insns[right] + shift_insns[right + left];
2182 /* mask has no trailing zeroes <==> ! right */
2183 else if (! right && mask2 == ~(lsb2 - 1))
2185 int late_right = exact_log2 (lsb2);
2186 best_cost = shift_insns[left + late_right] + shift_insns[late_right];
2188 /* Try to use zero extend. */
2189 if (mask2 == ~(lsb2 - 1))
2193 for (width = 8; width <= 16; width += 8)
2195 /* Can we zero-extend right away? */
2196 if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
2199 = 1 + ext_shift_insns[right] + ext_shift_insns[left + right];
2200 if (cost < best_cost)
2211 /* ??? Could try to put zero extend into initial right shift,
2212 or even shift a bit left before the right shift. */
2213 /* Determine value of first part of left shift, to get to the
2214 zero extend cut-off point. */
2215 first = width - exact_log2 (lsb2) + right;
2216 if (first >= 0 && right + left - first >= 0)
2218 cost = ext_shift_insns[right] + ext_shift_insns[first] + 1
2219 + ext_shift_insns[right + left - first];
2220 if (cost < best_cost)
2232 /* Try to use r0 AND pattern */
2233 for (i = 0; i <= 2; i++)
2237 if (! CONST_OK_FOR_K08 (mask >> i))
2239 cost = (i != 0) + 2 + ext_shift_insns[left + i];
2240 if (cost < best_cost)
2245 best_len = cost - 1;
2248 /* Try to use a scratch register to hold the AND operand. */
2249 can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
2250 for (i = 0; i <= 2; i++)
2254 cost = (i != 0) + (CONST_OK_FOR_I08 (mask >> i) ? 2 : 3)
2255 + (can_ext ? ext_shift_insns : shift_insns)[left + i];
2256 if (cost < best_cost)
2261 best_len = cost - 1 - ! CONST_OK_FOR_I08 (mask >> i);
2267 attrp[0] = best_right;
2268 attrp[1] = best_len;
2273 /* This is used in length attributes of the unnamed instructions
2274 corresponding to shl_and_kind return values of 1 and 2. */
2276 shl_and_length (rtx insn)
2278 rtx set_src, left_rtx, mask_rtx;
2281 set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2282 left_rtx = XEXP (XEXP (set_src, 0), 1);
2283 mask_rtx = XEXP (set_src, 1);
2284 shl_and_kind (left_rtx, mask_rtx, attributes);
2285 return attributes[1];
2288 /* This is used in length attribute of the and_shl_scratch instruction. */
2291 shl_and_scr_length (rtx insn)
2293 rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2294 int len = shift_insns[INTVAL (XEXP (set_src, 1))];
2295 rtx op = XEXP (set_src, 0);
2296 len += shift_insns[INTVAL (XEXP (op, 1))] + 1;
2297 op = XEXP (XEXP (op, 0), 0);
2298 return len + shift_insns[INTVAL (XEXP (op, 1))];
2301 /* Generate rtl for instructions for which shl_and_kind advised a particular
2302 method of generating them, i.e. returned zero. */
2305 gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
2308 unsigned HOST_WIDE_INT mask;
2309 int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
2310 int right, total_shift;
2311 void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;
2313 right = attributes[0];
2314 total_shift = INTVAL (left_rtx) + right;
2315 mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> total_shift;
2322 int first = attributes[2];
2327 emit_insn ((mask << right) <= 0xff
2328 ? gen_zero_extendqisi2 (dest,
2329 gen_lowpart (QImode, source))
2330 : gen_zero_extendhisi2 (dest,
2331 gen_lowpart (HImode, source)));
2335 emit_insn (gen_movsi (dest, source));
2339 operands[2] = GEN_INT (right);
2340 gen_shifty_hi_op (LSHIFTRT, operands);
2344 operands[2] = GEN_INT (first);
2345 gen_shifty_hi_op (ASHIFT, operands);
2346 total_shift -= first;
2350 emit_insn (mask <= 0xff
2351 ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
2352 : gen_zero_extendhisi2 (dest, gen_lowpart (HImode, dest)));
2353 if (total_shift > 0)
2355 operands[2] = GEN_INT (total_shift);
2356 gen_shifty_hi_op (ASHIFT, operands);
2361 shift_gen_fun = gen_shifty_op;
2363 /* If the topmost bit that matters is set, set the topmost bits
2364 that don't matter. This way, we might be able to get a shorter signed constant. */
2366 if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
2367 mask |= (HOST_WIDE_INT) ~0 << (31 - total_shift);
2369 /* Don't expand fine-grained when combining, because that will
2370 make the pattern fail. */
2371 if (currently_expanding_to_rtl
2372 || reload_in_progress || reload_completed)
2376 /* Cases 3 and 4 should be handled by this split
2377 only while combining */
2382 emit_insn (gen_lshrsi3 (dest, source, GEN_INT (right)));
2385 emit_insn (gen_andsi3 (dest, source, GEN_INT (mask)));
2390 operands[2] = GEN_INT (total_shift);
2391 shift_gen_fun (ASHIFT, operands);
2398 if (kind != 4 && total_shift < 16)
2400 neg = -ext_shift_amounts[total_shift][1];
2402 neg -= ext_shift_amounts[total_shift][2];
2406 emit_insn (gen_and_shl_scratch (dest, source,
2409 GEN_INT (total_shift + neg),
2411 emit_insn (gen_movsi (dest, dest));
2418 /* Try to find a good way to implement the combiner pattern
2419 [(set (match_operand:SI 0 "register_operand" "=r")
2420 (sign_extract:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
2421 (match_operand:SI 2 "const_int_operand" "n")
2422 (match_operand:SI 3 "const_int_operand" "n")
2424 (clobber (reg:SI T_REG))]
2425 LEFT_RTX is operand 2 in the above pattern, and SIZE_RTX is operand 3.
2426 return 0 for simple left / right shift combination.
2427 return 1 for left shift / 8 bit sign extend / left shift.
2428 return 2 for left shift / 16 bit sign extend / left shift.
2429 return 3 for left shift / 8 bit sign extend / shift / sign extend.
2430 return 4 for left shift / 16 bit sign extend / shift / sign extend.
2431 return 5 for left shift / 16 bit sign extend / right shift
2432 return 6 for < 8 bit sign extend / left shift.
2433 return 7 for < 8 bit sign extend / left shift / single right shift.
2434 If COSTP is nonzero, assign the calculated cost to *COSTP. */
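/* As an illustration, LEFT == 8 and SIZE == 24 (so INSIZE == 16) asks for the
   low 16 bits of operand 1 to be sign extended and the result shifted left by
   8; the alternatives above only differ in how the sign extension and the
   surrounding shifts are interleaved. */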
2437 shl_sext_kind (rtx left_rtx, rtx size_rtx, int *costp)
2439 int left, size, insize, ext;
2440 int cost = 0, best_cost;
2443 left = INTVAL (left_rtx);
2444 size = INTVAL (size_rtx);
2445 insize = size - left;
2448 /* Default to left / right shift. */
2450 best_cost = shift_insns[32 - insize] + ashiftrt_insns[32 - size];
2453 /* 16 bit shift / sign extend / 16 bit shift */
2454 cost = shift_insns[16 - insize] + 1 + ashiftrt_insns[16 - size];
2455 /* If ashiftrt_insns[16 - size] is 8, this choice will be overridden
2456 below, by alternative 3 or something even better. */
2457 if (cost < best_cost)
2463 /* Try a plain sign extend between two shifts. */
2464 for (ext = 16; ext >= insize; ext -= 8)
2468 cost = ext_shift_insns[ext - insize] + 1 + shift_insns[size - ext];
2469 if (cost < best_cost)
2471 kind = ext / (unsigned) 8;
2475 /* Check if we can do a sloppy shift with a final signed shift
2476 restoring the sign. */
2477 if (EXT_SHIFT_SIGNED (size - ext))
2478 cost = ext_shift_insns[ext - insize] + ext_shift_insns[size - ext] + 1;
2479 /* If not, maybe it's still cheaper to do the second shift sloppy,
2480 and do a final sign extend? */
2481 else if (size <= 16)
2482 cost = ext_shift_insns[ext - insize] + 1
2483 + ext_shift_insns[size > ext ? size - ext : ext - size] + 1;
2486 if (cost < best_cost)
2488 kind = ext / (unsigned) 8 + 2;
2492 /* Check if we can sign extend in r0 */
2495 cost = 3 + shift_insns[left];
2496 if (cost < best_cost)
2501 /* Try the same with a final signed shift. */
2504 cost = 3 + ext_shift_insns[left + 1] + 1;
2505 if (cost < best_cost)
2514 /* Try to use a dynamic shift. */
2515 cost = shift_insns[32 - insize] + 1 + SH_DYNAMIC_SHIFT_COST;
2516 if (cost < best_cost)
2527 /* Function to be used in the length attribute of the instructions
2528 implementing this pattern. */
2531 shl_sext_length (rtx insn)
2533 rtx set_src, left_rtx, size_rtx;
2536 set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2537 left_rtx = XEXP (XEXP (set_src, 0), 1);
2538 size_rtx = XEXP (set_src, 1);
2539 shl_sext_kind (left_rtx, size_rtx, &cost);
2543 /* Generate rtl for this pattern */
2546 gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
2549 int left, size, insize, cost;
2552 kind = shl_sext_kind (left_rtx, size_rtx, &cost);
2553 left = INTVAL (left_rtx);
2554 size = INTVAL (size_rtx);
2555 insize = size - left;
2563 int ext = kind & 1 ? 8 : 16;
2564 int shift2 = size - ext;
2566 /* Don't expand fine-grained when combining, because that will
2567 make the pattern fail. */
2568 if (! currently_expanding_to_rtl
2569 && ! reload_in_progress && ! reload_completed)
2571 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
2572 emit_insn (gen_movsi (dest, source));
2576 emit_insn (gen_movsi (dest, source));
2580 operands[2] = GEN_INT (ext - insize);
2581 gen_shifty_hi_op (ASHIFT, operands);
2584 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
2585 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
2590 operands[2] = GEN_INT (shift2);
2591 gen_shifty_op (ASHIFT, operands);
2598 if (EXT_SHIFT_SIGNED (shift2))
2600 operands[2] = GEN_INT (shift2 + 1);
2601 gen_shifty_op (ASHIFT, operands);
2602 operands[2] = const1_rtx;
2603 gen_shifty_op (ASHIFTRT, operands);
2606 operands[2] = GEN_INT (shift2);
2607 gen_shifty_hi_op (ASHIFT, operands);
2611 operands[2] = GEN_INT (-shift2);
2612 gen_shifty_hi_op (LSHIFTRT, operands);
2614 emit_insn (size <= 8
2615 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
2616 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
2623 if (! currently_expanding_to_rtl
2624 && ! reload_in_progress && ! reload_completed)
2625 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
2629 operands[2] = GEN_INT (16 - insize);
2630 gen_shifty_hi_op (ASHIFT, operands);
2631 emit_insn (gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
2633 /* Don't use gen_ashrsi3 because it generates new pseudos. */
2635 gen_ashift (ASHIFTRT, 1, dest);
2640 /* Don't expand fine-grained when combining, because that will
2641 make the pattern fail. */
2642 if (! currently_expanding_to_rtl
2643 && ! reload_in_progress && ! reload_completed)
2645 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
2646 emit_insn (gen_movsi (dest, source));
2649 emit_insn (gen_andsi3 (dest, source, GEN_INT ((1 << insize) - 1)));
2650 emit_insn (gen_xorsi3 (dest, dest, GEN_INT (1 << (insize - 1))));
2651 emit_insn (gen_addsi3 (dest, dest, GEN_INT (-1 << (insize - 1))));
2653 operands[2] = kind == 7 ? GEN_INT (left + 1) : left_rtx;
2654 gen_shifty_op (ASHIFT, operands);
2656 emit_insn (gen_ashrsi3_k (dest, dest, const1_rtx));
2664 /* Prefix a symbol_ref name with "datalabel". */
2667 gen_datalabel_ref (rtx sym)
2669 if (GET_CODE (sym) == LABEL_REF)
2670 return gen_rtx_CONST (GET_MODE (sym),
2671 gen_rtx_UNSPEC (GET_MODE (sym),
2675 if (GET_CODE (sym) != SYMBOL_REF)
2682 /* The SH cannot load a large constant into a register; constants have to
2683 come from a pc relative load. The reference of a pc relative load
2684 instruction must be less than 1k in front of the instruction. This
2685 means that we often have to dump a constant inside a function, and
2686 generate code to branch around it.
2688 It is important to minimize this, since the branches will slow things
2689 down and make things bigger.
2691 Worst case code looks like:
2709 We fix this by performing a scan before scheduling, which notices which
2710 instructions need to have their operands fetched from the constant table
2711 and builds the table.
2715 scan, find an instruction which needs a pcrel move. Look forward, find the
2716 last barrier which is within MAX_COUNT bytes of the requirement.
2717 If there isn't one, make one. Process all the instructions between
2718 the find and the barrier.
2720 In the above example, we can tell that L3 is within 1k of L1, so
2721 the first move can be shrunk from the 3 insn+constant sequence into
2722 just 1 insn, and the constant moved to L3 to make:
2733 Then the second move becomes the target for the shortening process. */
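/* Roughly, in the worst case each such constant load looks something like

	mov.l	L1,rn
	bra	L2
	nop
	.align 2
   L1:	.long	constant
   L2:	...

   i.e. a pc-relative load plus a branch around the constant and its alignment
   padding; the scan described above merges these constants into shared pools
   so that most of the branches and padding can be dropped. */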
2737 rtx value; /* Value in table. */
2738 rtx label; /* Label of value. */
2739 rtx wend; /* End of window. */
2740 enum machine_mode mode; /* Mode of value. */
2742 /* True if this constant is accessed as part of a post-increment
2743 sequence. Note that HImode constants are never accessed in this way. */
2744 bool part_of_sequence_p;
2747 /* The maximum number of constants that can fit into one pool, since
2748 the pc relative range is 0...1020 bytes and constants are at least 4 bytes long. */
2751 #define MAX_POOL_SIZE (1020/4)
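/* I.e. at most 1020 / 4 == 255 entries: enough SImode constants to fill the
   entire pc-relative addressing range. */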
2752 static pool_node pool_vector[MAX_POOL_SIZE];
2753 static int pool_size;
2754 static rtx pool_window_label;
2755 static int pool_window_last;
2757 /* ??? If we need a constant in HImode which is the truncated value of a
2758 constant we need in SImode, we could combine the two entries thus saving
2759 two bytes. Is this common enough to be worth the effort of implementing this? */
2762 /* ??? This stuff should be done at the same time that we shorten branches.
2763 As it is now, we must assume that all branches are the maximum size, and
2764 this causes us to almost always output constant pools sooner than necessary. */
2767 /* Add a constant to the pool and return its label. */
2770 add_constant (rtx x, enum machine_mode mode, rtx last_value)
2773 rtx lab, new, ref, newref;
2775 /* First see if we've already got it. */
2776 for (i = 0; i < pool_size; i++)
2778 if (x->code == pool_vector[i].value->code
2779 && mode == pool_vector[i].mode)
2781 if (x->code == CODE_LABEL)
2783 if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
2786 if (rtx_equal_p (x, pool_vector[i].value))
2791 || ! rtx_equal_p (last_value, pool_vector[i-1].value))
2793 new = gen_label_rtx ();
2794 LABEL_REFS (new) = pool_vector[i].label;
2795 pool_vector[i].label = lab = new;
2797 if (lab && pool_window_label)
2799 newref = gen_rtx_LABEL_REF (VOIDmode, pool_window_label);
2800 ref = pool_vector[pool_window_last].wend;
2801 LABEL_NEXTREF (newref) = ref;
2802 pool_vector[pool_window_last].wend = newref;
2805 pool_window_label = new;
2806 pool_window_last = i;
2812 /* Need a new one. */
2813 pool_vector[pool_size].value = x;
2814 if (last_value && rtx_equal_p (last_value, pool_vector[pool_size - 1].value))
2817 pool_vector[pool_size - 1].part_of_sequence_p = true;
2820 lab = gen_label_rtx ();
2821 pool_vector[pool_size].mode = mode;
2822 pool_vector[pool_size].label = lab;
2823 pool_vector[pool_size].wend = NULL_RTX;
2824 pool_vector[pool_size].part_of_sequence_p = (lab == 0);
2825 if (lab && pool_window_label)
2827 newref = gen_rtx_LABEL_REF (VOIDmode, pool_window_label);
2828 ref = pool_vector[pool_window_last].wend;
2829 LABEL_NEXTREF (newref) = ref;
2830 pool_vector[pool_window_last].wend = newref;
2833 pool_window_label = lab;
2834 pool_window_last = pool_size;
2839 /* Output the literal table. START, if nonzero, is the first instruction
2840 this table is needed for, and also indicates that there is at least one
2841 casesi_worker_2 instruction; we have to emit the operand3 labels from
2842 these insns at a 4-byte aligned position. BARRIER is the barrier
2843 after which we are to place the table. */
2846 dump_table (rtx start, rtx barrier)
2854 /* Do two passes, first time dump out the HI sized constants. */
2856 for (i = 0; i < pool_size; i++)
2858 pool_node *p = &pool_vector[i];
2860 if (p->mode == HImode)
2864 scan = emit_insn_after (gen_align_2 (), scan);
2867 for (lab = p->label; lab; lab = LABEL_REFS (lab))
2868 scan = emit_label_after (lab, scan);
2869 scan = emit_insn_after (gen_consttable_2 (p->value, const0_rtx),
2871 for (ref = p->wend; ref; ref = LABEL_NEXTREF (ref))
2873 lab = XEXP (ref, 0);
2874 scan = emit_insn_after (gen_consttable_window_end (lab), scan);
2877 else if (p->mode == DFmode)
2885 scan = emit_insn_after (gen_align_4 (), scan);
2887 for (; start != barrier; start = NEXT_INSN (start))
2888 if (GET_CODE (start) == INSN
2889 && recog_memoized (start) == CODE_FOR_casesi_worker_2)
2891 rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
2892 rtx lab = XEXP (XVECEXP (src, 0, 3), 0);
2894 scan = emit_label_after (lab, scan);
2897 if (TARGET_FMOVD && TARGET_ALIGN_DOUBLE && have_df)
2899 rtx align_insn = NULL_RTX;
2901 scan = emit_label_after (gen_label_rtx (), scan);
2902 scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
2905 for (i = 0; i < pool_size; i++)
2907 pool_node *p = &pool_vector[i];
2915 if (align_insn && !p->part_of_sequence_p)
2917 for (lab = p->label; lab; lab = LABEL_REFS (lab))
2918 emit_label_before (lab, align_insn);
2919 emit_insn_before (gen_consttable_4 (p->value, const0_rtx),
2921 for (ref = p->wend; ref; ref = LABEL_NEXTREF (ref))
2923 lab = XEXP (ref, 0);
2924 emit_insn_before (gen_consttable_window_end (lab),
2927 delete_insn (align_insn);
2928 align_insn = NULL_RTX;
2933 for (lab = p->label; lab; lab = LABEL_REFS (lab))
2934 scan = emit_label_after (lab, scan);
2935 scan = emit_insn_after (gen_consttable_4 (p->value,
2937 need_align = ! need_align;
2943 scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
2948 for (lab = p->label; lab; lab = LABEL_REFS (lab))
2949 scan = emit_label_after (lab, scan);
2950 scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
2958 if (p->mode != HImode)
2960 for (ref = p->wend; ref; ref = LABEL_NEXTREF (ref))
2962 lab = XEXP (ref, 0);
2963 scan = emit_insn_after (gen_consttable_window_end (lab),
2972 for (i = 0; i < pool_size; i++)
2974 pool_node *p = &pool_vector[i];
2985 scan = emit_label_after (gen_label_rtx (), scan);
2986 scan = emit_insn_after (gen_align_4 (), scan);
2988 for (lab = p->label; lab; lab = LABEL_REFS (lab))
2989 scan = emit_label_after (lab, scan);
2990 scan = emit_insn_after (gen_consttable_4 (p->value, const0_rtx),
2998 scan = emit_label_after (gen_label_rtx (), scan);
2999 scan = emit_insn_after (gen_align_4 (), scan);
3001 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3002 scan = emit_label_after (lab, scan);
3003 scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
3011 if (p->mode != HImode)
3013 for (ref = p->wend; ref; ref = LABEL_NEXTREF (ref))
3015 lab = XEXP (ref, 0);
3016 scan = emit_insn_after (gen_consttable_window_end (lab), scan);
3021 scan = emit_insn_after (gen_consttable_end (), scan);
3022 scan = emit_barrier_after (scan);
3024 pool_window_label = NULL_RTX;
3025 pool_window_last = 0;
3028 /* Return nonzero if constant would be an ok source for a
3029 mov.w instead of a mov.l. */
3034 return (GET_CODE (src) == CONST_INT
3035 && INTVAL (src) >= -32768
3036 && INTVAL (src) <= 32767);
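/* E.g. 1000 or -3 can be placed in the pool as a 2 byte mov.w operand (the
   loaded value is sign extended from 16 bits), whereas 0x12345 still needs a
   4 byte mov.l entry. */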
3039 /* Nonzero if the insn is a move instruction which needs to be fixed. */
3041 /* ??? For DImode/DFmode moves, we don't need to fix it if each half of the
3042 CONST_DOUBLE input value is CONST_OK_FOR_I08. For a SFmode move, we don't
3043 need to fix it if the input value is CONST_OK_FOR_I08. */
3046 broken_move (rtx insn)
3048 if (GET_CODE (insn) == INSN)
3050 rtx pat = PATTERN (insn);
3051 if (GET_CODE (pat) == PARALLEL)
3052 pat = XVECEXP (pat, 0, 0);
3053 if (GET_CODE (pat) == SET
3054 /* We can load any 8 bit value if we don't care what the high
3055 order bits end up as. */
3056 && GET_MODE (SET_DEST (pat)) != QImode
3057 && (CONSTANT_P (SET_SRC (pat))
3058 /* Match mova_const. */
3059 || (GET_CODE (SET_SRC (pat)) == UNSPEC
3060 && XINT (SET_SRC (pat), 1) == UNSPEC_MOVA
3061 && GET_CODE (XVECEXP (SET_SRC (pat), 0, 0)) == CONST))
3063 && GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
3064 && (fp_zero_operand (SET_SRC (pat))
3065 || fp_one_operand (SET_SRC (pat)))
3066 /* ??? If this is a -m4 or -m4-single compilation, in general
3067 we don't know the current setting of fpscr, so disable fldi.
3068 There is an exception if this was a register-register move
3069 before reload - and hence it was ascertained that we have
3070 single precision setting - and in a post-reload optimization
3071 we changed this to do a constant load. In that case
3072 we don't have an r0 clobber, hence we must use fldi. */
3073 && (! TARGET_SH4 || TARGET_FMOVD
3074 || (GET_CODE (XEXP (XVECEXP (PATTERN (insn), 0, 2), 0))
3076 && GET_CODE (SET_DEST (pat)) == REG
3077 && FP_REGISTER_P (REGNO (SET_DEST (pat))))
3079 && GET_MODE (SET_DEST (pat)) == SImode
3080 && GET_CODE (SET_SRC (pat)) == CONST_INT
3081 && CONST_OK_FOR_I20 (INTVAL (SET_SRC (pat))))
3082 && (GET_CODE (SET_SRC (pat)) != CONST_INT
3083 || ! CONST_OK_FOR_I08 (INTVAL (SET_SRC (pat)))))
3093 return (GET_CODE (insn) == INSN
3094 && GET_CODE (PATTERN (insn)) == SET
3095 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
3096 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_MOVA
3097 /* Don't match mova_const. */
3098 && GET_CODE (XVECEXP (SET_SRC (PATTERN (insn)), 0, 0)) == LABEL_REF);
3101 /* Fix up a mova from a switch that went out of range. */
3103 fixup_mova (rtx mova)
3107 SET_SRC (PATTERN (mova)) = XVECEXP (SET_SRC (PATTERN (mova)), 0, 0);
3108 INSN_CODE (mova) = -1;
3113 rtx lab = gen_label_rtx ();
3114 rtx wpat, wpat0, wpat1, wsrc, diff;
3118 worker = NEXT_INSN (worker);
3120 || GET_CODE (worker) == CODE_LABEL
3121 || GET_CODE (worker) == JUMP_INSN)
3123 } while (recog_memoized (worker) != CODE_FOR_casesi_worker_1);
3124 wpat = PATTERN (worker);
3125 wpat0 = XVECEXP (wpat, 0, 0);
3126 wpat1 = XVECEXP (wpat, 0, 1);
3127 wsrc = SET_SRC (wpat0);
3128 PATTERN (worker) = (gen_casesi_worker_2
3129 (SET_DEST (wpat0), XVECEXP (wsrc, 0, 1),
3130 XEXP (XVECEXP (wsrc, 0, 2), 0), lab,
3132 INSN_CODE (worker) = -1;
3133 diff = gen_rtx_MINUS (Pmode, XVECEXP (SET_SRC (PATTERN (mova)), 0, 0),
3134 gen_rtx_LABEL_REF (Pmode, lab));
3135 diff = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, diff), UNSPEC_PIC);
3136 SET_SRC (PATTERN (mova)) = gen_rtx_CONST (Pmode, diff);
3137 INSN_CODE (mova) = -1;
3141 /* Find the last barrier from insn FROM which is close enough to hold the
3142 constant pool. If we can't find one, then create one near the end of the range. */
3146 find_barrier (int num_mova, rtx mova, rtx from)
3155 int leading_mova = num_mova;
3156 rtx barrier_before_mova = 0, found_barrier = 0, good_barrier = 0;
3160 /* For HImode: range is 510, add 4 because pc counts from address of
3161 second instruction after this one, subtract 2 for the jump instruction
3162 that we may need to emit before the table, subtract 2 for the instruction
3163 that fills the jump delay slot (in very rare cases, reorg will take an
3164 instruction from after the constant pool or will leave the delay slot
3165 empty). This gives 510.
3166 For SImode: range is 1020, add 4 because pc counts from address of
3167 second instruction after this one, subtract 2 in case pc is 2 byte
3168 aligned, subtract 2 for the jump instruction that we may need to emit
3169 before the table, subtract 2 for the instruction that fills the jump
3170 delay slot. This gives 1018. */
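/* That is, the limits start out as hi_limit = 510 + 4 - 2 - 2 = 510 and
   si_limit = 1020 + 4 - 2 - 2 - 2 = 1018, before the alignment adjustments
   applied in the loop below. */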
3172 /* The branch will always be shortened now that the reference address for
3173 forward branches is the successor address, thus we need no longer make
3174 adjustments to the [sh]i_limit for -O0. */
3179 while (from && count_si < si_limit && count_hi < hi_limit)
3181 int inc = get_attr_length (from);
3184 if (GET_CODE (from) == CODE_LABEL)
3187 new_align = 1 << label_to_alignment (from);
3188 else if (GET_CODE (prev_nonnote_insn (from)) == BARRIER)
3189 new_align = 1 << barrier_align (from);
3195 if (GET_CODE (from) == BARRIER)
3198 found_barrier = from;
3200 /* If we are at the end of the function, or in front of an alignment
3201 instruction, we need not insert an extra alignment. We prefer
3202 this kind of barrier. */
3203 if (barrier_align (from) > 2)
3204 good_barrier = from;
3207 if (broken_move (from))
3210 enum machine_mode mode;
3212 pat = PATTERN (from);
3213 if (GET_CODE (pat) == PARALLEL)
3214 pat = XVECEXP (pat, 0, 0);
3215 src = SET_SRC (pat);
3216 dst = SET_DEST (pat);
3217 mode = GET_MODE (dst);
3219 /* We must explicitly check the mode, because sometimes the
3220 front end will generate code to load unsigned constants into
3221 HImode targets without properly sign extending them. */
3223 || (mode == SImode && hi_const (src) && REGNO (dst) != FPUL_REG))
3226 /* We put the short constants before the long constants, so
3227 we must count the length of short constants in the range
3228 for the long constants. */
3229 /* ??? This isn't optimal, but is easy to do. */
3234 /* We dump DF/DI constants before SF/SI ones, because
3235 the limit is the same, but the alignment requirements
3236 are higher. We may waste up to 4 additional bytes
3237 for alignment, and the DF/DI constant may have
3238 another SF/SI constant placed before it. */
3239 if (TARGET_SHCOMPACT
3241 && (mode == DFmode || mode == DImode))
3246 while (si_align > 2 && found_si + si_align - 2 > count_si)
3248 if (found_si > count_si)
3249 count_si = found_si;
3250 found_si += GET_MODE_SIZE (mode);
3252 si_limit -= GET_MODE_SIZE (mode);
3255 /* See the code in machine_dependent_reorg, which has a similar if
3256 statement that generates a new mova insn in many cases. */
3257 if (GET_CODE (dst) == REG && FP_ANY_REGISTER_P (REGNO (dst)))
3267 barrier_before_mova = good_barrier ? good_barrier : found_barrier;
3269 if (found_si > count_si)
3270 count_si = found_si;
3272 else if (GET_CODE (from) == JUMP_INSN
3273 && (GET_CODE (PATTERN (from)) == ADDR_VEC
3274 || GET_CODE (PATTERN (from)) == ADDR_DIFF_VEC))
3278 if (barrier_align (next_real_insn (from)) == align_jumps_log)
3280 /* We have just passed the barrier in front of the
3281 ADDR_DIFF_VEC, which is stored in found_barrier. Since
3282 the ADDR_DIFF_VEC is accessed as data, just like our pool
3283 constants, this is a good opportunity to accommodate what
3284 we have gathered so far.
3285 If we waited any longer, we could end up at a barrier in
3286 front of code, which gives worse cache usage for separated
3287 instruction / data caches. */
3288 good_barrier = found_barrier;
3293 rtx body = PATTERN (from);
3294 inc = XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body));
3297 /* For the SH1, we generate alignments even after jumps-around-jumps. */
3298 else if (GET_CODE (from) == JUMP_INSN
3300 && ! TARGET_SMALLCODE)
3306 if (new_align > si_align)
3308 si_limit -= (count_si - 1) & (new_align - si_align);
3309 si_align = new_align;
3311 count_si = (count_si + new_align - 1) & -new_align;
3316 if (new_align > hi_align)
3318 hi_limit -= (count_hi - 1) & (new_align - hi_align);
3319 hi_align = new_align;
3321 count_hi = (count_hi + new_align - 1) & -new_align;
3323 from = NEXT_INSN (from);
3330 /* Try as we might, the leading mova is out of range. Change
3331 it into a load (which will become a pcload) and retry. */
3333 return find_barrier (0, 0, mova);
3337 /* Insert the constant pool table before the mova instruction,
3338 to prevent the mova label reference from going out of range. */
3340 good_barrier = found_barrier = barrier_before_mova;
3346 if (good_barrier && next_real_insn (found_barrier))
3347 found_barrier = good_barrier;
3351 /* We didn't find a barrier in time to dump our stuff,
3352 so we'll make one. */
3353 rtx label = gen_label_rtx ();
3355 /* If we exceeded the range, then we must back up over the last
3356 instruction we looked at. Otherwise, we just need to undo the
3357 NEXT_INSN at the end of the loop. */
3358 if (count_hi > hi_limit || count_si > si_limit)
3359 from = PREV_INSN (PREV_INSN (from));
3361 from = PREV_INSN (from);
3363 /* Walk back to be just before any jump or label.
3364 Putting it before a label reduces the number of times the branch
3365 around the constant pool table will be hit. Putting it before
3366 a jump makes it more likely that the bra delay slot will be filled. */
3368 while (GET_CODE (from) == JUMP_INSN || GET_CODE (from) == NOTE
3369 || GET_CODE (from) == CODE_LABEL)
3370 from = PREV_INSN (from);
3372 from = emit_jump_insn_after (gen_jump (label), from);
3373 JUMP_LABEL (from) = label;
3374 LABEL_NUSES (label) = 1;
3375 found_barrier = emit_barrier_after (from);
3376 emit_label_after (label, found_barrier);
3379 return found_barrier;
3382 /* If the instruction INSN is implemented by a special function, and we can
3383 positively find the register that is used to call the sfunc, and this
3384 register is not used anywhere else in this instruction - except as the
3385 destination of a set, return this register; else, return 0. */
3387 sfunc_uses_reg (rtx insn)
3390 rtx pattern, part, reg_part, reg;
3392 if (GET_CODE (insn) != INSN)
3394 pattern = PATTERN (insn);
3395 if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
3398 for (reg_part = 0, i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
3400 part = XVECEXP (pattern, 0, i);
3401 if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == SImode)
3406 reg = XEXP (reg_part, 0);
3407 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
3409 part = XVECEXP (pattern, 0, i);
3410 if (part == reg_part || GET_CODE (part) == CLOBBER)
3412 if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
3413 && GET_CODE (SET_DEST (part)) == REG)
3414 ? SET_SRC (part) : part)))
3420 /* See if the only way in which INSN uses REG is by calling it, or by
3421 setting it while calling it. Set *SET to a SET rtx if the register is set by INSN. */
3425 noncall_uses_reg (rtx reg, rtx insn, rtx *set)
3431 reg2 = sfunc_uses_reg (insn);
3432 if (reg2 && REGNO (reg2) == REGNO (reg))
3434 pattern = single_set (insn);
3436 && GET_CODE (SET_DEST (pattern)) == REG
3437 && REGNO (reg) == REGNO (SET_DEST (pattern)))
3441 if (GET_CODE (insn) != CALL_INSN)
3443 /* We don't use rtx_equal_p because we don't care if the mode is different. */
3445 pattern = single_set (insn);
3447 && GET_CODE (SET_DEST (pattern)) == REG
3448 && REGNO (reg) == REGNO (SET_DEST (pattern)))
3454 par = PATTERN (insn);
3455 if (GET_CODE (par) == PARALLEL)
3456 for (i = XVECLEN (par, 0) - 1; i >= 0; i--)
3458 part = XVECEXP (par, 0, i);
3459 if (GET_CODE (part) != SET && reg_mentioned_p (reg, part))
3462 return reg_mentioned_p (reg, SET_SRC (pattern));
3468 pattern = PATTERN (insn);
3470 if (GET_CODE (pattern) == PARALLEL)
3474 for (i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
3475 if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
3477 pattern = XVECEXP (pattern, 0, 0);
3480 if (GET_CODE (pattern) == SET)
3482 if (reg_mentioned_p (reg, SET_DEST (pattern)))
3484 /* We don't use rtx_equal_p, because we don't care if the
3485 mode is different. */
3486 if (GET_CODE (SET_DEST (pattern)) != REG
3487 || REGNO (reg) != REGNO (SET_DEST (pattern)))
3493 pattern = SET_SRC (pattern);
3496 if (GET_CODE (pattern) != CALL
3497 || GET_CODE (XEXP (pattern, 0)) != MEM
3498 || ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
3504 /* Given X, a pattern of an insn or a part of it, return a mask of used
3505 general registers. Bits 0..15 mean that the respective registers
3506 are used as inputs in the instruction. Bits 16..31 mean that the
3507 registers 0..15, respectively, are used as outputs, or are clobbered.
3508 IS_DEST should be set to 16 if X is the destination of a SET, else to 0. */
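/* For example, for (set (reg:SI 1) (reg:SI 4)) this returns
   (1 << 4) | (1 << (1 + 16)) == 0x20010: r4 is read, r1 is written. */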
3510 regs_used (rtx x, int is_dest)
3518 code = GET_CODE (x);
3523 return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
3524 << (REGNO (x) + is_dest));
3528 rtx y = SUBREG_REG (x);
3530 if (GET_CODE (y) != REG)
3533 return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
3535 subreg_regno_offset (REGNO (y),
3538 GET_MODE (x)) + is_dest));
3542 return regs_used (SET_SRC (x), 0) | regs_used (SET_DEST (x), 16);
3544 /* If there was a return value, it must have been indicated with USE. */
3559 fmt = GET_RTX_FORMAT (code);
3561 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3566 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3567 used |= regs_used (XVECEXP (x, i, j), is_dest);
3569 else if (fmt[i] == 'e')
3570 used |= regs_used (XEXP (x, i), is_dest);
3575 /* Create an instruction that prevents redirection of a conditional branch
3576 to the destination of the JUMP with address ADDR.
3577 If the branch needs to be implemented as an indirect jump, try to find
3578 a scratch register for it.
3579 If NEED_BLOCK is 0, don't do anything unless we need a scratch register.
3580 If any preceding insn that doesn't fit into a delay slot is good enough,
3581 pass 1. Pass 2 if a definite blocking insn is needed.
3582 -1 is used internally to avoid deep recursion.
3583 If a blocking instruction is made or recognized, return it. */
3586 gen_block_redirect (rtx jump, int addr, int need_block)
3589 rtx prev = prev_nonnote_insn (jump);
3592 /* First, check if we already have an instruction that satisfies our need. */
3593 if (prev && GET_CODE (prev) == INSN && ! INSN_DELETED_P (prev))
3595 if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
3597 if (GET_CODE (PATTERN (prev)) == USE
3598 || GET_CODE (PATTERN (prev)) == CLOBBER
3599 || get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
3601 else if ((need_block &= ~1) < 0)
3603 else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
3606 if (GET_CODE (PATTERN (jump)) == RETURN)
3610 /* Reorg even does nasty things with return insns that cause branches
3611 to go out of range - see find_end_label and callers. */
3612 return emit_insn_before (gen_block_branch_redirect (const0_rtx) , jump);
3614 /* We can't use JUMP_LABEL here because it might be undefined
3615 when not optimizing. */
3616 dest = XEXP (SET_SRC (PATTERN (jump)), 0);
3617 /* If the branch is out of range, try to find a scratch register for it. */
3619 && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
3623 /* Don't look for the stack pointer as a scratch register;
3624 it would cause trouble if an interrupt occurred. */
3625 unsigned try = 0x7fff, used;
3626 int jump_left = flag_expensive_optimizations + 1;
3628 /* It is likely that the most recent eligible instruction is wanted for
3629 the delay slot. Therefore, find out which registers it uses, and
3630 try to avoid using them. */
3632 for (scan = jump; (scan = PREV_INSN (scan)); )
3636 if (INSN_DELETED_P (scan))
3638 code = GET_CODE (scan);
3639 if (code == CODE_LABEL || code == JUMP_INSN)
3642 && GET_CODE (PATTERN (scan)) != USE
3643 && GET_CODE (PATTERN (scan)) != CLOBBER
3644 && get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES)
3646 try &= ~regs_used (PATTERN (scan), 0);
3650 for (used = dead = 0, scan = JUMP_LABEL (jump);
3651 (scan = NEXT_INSN (scan)); )
3655 if (INSN_DELETED_P (scan))
3657 code = GET_CODE (scan);
3660 used |= regs_used (PATTERN (scan), 0);
3661 if (code == CALL_INSN)
3662 used |= regs_used (CALL_INSN_FUNCTION_USAGE (scan), 0);
3663 dead |= (used >> 16) & ~used;
3669 if (code == JUMP_INSN)
3671 if (jump_left-- && simplejump_p (scan))
3672 scan = JUMP_LABEL (scan);
3678 /* Mask out the stack pointer again, in case it was
3679 the only 'free' register we have found. */
3682 /* If the immediate destination is still in range, check for possible
3683 threading with a jump beyond the delay slot insn.
3684 Don't check if we are called recursively; the jump has been or will be
3685 checked in a different invocation then. */
3687 else if (optimize && need_block >= 0)
3689 rtx next = next_active_insn (next_active_insn (dest));
3690 if (next && GET_CODE (next) == JUMP_INSN
3691 && GET_CODE (PATTERN (next)) == SET
3692 && recog_memoized (next) == CODE_FOR_jump_compact)
3694 dest = JUMP_LABEL (next);
3696 && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
3698 gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), -1);
3704 rtx reg = gen_rtx_REG (SImode, exact_log2 (dead & -dead));
3706 /* It would be nice if we could convert the jump into an indirect
3707 jump / far branch right now, and thus expose all constituent
3708 instructions to further optimization. However, reorg uses
3709 simplejump_p to determine if there is an unconditional jump where
3710 it should try to schedule instructions from the target of the
3711 branch; simplejump_p fails for indirect jumps even if they have a JUMP_LABEL. */
3713 rtx insn = emit_insn_before (gen_indirect_jump_scratch
3714 (reg, GEN_INT (INSN_UID (JUMP_LABEL (jump))))
3716 /* ??? We would like this to have the scope of the jump, but that
3717 scope will change when a delay slot insn of an inner scope is added.
3718 Hence, after delay slot scheduling, we'll have to expect
3719 NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and the jump. */
3722 INSN_LOCATOR (insn) = INSN_LOCATOR (jump);
3723 INSN_CODE (insn) = CODE_FOR_indirect_jump_scratch;
3726 else if (need_block)
3727 /* We can't use JUMP_LABEL here because it might be undefined
3728 when not optimizing. */
3729 return emit_insn_before (gen_block_branch_redirect
3730 (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))))
3735 #define CONDJUMP_MIN -252
3736 #define CONDJUMP_MAX 262
3739 /* A label (to be placed) in front of the jump
3740 that jumps to our ultimate destination. */
3742 /* Where we are going to insert it if we cannot move the jump any farther,
3743 or the jump itself if we have picked up an existing jump. */
3745 /* The ultimate destination. */
3747 struct far_branch *prev;
3748 /* If the branch has already been created, its address;
3749 else the address of its first prospective user. */
3753 static void gen_far_branch (struct far_branch *);
3754 enum mdep_reorg_phase_e mdep_reorg_phase;
3756 gen_far_branch (struct far_branch *bp)
3758 rtx insn = bp->insert_place;
3760 rtx label = gen_label_rtx ();
3762 emit_label_after (label, insn);
3765 jump = emit_jump_insn_after (gen_jump (bp->far_label), insn);
3766 LABEL_NUSES (bp->far_label)++;
3769 jump = emit_jump_insn_after (gen_return (), insn);
3770 /* Emit a barrier so that reorg knows that any following instructions
3771 are not reachable via a fall-through path.
3772 But don't do this when not optimizing, since we wouldn't suppress the
3773 alignment for the barrier then, and could end up with out-of-range
3774 pc-relative loads. */
3776 emit_barrier_after (jump);
3777 emit_label_after (bp->near_label, insn);
3778 JUMP_LABEL (jump) = bp->far_label;
3779 if (! invert_jump (insn, label, 1))
3781 /* If we are branching around a jump (rather than a return), prevent
3782 reorg from using an insn from the jump target as the delay slot insn -
3783 when reorg did this, it pessimized code (we'd rather hide the delay slot)
3784 and it could cause branches to go out of range. */
3787 (gen_stuff_delay_slot
3788 (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))),
3789 GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
3791 /* Prevent reorg from undoing our splits. */
3792 gen_block_redirect (jump, bp->address += 2, 2);
3795 /* Fix up ADDR_DIFF_VECs. */
3797 fixup_addr_diff_vecs (rtx first)
3801 for (insn = first; insn; insn = NEXT_INSN (insn))
3803 rtx vec_lab, pat, prev, prevpat, x, braf_label;
3805 if (GET_CODE (insn) != JUMP_INSN
3806 || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
3808 pat = PATTERN (insn);
3809 vec_lab = XEXP (XEXP (pat, 0), 0);
3811 /* Search for the matching casesi_jump_2. */
3812 for (prev = vec_lab; ; prev = PREV_INSN (prev))
3814 if (GET_CODE (prev) != JUMP_INSN)
3816 prevpat = PATTERN (prev);
3817 if (GET_CODE (prevpat) != PARALLEL || XVECLEN (prevpat, 0) != 2)
3819 x = XVECEXP (prevpat, 0, 1);
3820 if (GET_CODE (x) != USE)
3823 if (GET_CODE (x) == LABEL_REF && XEXP (x, 0) == vec_lab)
3826 /* FIXME: This is a bug in the optimizer, but it seems harmless
3827 to just avoid panicking. */
3831 /* Emit the reference label of the braf where it belongs, right after
3832 the casesi_jump_2 (i.e. braf). */
3833 braf_label = XEXP (XEXP (SET_SRC (XVECEXP (prevpat, 0, 0)), 1), 0);
3834 emit_label_after (braf_label, prev);
3836 /* Fix up the ADDR_DIFF_VEC to be relative
3837 to the reference address of the braf. */
3838 XEXP (XEXP (pat, 0), 0) = braf_label;
3842 /* BARRIER_OR_LABEL is either a BARRIER or a CODE_LABEL immediately following
3843 a barrier. Return the base 2 logarithm of the desired alignment. */
3845 barrier_align (rtx barrier_or_label)
3847 rtx next = next_real_insn (barrier_or_label), pat, prev;
3848 int slot, credit, jump_to_next = 0;
3853 pat = PATTERN (next);
3855 if (GET_CODE (pat) == ADDR_DIFF_VEC)
3858 if (GET_CODE (pat) == UNSPEC_VOLATILE && XINT (pat, 1) == UNSPECV_ALIGN)
3859 /* This is a barrier in front of a constant table. */
3862 prev = prev_real_insn (barrier_or_label);
3863 if (GET_CODE (PATTERN (prev)) == ADDR_DIFF_VEC)
3865 pat = PATTERN (prev);
3866 /* If this is a very small table, we want to keep the alignment after
3867 the table to the minimum for proper code alignment. */
3868 return ((TARGET_SMALLCODE
3869 || ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
3870 <= (unsigned) 1 << (CACHE_LOG - 2)))
3871 ? 1 << TARGET_SHMEDIA : align_jumps_log);
3874 if (TARGET_SMALLCODE)
3877 if (! TARGET_SH2 || ! optimize)
3878 return align_jumps_log;
3880 /* When fixing up pcloads, a constant table might be inserted just before
3881 the basic block that ends with the barrier. Thus, we can't trust the
3882 instruction lengths before that. */
3883 if (mdep_reorg_phase > SH_FIXUP_PCLOAD)
3885 /* Check if there is an immediately preceding branch to the insn beyond
3886 the barrier. We must weigh the cost of discarding useful information
3887 from the current cache line when executing this branch and there is
3888 an alignment, against that of fetching unneeded insns in front of the
3889 branch target when there is no alignment. */
3891 /* There are two delay_slot cases to consider. One is the simple case
3892 where the preceding branch is to the insn beyond the barrier (simple
3893 delay slot filling), and the other is where the preceding branch has
3894 a delay slot that is a duplicate of the insn after the barrier
3895 (fill_eager_delay_slots) and the branch is to the insn after the insn
3896 after the barrier. */
3898 /* PREV is presumed to be the JUMP_INSN for the barrier under
3899 investigation. Skip to the insn before it. */
3900 prev = prev_real_insn (prev);
3902 for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
3903 credit >= 0 && prev && GET_CODE (prev) == INSN;
3904 prev = prev_real_insn (prev))
3907 if (GET_CODE (PATTERN (prev)) == USE
3908 || GET_CODE (PATTERN (prev)) == CLOBBER)
3910 if (GET_CODE (PATTERN (prev)) == SEQUENCE)
3912 prev = XVECEXP (PATTERN (prev), 0, 1);
3913 if (INSN_UID (prev) == INSN_UID (next))
3915 /* Delay slot was filled with insn at jump target. */
3922 get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
3924 credit -= get_attr_length (prev);
3927 && GET_CODE (prev) == JUMP_INSN
3928 && JUMP_LABEL (prev))
3932 || next_real_insn (JUMP_LABEL (prev)) == next
3933 /* If relax_delay_slots() decides NEXT was redundant
3934 with some previous instruction, it will have
3935 redirected PREV's jump to the following insn. */
3936 || JUMP_LABEL (prev) == next_nonnote_insn (next)
3937 /* There is no upper bound on redundant instructions
3938 that might have been skipped, but we must not put an
3939 alignment where none had been before. */
3940 || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
3942 && (INSN_CODE (x) == CODE_FOR_block_branch_redirect
3943 || INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
3944 || INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
3946 rtx pat = PATTERN (prev);
3947 if (GET_CODE (pat) == PARALLEL)
3948 pat = XVECEXP (pat, 0, 0);
3949 if (credit - slot >= (GET_CODE (SET_SRC (pat)) == PC ? 2 : 0))
3955 return align_jumps_log;
3958 /* If we are inside a phony loop, almost any kind of label can turn up as the
3959 first one in the loop. Aligning a braf label causes incorrect switch
3960 destination addresses; we can detect braf labels because they are
3961 followed by a BARRIER.
3962 Applying loop alignment to small constant or switch tables is a waste
3963 of space, so we suppress this too. */
3965 sh_loop_align (rtx label)
3970 next = next_nonnote_insn (next);
3971 while (next && GET_CODE (next) == CODE_LABEL);
3975 || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC
3976 || recog_memoized (next) == CODE_FOR_consttable_2)
3979 return align_loops_log;
3982 /* Do a final pass over the function, just before delayed branch scheduling. */
3988 rtx first, insn, mova = NULL_RTX;
3990 rtx r0_rtx = gen_rtx_REG (Pmode, 0);
3991 rtx r0_inc_rtx = gen_rtx_POST_INC (Pmode, r0_rtx);
3993 first = get_insns ();
3995 /* We must split call insns before introducing `mova's. If we're
3996 optimizing, they'll have already been split. Otherwise, make
3997 sure we don't split them too late. */
3999 split_all_insns_noflow ();
4004 /* If relaxing, generate pseudo-ops to associate function calls with
4005 the symbols they call. It does no harm to not generate these
4006 pseudo-ops. However, when we can generate them, it enables the
4007 linker to potentially relax the jsr to a bsr, and eliminate the
4008 register load and, possibly, the constant pool entry. */
4010 mdep_reorg_phase = SH_INSERT_USES_LABELS;
4013 /* Remove all REG_LABEL notes. We want to use them for our own
4014 purposes. This works because none of the remaining passes
4015 need to look at them.
4017 ??? But it may break in the future. We should use a machine
4018 dependent REG_NOTE, or some other approach entirely. */
4019 for (insn = first; insn; insn = NEXT_INSN (insn))
4025 while ((note = find_reg_note (insn, REG_LABEL, NULL_RTX)) != 0)
4026 remove_note (insn, note);
4030 for (insn = first; insn; insn = NEXT_INSN (insn))
4032 rtx pattern, reg, link, set, scan, dies, label;
4033 int rescan = 0, foundinsn = 0;
4035 if (GET_CODE (insn) == CALL_INSN)
4037 pattern = PATTERN (insn);
4039 if (GET_CODE (pattern) == PARALLEL)
4040 pattern = XVECEXP (pattern, 0, 0);
4041 if (GET_CODE (pattern) == SET)
4042 pattern = SET_SRC (pattern);
4044 if (GET_CODE (pattern) != CALL