Copyright (C) 1993, 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002,
2003, 2004 Free Software Foundation, Inc.
Contributed by Steve Chamberlain (sac@cygnus.com).
static int sh_reorder2 (FILE *, int, rtx *, int *, int);
static void sh_md_init (FILE *, int, int);
static int sh_variable_issue (FILE *, int, rtx, int);
static bool sh_function_ok_for_sibcall (tree, tree);
static bool sh_cannot_modify_jumps_p (void);
/* The next 5 hooks have been implemented for reenabling sched1. With the
help of these macros we are limiting the movement of insns in sched1 to
and SFmode regs required by already scheduled insns. When these counts
cross some threshold values, give priority to insns that free registers.
The insn that frees registers is most likely to be the insn with lowest
queue (Q) instead of the ready queue (R). To solve this, we skip cycles
up to a max of 8 cycles so that such insns may move from Q -> R.
scheduler; it is called inside the sched_init function just after
find_insn_reg_weights function call. It is used to calculate the SImode
and SFmode weights of insns of basic blocks, much like what
TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.
TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
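/* Illustrative sketch (not part of the original source): the pressure
   heuristic described above boils down to bumping the priority of an
   insn that frees a register once the SImode/SFmode live counts cross
   a threshold.  The names and threshold values below are invented for
   this example only.  */
#define SIMODE_PRESSURE_LIMIT 8		/* hypothetical threshold */
#define SFMODE_PRESSURE_LIMIT 8		/* hypothetical threshold */

static int
adjust_priority_sketch (int priority, int simode_live, int sfmode_live,
			int frees_register)
{
  if (frees_register
      && (simode_live > SIMODE_PRESSURE_LIMIT
	  || sfmode_live > SFMODE_PRESSURE_LIMIT))
    /* The register-freeing insn is typically the one with the lowest
       LUID, so raising its priority keeps sched1's reordering, and
       hence its effect on register pressure, small.  */
    priority++;
  return priority;
}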
{
rtx tga_op1, tga_ret, tmp, tmp2;
/* If the instruction in the delay slot is annulled (true), then
there is no delay slot where we can put it now. The only safe
place for it is after the label. final will do that by default. */
}
else
asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label);
output_asm_insn ("bra\t%l0", &op0);
fprintf (asm_out_file, "\tnop\n");
(*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
sprintf (buffer, "b%s%ss\t%%l0",
logic ? "t" : "f",
ASSEMBLER_DIALECT ? "/" : ".");
fputs ("\t.section .directive, \"SM\", @progbits, 1\n", asm_out_file);
fputs ("\t.asciz \"#<SYMEDIT>#\\n\"\n", asm_out_file);
#endif
if (TARGET_ELF)
/* We need to show the text section with the proper
attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
/* Likewise, but for shift amounts < 16, up to three highmost bits
might be clobbered. This is typically used when combined with some
kind of sign or zero extension. */
static const char ext_shift_insns[] =
{ 0,1,1,2,2,3,2,2,1,2,2,3,3,3,2,2,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
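/* Illustrative lookup, not part of the original source: the table is
   indexed by the shift count and gives the number of insns needed
   under the relaxed conditions described above.  The standalone copy
   below exists only so the example is self-contained.  */
#include <stdio.h>

static const char ext_shift_insns_copy[] =
  { 0,1,1,2,2,3,2,2,1,2,2,3,3,3,2,2,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};

int
main (void)
{
  /* Shifts by 8 or 16 need a single insn; a shift by 21 needs four.  */
  printf ("shift by  8: %d insns\n", ext_shift_insns_copy[8]);	/* 1 */
  printf ("shift by 16: %d insns\n", ext_shift_insns_copy[16]);	/* 1 */
  printf ("shift by 21: %d insns\n", ext_shift_insns_copy[21]);	/* 4 */
  return 0;
}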
for (i = 0; i < max; i++)
gen_ashift (code, shift_amounts[value][i], operands[0]);
}
pass 1. Pass 2 if a definite blocking insn is needed.
-1 is used internally to avoid deep recursion.
If a blocking instruction is made or recognized, return it. */
it would cause trouble if an interrupt occurred. */
unsigned try = 0x7fff, used;
int jump_left = flag_expensive_optimizations + 1;
/* It is likely that the most recent eligible instruction is wanted for
the delay slot. Therefore, find out which registers it uses, and
try to avoid using them. */
threading with a jump beyond the delay slot insn.
Don't check if we are called recursively; the jump has been or will be
checked in a different invocation then. */
Hence, after delay slot scheduling, we'll have to expect
NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and
the jump. */
{
rtx next = next_real_insn (barrier_or_label), pat, prev;
int slot, credit, jump_to_next = 0;
an alignment, against that of fetching unneeded insn in front of the
branch target when there is no alignment. */
/* There are two delay_slot cases to consider.  One is the simple case
   where the preceding branch is to the insn beyond the barrier (simple
   delay slot filling), and the other is where the preceding branch has
   a delay slot that is a duplicate of the insn after the barrier
   (fill_eager_delay_slots) and the branch is to the insn after the insn
if (GET_CODE (PATTERN (prev)) == SEQUENCE)
{
prev = XVECEXP (PATTERN (prev), 0, 1);
/* There is no upper bound on redundant instructions
that might have been skipped, but we must not put an
alignment where none had been before. */
&& (INSN_CODE (x) == CODE_FOR_block_branch_redirect
|| INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
|| INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
rtx label = 0;
int dest_uid = get_dest_uid (olabel, max_uid);
struct far_branch *bp = uid_branch[dest_uid];
/* redirect_jump needs a valid JUMP_LABEL, and it might delete
the label if the LABEL_NUSES count drops to zero. There is
always a jump_optimize pass that sets these values, but it
gen_block_redirect (beyond,
INSN_ADDRESSES (INSN_UID (beyond)), 1);
}
x = gen_pop_e (gen_rtx_REG (SFmode, rn));
else
x = gen_pop (gen_rtx_REG (SImode, rn));
/* Decide whether we should reserve space for callee-save target registers,
in case target register allocation wants to use them. REGS_SAVED is
the space, in bytes, that is already required for register saves.
use reverse order. Returns the last entry written to (not counting
the delimiter). OFFSET_BASE is a number to be added to all offset
entries. */
that already happens to be at the function start into the prologue. */
if (target_flags != save_flags && ! current_function_interrupt)
emit_insn (gen_toggle_sz ());
insn = emit_move_insn (mem_rtx, reg_rtx);
RTX_FRAME_RELATED_P (insn) = 1;
if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
&& hard_regs_intersect_p (&live_regs_mask,
&reg_class_contents[DF_REGS]))
entry = sh5_schedule_saves (&live_regs_mask, &schedule, 0);
offset = entry[1].offset;
for (; entry->mode != VOIDmode; entry--)
if (! TARGET_SH2E && ! TARGET_SH4 && ! TARGET_SH5)
{
error ("__builtin_saveregs not supported by this subtarget");
GEN_INT (-2 * UNITS_PER_WORD)));
mem = gen_rtx_MEM (DFmode, fpregs);
set_mem_alias_set (mem, alias_set);
/* Update the data in CUM to advance over an argument
of mode MODE and data type TYPE.
(TYPE is null for libcalls where that information may not be
/* Symbian support adds three new attributes:
dllexport - for exporting a function/variable that will live in a dll
dllimport - for importing a function/variable from a dll
Microsoft allows multiple declspecs in one __declspec, separating
them with spaces. We do NOT support this. Instead, use __declspec
multiple times. */
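/* Usage sketch for the attributes described above (illustrative only;
   the identifiers are made up, and this assumes a compiler that
   accepts the __declspec spelling).  */
__declspec (dllexport) int get_version (void);		/* exported function */
__declspec (dllexport) int shared_counter;		/* exported variable */
__declspec (dllimport) extern int imported_value;	/* imported from a dll */

/* NOT supported: several attributes separated by spaces inside a
   single __declspec; repeat __declspec once per attribute instead.  */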
int flag_mask
= (SH1_BIT | SH2_BIT | SH3_BIT | SH_E_BIT | HARD_SH4_BIT | FPU_SINGLE_BIT
| SH4_BIT | HITACHI_BIT | LITTLE_ENDIAN_BIT);
/* -fpic and -fpie also usually make a PCH invalid. */
if (data[0] != flag_pic)
return _("created and used with different settings of -fpic");
MODE is the mode we are setting it to. */
void
fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
/* Return true if it's possible to redirect BRANCH1 to the destination
of an unconditional jump BRANCH2. We only want to do this if the
resulting branch will have a short displacement. */
sh_can_redirect_branch (rtx branch1, rtx branch2)
{
if (flag_expensive_optimizations && simplejump_p (branch2))

      for (distance = 0, insn = NEXT_INSN (branch1);
	   insn && distance < 256;
minimizes instruction movement, thus minimizing sched's effect on
register pressure. */
return INSN_LUID (tmp) - INSN_LUID (tmp2);
sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
int sched_verbose ATTRIBUTE_UNUSED,
rtx *ready ATTRIBUTE_UNUSED,
/* Skip cycles without sorting the ready queue. This will move insn from
Q->R. If this is the last cycle we are skipping, allow sorting of ready
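/* Minimal standalone sketch of the capped cycle skipping described
   above; this is not the actual dfa_new_cycle hook, and the names and
   state handling are invented for illustration.  */
#define MAX_SKIP_CYCLES 8	/* the "max of 8 cycles" mentioned above */

static int skipped_cycles = 0;

static int
maybe_skip_cycle_sketch (int high_register_pressure)
{
  if (high_register_pressure && skipped_cycles < MAX_SKIP_CYCLES)
    {
      /* Ask for another cycle without sorting, so stalled insns can
	 migrate from the queue (Q) to the ready list (R).  */
      skipped_cycles++;
      return 1;
    }
  /* Last skipped cycle (or no pressure): allow normal sorting again.  */
  skipped_cycles = 0;
  return 0;
}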
/* Implements target hook vector_mode_supported_p.  */
bool
sh_vector_mode_supported_p (enum machine_mode mode)
{
  if (TARGET_FPU_ANY
      && ((mode == V2SFmode)
	  || (mode == V4SFmode)
	  || (mode == V16SFmode)))
    return true;

  else if (TARGET_SHMEDIA
	   && ((mode == V8QImode)
	       || (mode == V2HImode)
	       || (mode == V4HImode)
	       || (mode == V2SImode)))
    return true;

  return false;
}

/* Find the "this" pointer. We have such a wide range of ABIs for the
SH that it's best to do this completely machine independently.
comes first, in which case "this" comes second. */
INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
#ifndef PCC_STATIC_STRUCT_RETURN
structure_value_byref = 1;
#endif /* not PCC_STATIC_STRUCT_RETURN */
if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
tree ptype = build_pointer_type (TREE_TYPE (funtype));
FUNCTION_ARG_ADVANCE (cum, Pmode, ptype, 1);
if (GET_CODE (PATTERN (insn)) == SEQUENCE)
insn = XVECEXP (PATTERN (insn), 0, 0);
if (GET_CODE (PATTERN (insn)) != PARALLEL
/* This function returns a constant rtx that represents pi / 2**15 in
DFmode.  It's used to scale DFmode angles, in radians, to a
fixed-point signed 16.16-bit fraction of a full circle, i.e., 2*pi
/* This function returns a constant rtx that represents 2**15 / pi in
SFmode.  It's used to scale a fixed-point signed 16.16-bit fraction
of a full circle back to a SFmode value, i.e., 0x10000 maps to
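/* Illustrative check of the scaling above (standalone example, not
   from the source): dividing an angle in radians by pi / 2**15 maps a
   full circle, 2*pi, onto 0x10000 in the 16.16 fixed-point form.  */
#include <stdio.h>

int
main (void)
{
  double pi = 3.14159265358979323846;
  double scale = pi / 32768.0;		/* pi / 2**15 */
  double full_circle = 2.0 * pi;	/* one full turn, in radians */

  /* 2*pi / (pi / 2**15) == 2**16 == 0x10000.  */
  printf ("0x%lx\n", (long) (full_circle / scale + 0.5));
  return 0;
}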