#define REG_ODD \
( (1 << (int) QImode) | (1 << (int) HImode) | (1 << (int) SImode) \
| (1 << (int) QFmode) | (1 << (int) HFmode) | (1 << (int) SFmode) \
- | (1 << (int) CQImode) | (1 << (int) CHImode))
+ | (1 << (int) CQImode) | (1 << (int) CHImode)| (1<< (int)DFmode) | (1<<(int)DImode))
#define REG_EVEN \
- (REG_ODD | (1 << (int) DImode) | (1 << (int) DFmode) \
- | (1 << (int) CSImode) | (1 << (int) SCmode))
+ (REG_ODD | (1 << (int) CSImode) | (1 << (int) SCmode))
#define SI_ONLY (1<<(int)SImode)
+
int hard_regno_mode_ok[] =
{
REG_EVEN, REG_ODD, REG_EVEN, REG_ODD,
'^' increment the local label number
'!' dump the constant table
'#' output a nop if there is nothing to put in the delay slot
+ '@' print rte or rts depending on whether the interrupt pragma is in effect
'R' print the next register or memory location along, ie the lsw in
a double word value
'O' print a constant without the #
case '^':
lf++;
break;
+ case '@':
+ if (pragma_interrupt)
+ fprintf (stream,"rte");
+ else
+ fprintf (stream,"rts");
+ break;
case '#':
/* Output a nop if there's nothing in the delay slot */
if (dbr_sequence_length () == 0)
{
- fprintf (stream, "\n\tor r0,r0\t!wasted slot");
+ fprintf (stream, "\n\tnop");
}
break;
case 'O':
rtx dst;
int i = INTVAL (operands[1]) & 0xffffffff;
- if (CONST_OK_FOR_I (i))
+ if (CONST_OK_FOR_I (i))
return 0;
- dst = mode == SImode ? operands[0] : gen_reg_rtx (SImode);
+ if (TARGET_CLEN0 && mode != QImode)
+ return 0;
+
+ if (mode != SImode)
+ {
+ if (reload_in_progress)
+ return 0;
+ dst = gen_reg_rtx (SImode);
+ }
+ else
+ {
+ dst = operands[0];
+ }
/* 00000000 00000000 11111111 1NNNNNNN load and zero extend word */
if ((i & 0xffffff80) == 0x0000ff80)
int constp = (GET_CODE (operands[2]) == CONST_INT);
int bytes = (constp ? INTVAL (operands[2]) : 0);
enum machine_mode mode;
+
/* Fail unless the byte count is a known, positive constant */
if (!constp || bytes <= 0)
return 0;
+ /* Don't expand if we'd make the code bigger and we don't want big code */
+
+ if (bytes > 8 && TARGET_SMALLCODE)
+ return 0;
+
switch (align)
{
case 1:
mode = SImode;
align = 4;
}
+
if (mode == SImode && constp && bytes < 64 && (bytes % 4 == 0))
{
char entry[30];
return 1;
}
}
- return 0;
+ return 0;
}
/* Prepare operands for a move define_expand; specifically, one of the
REGNO (dst) >= FIRST_PSEUDO_REGISTER)
return 0;
-
if (push_operand (dst, mode))
return 0;
return 0;
}
-/* Work out the subword parts to split up a double move
- into two SI moves - take care to do it in the right order
- */
-
-int
-prepare_split_double_ops (operands, mode)
- rtx operands[];
- enum machine_mode mode;
-{
- if (GET_CODE (operands[1]) == REG
- && REGNO (operands[1]) > FIRST_PSEUDO_REGISTER)
- return 0;
-
- if (GET_CODE (operands[0]) == REG
- && REGNO (operands[0]) > FIRST_PSEUDO_REGISTER)
- return 0;
-
- /* If we split move insns from memory, it confuses scheduling
- later on. */
- if (GET_CODE (operands[1]) == MEM)
- return 0;
- if (GET_CODE (operands[0]) == MEM)
- return 0;
-
- if (GET_CODE (operands[0]) != REG
- || !refers_to_regno_p (REGNO (operands[0]),
- REGNO (operands[0]) + 1, operands[1], 0))
- {
- operands[2] = operand_subword (operands[0], 0, 0, mode);
- operands[3] = operand_subword (operands[1], 0, 0, mode);
- operands[4] = operand_subword (operands[0], 1, 0, mode);
- operands[5] = operand_subword (operands[1], 1, 0, mode);
- }
- else
- {
- operands[2] = operand_subword (operands[0], 1, 0, mode);
- operands[3] = operand_subword (operands[1], 1, 0, mode);
- operands[4] = operand_subword (operands[0], 0, 0, mode);
- operands[5] = operand_subword (operands[1], 0, 0, mode);
- }
-
- if (operands[2] == 0 || operands[3] == 0
- || operands[4] == 0 || operands[5] == 0)
- return 0;
-
- emit_move_insn (operands[2], operands[3]);
- emit_move_insn (operands[4], operands[5]);
- return 1;
-}
-
/* Prepare the operands for an scc instruction; make sure that the
compare has been done. */
rtx
rtx dst = operands[0];
rtx src = operands[1];
- fprintf (asm_out_file, "! move double \n");
- fprintf (asm_out_file, "! pc %04x\n", insn_addresses[INSN_UID (insn)]);
+/* fprintf (asm_out_file, "! move double \n");
+ fprintf (asm_out_file, "! pc %04x\n", insn_addresses[INSN_UID (insn)]);*/
if (GET_CODE (dst) == MEM
&& GET_CODE (XEXP (dst, 0)) == POST_INC)
{
FILE *stream;
int size;
{
- fprintf (stream, "\trts\n");
- fprintf (stream, "\tor r0,r0\n");
+ pragma_interrupt = pragma_trapa = 0;
}
output_asm_insn ("mov.l @r15+,r13", 0);
}
- ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (thislab));
output_asm_insn (".align 2", 0);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (thislab));
output_asm_insn (".long %O0", &op);
return "";
}
int label = lf++;
int rn = -1;
int need_save;
- fprintf (asm_out_file, "! pc %04x\n", insn_addresses[INSN_UID (insn)]);
+/* fprintf (asm_out_file, "! pc %04x\n", insn_addresses[INSN_UID (insn)]);*/
switch (get_attr_length (insn))
{
data_section ();
- pos = fprintf (file, "\n! Hitachi SH cc1 (%s) arguments:", version_string);
+ pos = fprintf (file, "\n! Hitachi SH cc1 (%s) (release D-1) arguments:", version_string);
output_options (file, f_options, f_len, W_options, W_len,
pos, 75, " ", "\n! ", "\n\n");
}
/* otherwise it will be several insns, but we pretend that it will be more than
just the components, so that combine doesn't glue together a load of shifts into
one shift which has to be emitted as a bunch anyway - breaking scheduling */
- return 100;
+ return 1;
}
int
return 3;
return 5;
}
+
/* Return the minimum number of shift instructions needed to move a
   value by I bit positions, given that the hardware provides
   single-insn shifts of 16, 8, 2 and 1 bits (greedy decomposition,
   largest shift first).  Returns 0 for I <= 0.  */

int
howshift (i)
     int i;
{
  int total = 0;

  while (i > 0)
    {
      /* Peel off the largest available single-insn shift.  */
      if (i >= 16)
	i -= 16;
      else if (i >= 8)
	i -= 8;
      else if (i >= 2)
	i -= 2;
      else
	i -= 1;
      total++;
    }
  return total;
}
+
/* Return the cost of a multiply */
int
multcosts (RTX)
rtx RTX;
{
+ /* If mult by a power of 2 then work out how we'd shift to make it */
+ int insn_cost;
+
+ if (GET_CODE (XEXP (RTX, 1)) == CONST_INT)
+ {
+ int i = exact_log2 (INTVAL (XEXP (RTX, 1)));
+ if (i >= 0)
+ insn_cost = howshift (i);
+ else
+ insn_cost = 100000;
+ }
if (TARGET_SH2)
- return 2;
+ {
+ /* We have a mul insn, so we can never take more than the mul and the
+ read of the mac reg, but count more because of the latency and extra reg
+ usage */
+ if (TARGET_SMALLCODE)
+ return 2;
+ if (insn_cost > 5)
+ return 5;
+ return insn_cost;
+ }
+
/* If we we're aiming at small code, then just count the number of
- insns in a multiply call sequence, otherwise, count all the insnsn
- inside the call. */
- if (TARGET_SMALLCODE)
- return 3;
- return 30;
+ insns in a multiply call sequence */
+
+ if (TARGET_SMALLCODE)
+ {
+ if (insn_cost > 6)
+ return 6;
+ return insn_cost;
+ }
+
+ /* Otherwise count all the insns in the routine we'd be calling too */
+ return 20;
}
/* Code to expand a shift */
}
/* Expand a short sequence inline, longer call a magic routine */
- if (value < 4)
+ if (value <= 5)
{
emit_move_insn (wrk, operands[1]);
while (value--)
}
need_align = 1;
-
for (i = 0; i < pool_size; i++)
{
pool_node *p = pool_vector + i;
if (need_align)
{
need_align = 0;
+ scan = emit_label_after (gen_label_rtx (), scan);
scan = emit_insn_after (gen_align_4 (), scan);
}
scan = emit_label_after (p->label, scan);
if (need_align)
{
need_align = 0;
+ scan = emit_label_after (gen_label_rtx (), scan);
scan = emit_insn_after (gen_align_4 (), scan);
}
scan = emit_label_after (p->label, scan);
hi_const (src)
rtx src;
{
+ if (GET_CODE (src) == CONST
+ && GET_CODE (XEXP (src, 0)) == SIGN_EXTEND
+ && GET_CODE (XEXP (XEXP (src, 0), 0)) == SYMBOL_REF)
+ return 1;
+
+ if (TARGET_SHORTADDR
+ && GET_CODE (src) == SYMBOL_REF)
+ return 1;
+
return (GET_CODE (src) == CONST_INT
&& INTVAL (src) >= -32768
&& INTVAL (src) <= 32767);
{
/* This is an HI source, clobber the dest to get the mode right too */
mode = HImode;
+ while (GET_CODE (dst) == SUBREG)
+ dst = SUBREG_REG (dst);
dst = gen_rtx (REG, HImode, REGNO (dst));
}
lab = add_constant (src, mode);
rtx *operands;
int code;
{
+ if (code != EQ && code != NE)
+ {
+ /* Force args into regs, since we can't use constants here */
+ sh_compare_op0 = force_reg (SImode, sh_compare_op0);
+ if (sh_compare_op1 != const0_rtx)
+ sh_compare_op1 = force_reg (SImode, sh_compare_op1);
+ }
operands[1] = sh_compare_op0;
- operands[2] = force_reg (SImode, sh_compare_op1);
- operands[1] = force_reg (SImode, operands[1]);
+ operands[2] = sh_compare_op1;
}
/* Non-zero if x is EQ or NE */
current_function_anonymous_args = 0;
for (i = 0; i < 32; i++)
shiftsyms[i] = 0;
+
}
/* Define the offset between two registers, one to be eliminated, and
\f
/* insn expand helpers */
-/* Emit insns to perform a call. If TARGET_SMALLCALL, then load the
+/* Emit insns to perform a call.
+ If TARGET_SHORTADDR then use a bsr. If TARGET_SMALLCALL, then load the
target address into r1 and call __saveargs, otherwise
perform the standard call sequence */
rtx call_target = operands[isa_retval + 0];
rtx numargs = operands[isa_retval + 1];
- if (GET_CODE (call_target) == MEM)
- {
- call_target = force_reg (Pmode,
- XEXP (call_target, 0));
- }
- if (TARGET_SMALLCALL)
+ if (TARGET_BSR)
{
- rtx tmp = gen_reg_rtx (SImode);
- rtx r1 = gen_rtx (REG, SImode, 1);
- emit_move_insn (tmp, gen_rtx (SYMBOL_REF, SImode, "__saveargs"));
- emit_move_insn (r1, call_target);
- emit_insn (gen_rtx (USE, VOIDmode, r1));
- call_target = tmp;
+ call = gen_rtx (CALL, VOIDmode, call_target, numargs);
}
+ else {
- call = gen_rtx (CALL, VOIDmode, gen_rtx (MEM, SImode, call_target), numargs);
+ if (GET_CODE (call_target) == MEM)
+ {
+ call_target = force_reg (Pmode,
+ XEXP (call_target, 0));
+ }
+ if (TARGET_SMALLCALL)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+ rtx r1 = gen_rtx (REG, SImode, 1);
+ emit_move_insn (tmp, gen_rtx (SYMBOL_REF, SImode, "__saveargs"));
+ emit_move_insn (r1, call_target);
+ emit_insn (gen_rtx (USE, VOIDmode, r1));
+ call_target = tmp;
+ }
+ call = gen_rtx (CALL, VOIDmode, gen_rtx (MEM, SImode, call_target), numargs);
+ }
if (isa_retval)
{
call = gen_rtx (SET, VOIDmode, ret, call);
emit_call_insn (gen_rtx (PARALLEL, VOIDmode,
gen_rtvec (2,
call,
- gen_rtx (CLOBBER, VOIDmode, gen_rtx (REG, SImode, 17)))));
+ gen_rtx (CLOBBER, VOIDmode, gen_rtx (REG, SImode, 17)))));
}
\f
enum machine_mode mode;
{
if (GET_CODE (op) == MEM
- && GET_CODE (XEXP (op, 0)) == PRE_INC)
+ && (GET_CODE (XEXP (op, 0)) == PRE_INC
+ || GET_CODE (XEXP (op, 0)) == POST_INC
+ || GET_CODE (XEXP (op, 0)) == POST_DEC))
return 0;
+
return general_operand (op, mode);
}
+
+/* Returns 1 if OP is valid destination for a bsr. */
+
+int
+bsr_operand (op, mode)
+rtx op;
+enum machine_mode mode;
+{
+ if (GET_CODE (op) == SYMBOL_REF)
+ return 1;
+ return 0;
+}
+
/* Returns 1 if OP is an immediate ok for a byte index. */
int
return 0;
}
+/* Returns 1 if OP is a valid operand for a MAC instruction,
+ either a register or indirect memory. For now we don't
+ try and recognise a mac insn */
+
+int
+mac_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (arith_reg_operand (op, mode))
+ return 1;
+#if 0
+ Turned off till mac is understood
+ if (GET_CODE (op) == MEM)
+ return 1;
+#endif
+ return 0;
+}
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis). */
+
+rtx
+sh_function_arg (cum, mode, type, named)
+CUMULATIVE_ARGS cum;
+enum machine_mode mode;
+tree type;
+int named;
+{
+ if (named)
+ {
+ int rr = (ROUND_REG ((cum), (mode)));
+
+ if (rr < NPARM_REGS)
+ {
+ return ((((mode) != BLKmode
+ && ((type)==0 || ! TREE_ADDRESSABLE ((tree)(type)))
+ && ((type)==0 || (mode) != BLKmode
+ || (TYPE_ALIGN ((type)) % PARM_BOUNDARY == 0))
+ ? gen_rtx (REG, (mode),
+ (FIRST_PARM_REG + rr)): 0)));
+
+ }
+ }
+ return 0;
+}
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero.
+ Any arg that starts in the first 4 regs but won't entirely fit in them
+ needs partial registers on the SH. */
+
+int
+sh_function_arg_partial_nregs (CUM, MODE, TYPE, NAMED)
+ CUMULATIVE_ARGS CUM;
+ enum machine_mode MODE;
+ tree TYPE;
+ int NAMED;
+{
+ if ((CUM) < NPARM_REGS)
+ {
+ if (((TYPE)==0 || ! TREE_ADDRESSABLE ((tree)(TYPE)))
+ && ((TYPE)==0 || (MODE) != BLKmode
+ || (TYPE_ALIGN ((TYPE)) % PARM_BOUNDARY == 0))
+ && ((CUM) + ((MODE) == BLKmode
+ ? ROUND_ADVANCE (int_size_in_bytes (TYPE))
+ : ROUND_ADVANCE (GET_MODE_SIZE (MODE))) - NPARM_REGS > 0))
+ {
+ return NPARM_REGS - CUM;
+ }
+ }
+ return 0;
+}
+
#define CONSTLEN_2_BIT (1<<20)
#define CONSTLEN_3_BIT (1<<21)
#define HITACHI_BIT (1<<22)
+#define PARANOID_BIT (1<<23)
+#define RETR2_BIT (1<<24)
+#define CONSTLEN_0_BIT (1<<25)
+#define BSR_BIT (1<<26)
+#define SHORTADDR_BIT (1<<27)
/* Nonzero if we should generate code using type 0 insns */
#define TARGET_SH0 (target_flags & SH0_BIT)
/* Select max size of computed constant code sequences to be 3 insns */
#define TARGET_CLEN3 (target_flags & CONSTLEN_3_BIT)
+/* Select max size of computed constant code sequences to be 0 insns - ie don't do it */
+#define TARGET_CLEN0 (target_flags & CONSTLEN_0_BIT)
+
/* Nonzero if using Hitachi's calling convention */
-#define TARGET_HITACHI (target_flags & HITACHI_BIT)
+#define TARGET_HITACHI (target_flags & HITACHI_BIT)
+#define TARGET_PARANOID (target_flags & PARANOID_BIT)
+#define TARGET_RETR2 (target_flags & RETR2_BIT)
+#define TARGET_SHORTADDR (target_flags & SHORTADDR_BIT)
+#define TARGET_BSR (target_flags & BSR_BIT)
+
#define TARGET_SWITCHES \
{ {"isize", ( ISIZE_BIT) }, \
{"R", ( R_BIT) }, \
{"nosave", ( NOSAVE_BIT) }, \
{"clen3", ( CONSTLEN_3_BIT) }, \
+ {"clen0", ( CONSTLEN_0_BIT) }, \
{"smallcall", ( SMALLCALL_BIT) }, \
{"hitachi", ( HITACHI_BIT) }, \
+ {"paranoid", ( PARANOID_BIT) }, \
+ {"r2", ( RETR2_BIT) }, \
+ {"shortaddr", ( SHORTADDR_BIT) }, \
+ {"bsr", ( BSR_BIT) }, \
{"", TARGET_DEFAULT} \
}
\
optimize = 1; \
flag_delayed_branch = 1; \
- \
+ /* But never run scheduling before reload, since than can \
+ break global alloc, and generates slower code anyway due \
+ to the pressure on R0. */ \
+ flag_schedule_insns = 0; \
if (max_si) \
max_count_si = atoi (max_si); \
else \
max_count_hi = atoi (max_hi); \
else \
max_count_hi = 505; \
+ if (TARGET_BSR) \
+ flag_no_function_cse = 1; \
} while (0)
\f
#define FIRST_PSEUDO_REGISTER 22
/* 1 for registers that have pervasive standard uses
- and are not available for the register allocator. */
+ and are not available for the register allocator.
+
+ mach register is fixed 'cause it's only 10 bits wide */
+
/* r0 r1 r2 r3
r4 r5 r6 r7
r8 r9 r10 r11
1, 1, 1, 1, \
1, 1}
+
/* 1 for registers not available across function calls.
These must include the FIXED_REGISTERS and also any
registers that can be used without being saved.
These two macros are used only in other macro definitions below. */
#define NPARM_REGS 4
#define FIRST_PARM_REG 4
-#define FIRST_RET_REG 0
+#define FIRST_RET_REG (TARGET_RETR2 ? 2 : 0)
/* Define this if pushing a word on the stack
makes the stack pointer a smaller address. */
/* Round a register number up to a proper boundary for an arg of mode
MODE.
- We round to an even reg for things larger than a word */
+ The SH doesn't care about double alignment, so we only
+ round doubles to even regs when asked to explicitly. */
#define ROUND_REG(X, MODE) \
((TARGET_ALIGN_DOUBLE \
NPARM_REGS words is at least partially passed in a register unless
its data type forbids. */
-#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
- (NAMED && ROUND_REG ((CUM), (MODE)) < NPARM_REGS \
- && (MODE) != BLKmode \
- && ((TYPE)==0 || ! TREE_ADDRESSABLE ((tree)(TYPE))) \
- && ((TYPE)==0 || (MODE) != BLKmode \
- || (TYPE_ALIGN ((TYPE)) % PARM_BOUNDARY == 0)) \
- ? gen_rtx (REG, (MODE), \
- (FIRST_PARM_REG + ROUND_REG ((CUM), (MODE)))) \
- : 0)
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ sh_function_arg (CUM, MODE, TYPE, NAMED)
+
+extern struct rtx_def *sh_function_arg();
/* For an arg passed partly in registers and partly in memory,
this is the number of registers used.
For args passed entirely in registers or entirely in memory, zero.
- We never split args */
+ We sometimes split args */
-#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) 0
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ sh_function_arg_partial_nregs (CUM, MODE, TYPE, NAMED)
extern int current_function_anonymous_args;
\f
/* Addressing modes, and classification of registers for them. */
-/*#define HAVE_POST_INCREMENT 1*/
+#define HAVE_POST_INCREMENT 1
/*#define HAVE_PRE_INCREMENT 1*/
/*#define HAVE_POST_DECREMENT 1*/
-/*#define HAVE_PRE_DECREMENT 1*/
+#define HAVE_PRE_DECREMENT 1
/* Macros to check register numbers against specific register classes. */
The symbol REG_OK_STRICT causes the latter definition to be used. */
#define MODE_DISP_OK_4(X,MODE) ((GET_MODE_SIZE(MODE)==4) && ((unsigned)INTVAL(X)<64))
+#define MODE_DISP_OK_8(X,MODE) ((GET_MODE_SIZE(MODE)==8) && ((unsigned)INTVAL(X)<60))
#define MODE_DISP_OK_2(X,MODE) ((GET_MODE_SIZE(MODE)==2) && ((unsigned)INTVAL(X)<32) && TARGET_TRYR0)
#define MODE_DISP_OK_1(X,MODE) ((GET_MODE_SIZE(MODE)==1) && ((unsigned)INTVAL(X)<16) && TARGET_TRYR0)
(REGNO (X) == 0 || REGNO(X) >= FIRST_PSEUDO_REGISTER)
#define REG_OK_FOR_PRE_POST_P(X) \
- (REGNO (X) <= 16)
+ (REG_OK_FOR_INDEX_P (X))
#else
/* Nonzero if X is a hard reg that can be used as a base reg. */
REGNO_OK_FOR_INDEX_P (REGNO (X))
#define REG_OK_FOR_PRE_POST_P(X) \
- (REGNO (X) <= 16)
+ (REGNO_OK_FOR_INDEX_P (REGNO (X)))
#endif
/* The Q is a pc relative load operand */
if (GET_CODE (OP) == CONST_INT) \
{ \
if (MODE_DISP_OK_4 (OP, MODE)) goto LABEL; \
+ if (MODE_DISP_OK_8 (OP, MODE)) goto LABEL; \
if (MODE_DISP_OK_2 (OP, MODE)) goto LABEL; \
if (MODE_DISP_OK_1 (OP, MODE)) goto LABEL; \
} \
{ \
rtx xop0 = XEXP(X,0); \
rtx xop1 = XEXP(X,1); \
- if (GET_MODE_SIZE(MODE) <= 4 && BASE_REGISTER_RTX_P (xop0)) \
+ if (GET_MODE_SIZE(MODE) <= 8 && BASE_REGISTER_RTX_P (xop0)) \
GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \
- if (GET_MODE_SIZE(MODE) <= 4 && BASE_REGISTER_RTX_P (xop1)) \
+ if (GET_MODE_SIZE(MODE) <= 8 && BASE_REGISTER_RTX_P (xop1)) \
GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \
- if (GET_MODE_SIZE(MODE)<=4) { \
+ if (GET_MODE_SIZE(MODE)<= 4) { \
if(BASE_REGISTER_RTX_P(xop1) && \
INDEX_REGISTER_RTX_P(xop0)) goto LABEL; \
if(INDEX_REGISTER_RTX_P(xop1) && \
It is always safe for this macro to do nothing. It exists to recognize
opportunities to optimize the output.
- On the SH we don't try anything */
+ */
-#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) ;
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) ;
/* Go to LABEL if ADDR (a legitimate address expression)
has an effect that depends on the machine mode it is used for. */
return COSTS_N_INSNS (multcosts (X)); \
case ASHIFT: \
case ASHIFTRT: \
- case LSHIFTRT: \
return COSTS_N_INSNS (shiftcosts (X)) ; \
case DIV: \
case UDIV: \
#define CTORS_SECTION_ASM_OP "\t.section\t.ctors\n"
#define DTORS_SECTION_ASM_OP "\t.section\t.dtors\n"
#define INIT_SECTION_ASM_OP "\t.section\t.init\n"
-
-/* Assemble generic sections.
- This is currently only used to support section attributes. */
-
-#define ASM_OUTPUT_SECTION_NAME(FILE, NAME) \
- fprintf (FILE, ".section\t%s\n", NAME)
-
#define EXTRA_SECTIONS in_ctors, in_dtors
#define EXTRA_SECTION_FUNCTIONS \
} \
}
+/* Assemble generic sections.
+ This is currently only used to support section attributes. */
+
+#define ASM_OUTPUT_SECTION_NAME(FILE, NAME) \
+ do { fprintf (FILE, ".section\t%s\n", NAME); } while (0)
+
#define ASM_OUTPUT_CONSTRUCTOR(FILE,NAME) \
do { ctors_section(); fprintf(FILE,"\t.long\t_%s\n", NAME); } while (0)
#define PRINT_OPERAND_ADDRESS(STREAM,X) print_operand_address (STREAM, X)
#define PRINT_OPERAND_PUNCT_VALID_P(CHAR) \
- ((CHAR)=='.' || (CHAR) == '#' || (CHAR) == '*' || (CHAR) == '^' || (CHAR)=='!')
+ ((CHAR)=='.' || (CHAR) == '#' || (CHAR) == '*' || (CHAR) == '^' || (CHAR)=='!' || (CHAR)=='@')
\f
extern struct rtx_def *sh_compare_op0;
extern struct rtx_def *prepare_scc_operands();
extern struct rtx_def *table_lab;
+
extern enum attr_cpu sh_cpu; /* target cpu */
/* Declare functions defined in sh.c and used in templates. */
/* Set when processing a function with pragma interrupt turned on. */
extern int pragma_interrupt;
-#define MOVE_RATIO 16
+#define MOVE_RATIO (TARGET_SMALLCODE ? 4 : 16)
char *max_si;
char *max_hi;