/* Target Code for R8C/M16C/M32C
- Copyright (C) 2005
+ Copyright (C) 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
Contributed by Red Hat.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 2, or (at your
+ by the Free Software Foundation; either version 3, or (at your
option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
License for more details.
You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "target-def.h"
#include "tm_p.h"
#include "langhooks.h"
-#include "tree-gimple.h"
+#include "gimple.h"
+#include "df.h"
/* Prototypes */
} Push_Pop_Type;
static tree interrupt_handler (tree *, tree, tree, int, bool *);
+static tree function_vector_handler (tree *, tree, tree, int, bool *);
static int interrupt_p (tree node);
static bool m32c_asm_integer (rtx, unsigned int, int);
-static int m32c_comp_type_attributes (tree, tree);
+static int m32c_comp_type_attributes (const_tree, const_tree);
static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
static struct machine_function *m32c_init_machine_status (void);
static void m32c_insert_attributes (tree, tree *);
static bool m32c_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
- tree, bool);
-static bool m32c_promote_prototypes (tree);
+ const_tree, bool);
+static bool m32c_promote_prototypes (const_tree);
static int m32c_pushm_popm (Push_Pop_Type);
static bool m32c_strict_argument_naming (CUMULATIVE_ARGS *);
static rtx m32c_struct_value_rtx (tree, int);
static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
static int need_to_save (int);
+int current_function_special_page_vector (rtx);
+
+#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
#define streq(a,b) (strcmp ((a), (b)) == 0)
/* Used by m32c_register_move_cost to determine if a move is
impossibly expensive. */
static int
-class_can_hold_mode (int class, enum machine_mode mode)
+class_can_hold_mode (int rclass, enum machine_mode mode)
{
/* Cache the results: 0=untested 1=no 2=yes */
static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
- if (results[class][mode] == 0)
+ if (results[rclass][mode] == 0)
{
int r, n, i;
- results[class][mode] = 1;
+ results[rclass][mode] = 1;
for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
- if (class_contents[class][0] & (1 << r)
+ if (class_contents[rclass][0] & (1 << r)
&& HARD_REGNO_MODE_OK (r, mode))
{
int ok = 1;
n = HARD_REGNO_NREGS (r, mode);
for (i = 1; i < n; i++)
- if (!(class_contents[class][0] & (1 << (r + i))))
+ if (!(class_contents[rclass][0] & (1 << (r + i))))
ok = 0;
if (ok)
{
- results[class][mode] = 2;
+ results[rclass][mode] = 2;
break;
}
}
}
#if DEBUG0
fprintf (stderr, "class %s can hold %s? %s\n",
- class_names[class], mode_name[mode],
- (results[class][mode] == 2) ? "yes" : "no");
+ class_names[rclass], mode_name[mode],
+ (results[rclass][mode] == 2) ? "yes" : "no");
#endif
- return results[class][mode] == 2;
+ return results[rclass][mode] == 2;
}
/* Run-time Target Specification. */
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN m32c_promote_function_return
bool
-m32c_promote_function_return (tree fntype ATTRIBUTE_UNUSED)
+m32c_promote_function_return (const_tree fntype ATTRIBUTE_UNUSED)
{
return false;
}
/* Implements HARD_REGNO_NREGS. This is complicated by the fact that
different registers are different sizes from each other, *and* may
be different sizes in different chip families. */
-int
-m32c_hard_regno_nregs (int regno, enum machine_mode mode)
+static int
+m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
{
if (regno == FLG_REGNO && mode == CCmode)
return 1;
return 0;
}
+/* Implements HARD_REGNO_NREGS.  The helper returns 0 for registers
+   that cannot hold MODE at all, but HARD_REGNO_NREGS must report a
+   positive register count for every (regno, mode) pair, so map a
+   zero result to 1.  */
+int
+m32c_hard_regno_nregs (int regno, enum machine_mode mode)
+{
+  int count = m32c_hard_regno_nregs_1 (regno, mode);
+  if (count == 0)
+    count = 1;
+  return count;
+}
+
/* Implements HARD_REGNO_MODE_OK. The above function does the work
already; just test its return value. */
int
m32c_hard_regno_ok (int regno, enum machine_mode mode)
{
- return m32c_hard_regno_nregs (regno, mode) != 0;
+ return m32c_hard_regno_nregs_1 (regno, mode) != 0;
}
/* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
m32c_cannot_change_mode_class (enum machine_mode from,
enum machine_mode to, int rclass)
{
+ int rn;
#if DEBUG0
fprintf (stderr, "cannot change from %s to %s in %s\n",
mode_name[from], mode_name[to], class_names[rclass]);
#endif
+ /* If the larger mode isn't allowed in any of these registers, we
+ can't allow the change. */
+ for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
+ if (class_contents[rclass][0] & (1 << rn))
+ if (! m32c_hard_regno_ok (rn, to))
+ return 1;
+
if (to == QImode)
return (class_contents[rclass][0] & 0x1ffa);
if (memcmp (str, "Ilb", 3) == 0)
{
int b = exact_log2 (value);
- return (b >= 1 && b <= 8);
+ return (b >= 0 && b <= 7);
}
if (memcmp (str, "Imb", 3) == 0)
{
int b = exact_log2 ((value ^ 0xff) & 0xff);
- return (b >= 1 && b <= 8);
+ return (b >= 0 && b <= 7);
+ }
+ if (memcmp (str, "ImB", 3) == 0)
+ {
+ int b = exact_log2 ((value ^ 0xffff) & 0xffff);
+ return (b >= 0 && b <= 7);
}
if (memcmp (str, "Ilw", 3) == 0)
{
int b = exact_log2 (value);
- return (b >= 1 && b <= 16);
+ return (b >= 0 && b <= 15);
}
if (memcmp (str, "Imw", 3) == 0)
{
int b = exact_log2 ((value ^ 0xffff) & 0xffff);
- return (b >= 1 && b <= 16);
+ return (b >= 0 && b <= 15);
}
if (memcmp (str, "I00", 3) == 0)
{
if (TARGET_A24)
{
- mode = SImode;
+ /* It's four bytes */
+ mode = PSImode;
offset = 4;
}
else
case 0:
return A0_REGNO;
case 1:
- return A1_REGNO;
+ if (TARGET_A16)
+ return R3_REGNO;
+ else
+ return R1_REGNO;
default:
return INVALID_REGNUM;
}
{
rtx sa;
- sa = gen_reg_rtx (Pmode);
+ sa = gen_rtx_REG (Pmode, R0_REGNO);
cfun->machine->eh_stack_adjust = sa;
}
return cfun->machine->eh_stack_adjust;
int a24_bytes;
} pushm_info[] =
{
- /* These are in push order. */
- { FB_REGNO, 0x01, 2, 4 },
- { SB_REGNO, 0x02, 2, 4 },
- { A1_REGNO, 0x04, 2, 4 },
- { A0_REGNO, 0x08, 2, 4 },
- { R3_REGNO, 0x10, 2, 2 },
- { R2_REGNO, 0x20, 2, 2 },
+ /* These are in reverse push (nearest-to-sp) order. */
+ { R0_REGNO, 0x80, 2, 2 },
{ R1_REGNO, 0x40, 2, 2 },
- { R0_REGNO, 0x80, 2, 2 }
+ { R2_REGNO, 0x20, 2, 2 },
+ { R3_REGNO, 0x10, 2, 2 },
+ { A0_REGNO, 0x08, 2, 4 },
+ { A1_REGNO, 0x04, 2, 4 },
+ { SB_REGNO, 0x02, 2, 4 },
+ { FB_REGNO, 0x01, 2, 4 }
};
#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
calls something else (because we don't know what *that* function
might do), but try to be a bit smarter if the handler is a leaf
function. We always save $a0, though, because we use that in the
- epilog to copy $fb to $sp. */
+ epilogue to copy $fb to $sp. */
static int
need_to_save (int regno)
{
if (fixed_regs[regno])
return 0;
- if (cfun->calls_eh_return)
+ if (crtl->calls_eh_return)
return 1;
if (regno == FP_REGNO)
return 0;
if (cfun->machine->is_interrupt
&& (!cfun->machine->is_leaf || regno == A0_REGNO))
return 1;
- if (regs_ever_live[regno]
+ if (df_regs_ever_live_p (regno)
&& (!call_used_regs[regno] || cfun->machine->is_interrupt))
return 1;
return 0;
int n_dwarfs = 0;
int nosave_mask = 0;
- if (cfun->return_rtx
- && GET_CODE (cfun->return_rtx) == PARALLEL
- && !(cfun->calls_eh_return || cfun->machine->is_interrupt))
+ if (crtl->return_rtx
+ && GET_CODE (crtl->return_rtx) == PARALLEL
+ && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
{
- rtx exp = XVECEXP (cfun->return_rtx, 0, 0);
+ rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
rtx rv = XEXP (exp, 0);
int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES m32c_promote_prototypes
static bool
-m32c_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
+m32c_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
{
return 0;
}
if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
return NULL_RTX;
+ if (type && AGGREGATE_TYPE_P (type))
+ return NULL_RTX;
+
switch (ca->parm_num)
{
case 1:
static bool
m32c_pass_by_reference (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED,
enum machine_mode mode ATTRIBUTE_UNUSED,
- tree type ATTRIBUTE_UNUSED,
+ const_tree type ATTRIBUTE_UNUSED,
bool named ATTRIBUTE_UNUSED)
{
return 0;
/* Implements INIT_CUMULATIVE_ARGS. */
void
m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
- tree fntype ATTRIBUTE_UNUSED,
+ tree fntype,
rtx libname ATTRIBUTE_UNUSED,
- tree fndecl ATTRIBUTE_UNUSED,
+ tree fndecl,
int n_named_args ATTRIBUTE_UNUSED)
{
- ca->force_mem = 0;
+ if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
+ ca->force_mem = 1;
+ else
+ ca->force_mem = 0;
ca->parm_num = 1;
}
{
if (ca->force_mem)
ca->force_mem = 0;
- ca->parm_num++;
+ else
+ ca->parm_num++;
}
/* Implements FUNCTION_ARG_REGNO_P. */
}
/* HImode and PSImode are the two "native" modes as far as GCC is
- concerned, but the chips also support a 32 bit mode which is used
+ concerned, but the chips also support a 32-bit mode which is used
for some opcodes in R8C/M16C and for reset vectors and such. */
#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
/* Implements FUNCTION_VALUE. Functions and libcalls have the same
conventions. */
rtx
-m32c_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
+m32c_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
{
/* return reg or parallel */
- enum machine_mode mode = TYPE_MODE (valtype);
+ const enum machine_mode mode = TYPE_MODE (valtype);
return m32c_libcall_value (mode);
}
emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
emit_move_insn (A0 (HImode, 2), chainval);
emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
- /* We use 16 bit addresses here, but store the zero to turn it
- into a 24 bit offset. */
+ /* We use 16-bit addresses here, but store the zero to turn it
+ into a 24-bit offset. */
emit_move_insn (A0 (HImode, 5), function);
emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
}
if (TARGET_A24)
{
/* We do this because the M32C has an HImode operand, but the
- M16C has an 8 bit operand. Since gcc looks at the match data
+ M16C has an 8-bit operand. Since gcc looks at the match data
and not the expanded rtl, we have to reset the array so that
the right modes are found. */
setcc_gen_code[EQ] = CODE_FOR_seq_24;
}
/* We have three choices for choosing fb->aN offsets. If we choose -128,
- we need one MOVA -128[fb],aN opcode and 16 bit aN displacements,
+ we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
like this:
EB 4B FF mova -128[$fb],$a0
D8 0C FF FF mov.w:Q #0,-1[$a0]
- Alternately, we subtract the frame size, and hopefully use 8 bit aN
+ Alternately, we subtract the frame size, and hopefully use 8-bit aN
displacements:
7B F4 stc $fb,$a0
77 54 00 01 sub #256,$a0
We have to subtract *something* so that we have a PLUS rtx to mark
that we've done this reload. The -128 offset will never result in
- an 8 bit aN offset, and the payoff for the second case is five
+ an 8-bit aN offset, and the payoff for the second case is five
loads *if* those loads are within 256 bytes of the other end of the
frame, so the third case seems best. Note that we subtract the
zero, but detect that in the addhi3 pattern. */
return 0;
}
-/* Used in GO_IF_MODE_DEPENDENT_ADDRESS. */
-int
-m32c_mode_dependent_address (rtx addr)
-{
- if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == PRE_DEC)
- return 1;
- return 0;
-}
-
/* Implements LEGITIMATE_CONSTANT_P. We split large constants anyway,
so we can allow anything. */
int
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m32c_rtx_costs
static bool
-m32c_rtx_costs (rtx x, int code, int outer_code, int *total)
+m32c_rtx_costs (rtx x, int code, int outer_code, int *total,
+ bool speed ATTRIBUTE_UNUSED)
{
switch (code)
{
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST m32c_address_cost
static int
-m32c_address_cost (rtx addr)
+m32c_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
{
+ int i;
/* fprintf(stderr, "\naddress_cost\n");
debug_rtx(addr);*/
switch (GET_CODE (addr))
{
case CONST_INT:
- return COSTS_N_INSNS(1);
+ i = INTVAL (addr);
+ if (i == 0)
+ return COSTS_N_INSNS(1);
+ if (0 < i && i <= 255)
+ return COSTS_N_INSNS(2);
+ if (0 < i && i <= 65535)
+ return COSTS_N_INSNS(3);
+ return COSTS_N_INSNS(4);
case SYMBOL_REF:
- return COSTS_N_INSNS(3);
+ return COSTS_N_INSNS(4);
case REG:
- return COSTS_N_INSNS(2);
+ return COSTS_N_INSNS(1);
+ case PLUS:
+ if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ {
+ i = INTVAL (XEXP (addr, 1));
+ if (i == 0)
+ return COSTS_N_INSNS(1);
+ if (0 < i && i <= 255)
+ return COSTS_N_INSNS(2);
+ if (0 < i && i <= 65535)
+ return COSTS_N_INSNS(3);
+ }
+ return COSTS_N_INSNS(4);
default:
return 0;
}
const char *comma;
HOST_WIDE_INT ival;
int unsigned_const = 0;
+ int force_sign;
/* Multiplies; constants are converted to sign-extended format but
we need unsigned, so 'u' and 'U' tell us what size unsigned we
code = 0;
encode_pattern (x);
+ force_sign = 0;
for (i = 0; conversions[i].pattern; i++)
if (conversions[i].code == code
&& streq (conversions[i].pattern, pattern))
/* Integers used as addresses are unsigned. */
ival &= (TARGET_A24 ? 0xffffff : 0xffff);
}
+ if (force_sign && ival >= 0)
+ fputc ('+', file);
fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
break;
}
/* Signed displacements off symbols need to have signs
blended cleanly. */
if (conversions[i].format[j] == '+'
- && (!code || code == 'I')
+ && (!code || code == 'D' || code == 'd')
&& ISDIGIT (conversions[i].format[j + 1])
- && GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
- == CONST_INT
- && INTVAL (patternr[conversions[i].format[j + 1] - '0']) <
- 0)
- continue;
+ && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
+ == CONST_INT))
+ {
+ force_sign = 1;
+ continue;
+ }
fputc (conversions[i].format[j], file);
}
break;
void
m32c_print_operand_address (FILE * stream, rtx address)
{
- gcc_assert (GET_CODE (address) == MEM);
- m32c_print_operand (stream, XEXP (address, 0), 0);
+ if (GET_CODE (address) == MEM)
+ address = XEXP (address, 0);
+ else
+ /* cf: gcc.dg/asm-4.c. */
+ gcc_assert (GET_CODE (address) == REG);
+
+ m32c_print_operand (stream, address, 0);
}
/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
return NULL_TREE;
}
+/* Returns TRUE if FUNC is a FUNCTION_DECL carrying the
+   "function_vector" attribute, FALSE otherwise.  */
+int
+m32c_special_page_vector_p (tree func)
+{
+  tree list;
+
+  if (TREE_CODE (func) != FUNCTION_DECL)
+    return 0;
+
+  /* Walk the decl's attribute list looking for "function_vector".
+     Declaration hoisted above the statements to stay C90-clean,
+     matching the rest of this file.  */
+  for (list = M32C_ATTRIBUTES (func); list; list = TREE_CHAIN (list))
+    if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
+      return 1;
+
+  return 0;
+}
+
+/* Attribute handler for "function_vector".  Validates that the
+   target supports it (not R8C), that the attribute is applied to a
+   function, and that its single argument is an integer constant in
+   the range 18..255.  On any failure a warning is issued and the
+   attribute is dropped via *NO_ADD_ATTRS.  Note: NODE, NAME, ARGS
+   and NO_ADD_ATTRS are all used below, so they must not be marked
+   ATTRIBUTE_UNUSED; only FLAGS is genuinely unused.  */
+static tree
+function_vector_handler (tree * node, tree name, tree args,
+			 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
+{
+  if (TARGET_R8C)
+    {
+      /* The attribute is not supported for R8C target.  */
+      warning (OPT_Wattributes,
+	       "`%s' attribute is not supported for R8C target",
+	       IDENTIFIER_POINTER (name));
+      *no_add_attrs = true;
+    }
+  else if (TREE_CODE (*node) != FUNCTION_DECL)
+    {
+      /* The attribute must be applied to functions only.  */
+      warning (OPT_Wattributes,
+	       "`%s' attribute applies only to functions",
+	       IDENTIFIER_POINTER (name));
+      *no_add_attrs = true;
+    }
+  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
+    {
+      /* The argument must be a constant integer.  */
+      warning (OPT_Wattributes,
+	       "`%s' attribute argument not an integer constant",
+	       IDENTIFIER_POINTER (name));
+      *no_add_attrs = true;
+    }
+  else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
+	   || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
+    {
+      /* The argument value must be between 18 to 255.  */
+      warning (OPT_Wattributes,
+	       "`%s' attribute argument should be between 18 to 255",
+	       IDENTIFIER_POINTER (name));
+      *no_add_attrs = true;
+    }
+  return NULL_TREE;
+}
+
+/* If the function referenced by SYMBOL_REF X carries the
+   'function_vector' attribute, return its function vector number
+   (the attribute's integer argument); otherwise return zero.  The
+   SYMBOL_FLAG_FUNCVEC_FUNCTION flag is set by
+   m32c_encode_section_info for such functions, so check it first as
+   a fast filter.  */
+int
+current_function_special_page_vector (rtx x)
+{
+  tree t, list;
+
+  /* Declarations hoisted above the statements to stay C90-clean,
+     matching the rest of this file.  */
+  if (GET_CODE (x) != SYMBOL_REF
+      || !(SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
+    return 0;
+
+  t = SYMBOL_REF_DECL (x);
+  if (TREE_CODE (t) != FUNCTION_DECL)
+    return 0;
+
+  for (list = M32C_ATTRIBUTES (t); list; list = TREE_CHAIN (list))
+    if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
+      /* The attribute's argument list holds the vector number.  */
+      return (int) TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
+
+  return 0;
+}
+
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
static const struct attribute_spec m32c_attribute_table[] = {
{"interrupt", 0, 0, false, false, false, interrupt_handler},
+ {"function_vector", 1, 1, true, false, false, function_vector_handler},
{0, 0, 0, 0, 0, 0, 0}
};
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
static int
-m32c_comp_type_attributes (tree type1 ATTRIBUTE_UNUSED,
- tree type2 ATTRIBUTE_UNUSED)
+m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
+ const_tree type2 ATTRIBUTE_UNUSED)
{
/* 0=incompatible 1=compatible 2=warning */
return 1;
/* Predicates */
+/* This is a list of legal subregs of hard regs.  */
+static const struct {
+  /* Size in bytes of the outer (subreg) mode.  */
+  unsigned char outer_mode_size;
+  /* Size in bytes of the inner (hard reg) mode.  */
+  unsigned char inner_mode_size;
+  /* Bit N set means a subreg at byte offset N is legal; tested
+     against (1 << SUBREG_BYTE) in m32c_illegal_subreg_p.  */
+  unsigned char byte_mask;
+  /* 1 = always legal; 16 = legal only for TARGET_A16;
+     24 = legal only for TARGET_A24 (see m32c_illegal_subreg_p).  */
+  unsigned char legal_when;
+  /* First hard register holding the inner value.  */
+  unsigned int regno;
+} legal_subregs[] = {
+  {1, 2, 0x03, 1, R0_REGNO},	/* r0h r0l */
+  {1, 2, 0x03, 1, R1_REGNO},	/* r1h r1l */
+  {1, 2, 0x01, 1, A0_REGNO},
+  {1, 2, 0x01, 1, A1_REGNO},
+
+  {1, 4, 0x01, 1, A0_REGNO},
+  {1, 4, 0x01, 1, A1_REGNO},
+
+  {2, 4, 0x05, 1, R0_REGNO},	/* r2 r0 */
+  {2, 4, 0x05, 1, R1_REGNO},	/* r3 r1 */
+  {2, 4, 0x05, 16, A0_REGNO},	/* a1 a0 */
+  {2, 4, 0x01, 24, A0_REGNO},	/* a1 a0 */
+  {2, 4, 0x01, 24, A1_REGNO},	/* a1 a0 */
+
+  {4, 8, 0x55, 1, R0_REGNO},	/* r3 r1 r2 r0 */
+};
+
+/* Returns TRUE if OP is a subreg of a hard reg which we don't
+   support.  Only subregs that change size are checked; same-size
+   subregs, subregs of pseudos, and subregs of memory registers
+   (regno >= MEM0_REGNO) are always allowed.  */
+bool
+m32c_illegal_subreg_p (rtx op)
+{
+  int offset;
+  unsigned int i;
+  int src_mode, dest_mode;
+
+  if (GET_CODE (op) != SUBREG)
+    return false;
+
+  dest_mode = GET_MODE (op);
+  offset = SUBREG_BYTE (op);
+  op = SUBREG_REG (op);
+  src_mode = GET_MODE (op);
+
+  /* Same-size mode changes are never a problem.  */
+  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
+    return false;
+  if (GET_CODE (op) != REG)
+    return false;
+  if (REGNO (op) >= MEM0_REGNO)
+    return false;
+
+  /* Convert the byte offset into a one-hot mask so it can be tested
+     against legal_subregs[].byte_mask.  */
+  offset = (1 << offset);
+
+  /* Search the table for an entry matching this outer size / inner
+     size / register / offset combination; if one matches and its
+     legal_when condition holds for the current target, the subreg is
+     legal.  */
+  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
+    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
+	&& legal_subregs[i].regno == REGNO (op)
+	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
+	&& legal_subregs[i].byte_mask & offset)
+      {
+	switch (legal_subregs[i].legal_when)
+	  {
+	  case 1:
+	    return false;
+	  case 16:
+	    if (TARGET_A16)
+	      return false;
+	    break;
+	  case 24:
+	    if (TARGET_A24)
+	      return false;
+	    break;
+	  }
+      }
+  /* No table entry allows it: reject.  */
+  return true;
+}
+
/* Returns TRUE if we support a move between the first two operands.
At the moment, we just want to discourage mem to mem moves until
after reload, because reload has a hard time with our limited
return true;
}
+/* Returns TRUE if two consecutive HImode mov instructions, generated
+   for moving an immediate double data to a double data type variable
+   location, can be combined into single SImode mov instruction.
+   operands[0]/operands[1] are the first MEM/immediate pair,
+   operands[2]/operands[3] the second.  On success, operands[4] is
+   set to the combined SImode MEM and operands[5] to the combined
+   32-bit immediate.  */
+bool
+m32c_immd_dbl_mov (rtx * operands,
+		   enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+  /* flag records which addressing shape matched:
+     1 = bare symbol / symbol+const, 2 = symbol+const on both sides,
+     3 = FB-relative constant offsets on both sides.  */
+  int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
+  const char *str1;
+  const char *str2;
+
+  /* Case 1: first store to a bare SYMBOL_REF, second to
+     SYMBOL_REF + const.  Both must be scalar, non-struct accesses.  */
+  if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
+      && MEM_SCALAR_P (operands[0])
+      && !MEM_IN_STRUCT_P (operands[0])
+      && GET_CODE (XEXP (operands[2], 0)) == CONST
+      && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
+      && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
+      && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
+      && MEM_SCALAR_P (operands[2])
+      && !MEM_IN_STRUCT_P (operands[2]))
+    flag = 1;
+
+  /* Case 2: both stores to SYMBOL_REF + const; the first offset must
+     be 4-byte aligned.  */
+  else if (GET_CODE (XEXP (operands[0], 0)) == CONST
+	   && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
+	   && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
+	   && MEM_SCALAR_P (operands[0])
+	   && !MEM_IN_STRUCT_P (operands[0])
+	   && !(INTVAL (XEXP (XEXP (XEXP (operands[0], 0), 0), 1)) %4)
+	   && GET_CODE (XEXP (operands[2], 0)) == CONST
+	   && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
+	   && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
+	   && MEM_SCALAR_P (operands[2])
+	   && !MEM_IN_STRUCT_P (operands[2]))
+    flag = 2;
+
+  /* Case 3: both stores FB-relative with constant offsets; the first
+     offset must be 4-byte aligned.  */
+  else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
+	   && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
+	   && REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
+	   && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
+	   && MEM_SCALAR_P (operands[0])
+	   && !MEM_IN_STRUCT_P (operands[0])
+	   && !(INTVAL (XEXP (XEXP (operands[0], 0), 1)) %4)
+	   && REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
+	   && GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
+	   && MEM_SCALAR_P (operands[2])
+	   && !MEM_IN_STRUCT_P (operands[2]))
+    flag = 3;
+
+  else
+    return false;
+
+  /* Verify the two destinations really are adjacent halves of the
+     same object.  */
+  switch (flag)
+    {
+    case 1:
+      str1 = XSTR (XEXP (operands[0], 0), 0);
+      str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
+      if (strcmp (str1, str2) == 0)
+	okflag = 1;
+      else
+	okflag = 0;
+      break;
+    case 2:
+      str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
+      str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
+      if (strcmp(str1,str2) == 0)
+	okflag = 1;
+      else
+	okflag = 0;
+      break;
+    case 3:
+      /* The offsets must be exactly 2 bytes apart, and the first must
+	 be negative (offsetsign is offset1's sign bit).  */
+      offset1 = INTVAL (XEXP (XEXP (operands[0], 0), 1));
+      offset2 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
+      offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
+      if (((offset2-offset1) == 2) && offsetsign != 0)
+	okflag = 1;
+      else
+	okflag = 0;
+      break;
+    default:
+      okflag = 0;
+    }
+
+  if (okflag == 1)
+    {
+      HOST_WIDE_INT val;
+      /* Build the combined SImode destination and the combined
+	 immediate: operands[3] supplies the high 16 bits,
+	 operands[1] the low 16 bits.  */
+      operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));
+
+      val = (INTVAL (operands[3]) << 16) + (INTVAL (operands[1]) & 0xFFFF);
+      operands[5] = gen_rtx_CONST_INT (VOIDmode, val);
+
+      return true;
+    }
+
+  return false;
+}
+
/* Expanders */
/* Subregs are non-orthogonal for us, because our registers are all
emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
operands[0] = gen_rtx_MEM (mode, dest_reg);
}
- if (!no_new_pseudos && MEM_P (operands[0]) && MEM_P (operands[1]))
+ if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
operands[1] = copy_to_mode_reg (mode, operands[1]);
return 0;
}
/* Before splitting mem-mem moves, force one operand into a
register. */
- if (!no_new_pseudos && MEM_P (operands[0]) && MEM_P (operands[1]))
+ if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
{
#if DEBUG0
fprintf (stderr, "force_reg...\n");
parts = 2;
#if DEBUG_SPLIT
- fprintf (stderr, "\nsplit_move %d all=%d\n", no_new_pseudos, split_all);
+ fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
+ split_all);
debug_rtx (operands[0]);
debug_rtx (operands[1]);
#endif
else
/* We'll only use it for the shift, no point emitting a move. */
temp = operands[2];
-
- if (TARGET_A16 && mode == SImode)
+ if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
{
/* The m16c has a limit of -16..16 for SI shifts, even when the
shift count is in a register. Since there are so many targets
undefined to skip one of the comparisons. */
rtx count;
- rtx label, lref, insn;
+ rtx label, lref, insn, tempvar;
+
+ emit_move_insn (operands[0], operands[1]);
count = temp;
label = gen_label_rtx ();
lref = gen_rtx_LABEL_REF (VOIDmode, label);
LABEL_NUSES (label) ++;
+ tempvar = gen_reg_rtx (mode);
+
if (shift_code == ASHIFT)
{
/* This is a left shift. We only need check positive counts. */
emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
count, GEN_INT (16), label));
- emit_insn (func (operands[1], operands[1], GEN_INT (8)));
- emit_insn (func (operands[1], operands[1], GEN_INT (8)));
+ emit_insn (func (tempvar, operands[0], GEN_INT (8)));
+ emit_insn (func (operands[0], tempvar, GEN_INT (8)));
insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
emit_label_after (label, insn);
}
/* This is a right shift. We only need check negative counts. */
emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
count, GEN_INT (-16), label));
- emit_insn (func (operands[1], operands[1], GEN_INT (-8)));
- emit_insn (func (operands[1], operands[1], GEN_INT (-8)));
+ emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
+ emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
emit_label_after (label, insn);
}
-
+ operands[1] = operands[0];
+ emit_insn (func (operands[0], operands[0], count));
+ return 1;
}
operands[2] = temp;
emit_insn (gen_truncsipsi2 (operands[0], temp2));
}
+/* Operands of the most recently "pended" compare, consumed later by
+   m32c_unpend_compare, m32c_expand_scc, and m32c_expand_movcc.  */
+static rtx compare_op0, compare_op1;
+
+/* Record the operands of a compare so that emission of the actual
+   compare insn can be deferred until the consumer is known.  */
+void
+m32c_pend_compare (rtx *operands)
+{
+  compare_op0 = operands[0];
+  compare_op1 = operands[1];
+}
+
+/* Emit the compare saved by m32c_pend_compare, choosing the compare
+   pattern that matches the operands' mode.  Each case must 'break':
+   without the breaks, a QImode compare would fall through and also
+   emit HImode and PSImode compares of the same operands, generating
+   wrong-mode compare insns.  */
+void
+m32c_unpend_compare (void)
+{
+  switch (GET_MODE (compare_op0))
+    {
+    case QImode:
+      emit_insn (gen_cmpqi_op (compare_op0, compare_op1));
+      break;
+    case HImode:
+      emit_insn (gen_cmphi_op (compare_op0, compare_op1));
+      break;
+    case PSImode:
+      emit_insn (gen_cmppsi_op (compare_op0, compare_op1));
+      break;
+    default:
+      /* Just to silence the "missing case" warnings.  */ ;
+    }
+}
+
+/* Expand a store-condition (scc) pattern: set operands[0] to the
+   result of applying condition CODE to the operands saved by
+   m32c_pend_compare.  The comparison rtx is built in QImode for
+   TARGET_A16 and HImode otherwise.  */
+void
+m32c_expand_scc (int code, rtx *operands)
+{
+  enum machine_mode mode = TARGET_A16 ? QImode : HImode;
+
+  emit_insn (gen_rtx_SET (mode,
+			  operands[0],
+			  gen_rtx_fmt_ee (code,
+					  mode,
+					  compare_op0,
+					  compare_op1)));
+}
+
/* Pattern Output Functions */
/* Returns a (OP (reg:CC FLG_REGNO) (const_int 0)) from some other
m32c_expand_movcc (rtx *operands)
{
rtx rel = operands[1];
+ rtx cmp;
+
if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
return 1;
if (GET_CODE (operands[2]) != CONST_INT
operands[2] = operands[3];
operands[3] = tmp;
}
- if (TARGET_A16)
- emit_insn (gen_stzx_16 (operands[0], operands[2], operands[3]));
- else if (GET_MODE (operands[0]) == QImode)
- emit_insn (gen_stzx_24_qi (operands[0], operands[2], operands[3]));
- else
- emit_insn (gen_stzx_24_hi (operands[0], operands[2], operands[3]));
+
+ cmp = gen_rtx_fmt_ee (GET_CODE (rel),
+ GET_MODE (rel),
+ compare_op0,
+ compare_op1);
+
+ emit_move_insn (operands[0],
+ gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
+ cmp,
+ operands[2],
+ operands[3]));
return 0;
}
if (INTVAL (operands[1]) != 1)
return 1;
+ /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
+ if (GET_CODE (operands[3]) != CONST_INT)
+ return 1;
+ if (INTVAL (operands[3]) != 0
+ && INTVAL (operands[3]) != 1
+ && INTVAL (operands[3]) != -1)
+ return 1;
+
mask = 1 << INTVAL (operands[2]);
op0 = operands[0];
op0 = sub;
}
- if (no_new_pseudos
+ if (!can_create_pseudo_p ()
|| (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
src0 = op0;
else
mask >>= 8;
}
- if (INTVAL (operands[3]))
+ /* First, we generate a mask with the correct polarity. If we are
+ storing a zero, we want an AND mask, so invert it. */
+ if (INTVAL (operands[3]) == 0)
{
+ /* Storing a zero, use an AND mask */
if (GET_MODE (op0) == HImode)
mask ^= 0xffff;
else
mask ^= 0xff;
}
+ /* Now we need to properly sign-extend the mask in case we need to
+ fall back to an AND or OR opcode. */
if (GET_MODE (op0) == HImode)
{
if (mask & 0x8000)
return buf;
}
+/* Encode symbol attributes of a SYMBOL_REF into its
+   SYMBOL_REF_FLAGS.  */
+static void
+m32c_encode_section_info (tree decl, rtx rtl, int first)
+{
+  default_encode_section_info (decl, rtl, first);
+
+  /* Flag functions carrying the "function_vector" attribute so they
+     can be recognized later from the SYMBOL_REF alone (see
+     current_function_special_page_vector).  */
+  if (TREE_CODE (decl) == FUNCTION_DECL
+      && m32c_special_page_vector_p (decl))
+    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FUNCVEC_FUNCTION;
+}
+
/* Returns TRUE if the current function is a leaf, and thus we can
determine which registers an interrupt function really needs to
save. The logic below is mostly about finding the insn sequence
struct sequence_stack *seq;
int rv;
- saved_first = cfun->emit->x_first_insn;
- saved_last = cfun->emit->x_last_insn;
- for (seq = cfun->emit->sequence_stack; seq && seq->next; seq = seq->next)
+ saved_first = crtl->emit.x_first_insn;
+ saved_last = crtl->emit.x_last_insn;
+ for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
;
if (seq)
{
- cfun->emit->x_first_insn = seq->first;
- cfun->emit->x_last_insn = seq->last;
+ crtl->emit.x_first_insn = seq->first;
+ crtl->emit.x_last_insn = seq->last;
}
rv = leaf_function_p ();
- cfun->emit->x_first_insn = saved_first;
- cfun->emit->x_last_insn = saved_last;
+ crtl->emit.x_first_insn = saved_first;
+ crtl->emit.x_last_insn = saved_last;
return rv;
}
rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
insn = get_insns ();
- for (seq = cfun->emit->sequence_stack;
+ for (seq = crtl->emit.sequence_stack;
seq;
insn = seq->first, seq = seq->next);
if (cfun->machine->use_rts == 0)
F (emit_insn (m32c_all_frame_related
(TARGET_A16
- ? gen_prologue_enter_16 (GEN_INT (frame_size))
- : gen_prologue_enter_24 (GEN_INT (frame_size)))));
+ ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
+ : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
if (extra_frame_size)
{
else
emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
- emit_jump_insn (gen_epilogue_reit (GEN_INT (TARGET_A16 ? 4 : 6)));
+ if (TARGET_A16)
+ emit_jump_insn (gen_epilogue_reit_16 ());
+ else
+ emit_jump_insn (gen_epilogue_reit_24 ());
}
else if (cfun->machine->use_rts)
emit_jump_insn (gen_epilogue_rts ());
+ else if (TARGET_A16)
+ emit_jump_insn (gen_epilogue_exitd_16 ());
else
- emit_jump_insn (gen_epilogue_exitd (GEN_INT (TARGET_A16 ? 2 : 4)));
+ emit_jump_insn (gen_epilogue_exitd_24 ());
emit_barrier ();
}
(fudged), and return (fudged). This is actually easier to do in
assembler, so punt to libgcc. */
emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
- /* emit_insn (gen_rtx_CLOBBER (HImode, gen_rtx_REG (HImode, R0L_REGNO))); */
+ /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
emit_barrier ();
}
+/* Indicate which flags must be properly set for a given conditional.  */
+static int
+flags_needed_for_conditional (rtx cond)
+{
+  switch (GET_CODE (cond))
+    {
+    case LE:
+    case GT:
+      /* Signed <= and > read overflow, sign and zero.  */
+      return FLAGS_OSZ;
+    case LEU:
+    case GTU:
+      /* Unsigned <= and > read zero and carry.  */
+      return FLAGS_ZC;
+    case LT:
+    case GE:
+      /* Signed < and >= read overflow and sign.  */
+      return FLAGS_OS;
+    case LTU:
+    case GEU:
+      /* Unsigned < and >= read carry.  */
+      return FLAGS_C;
+    case EQ:
+    case NE:
+      /* Equality reads only the zero flag.  */
+      return FLAGS_Z;
+    default:
+      /* Not a recognized conditional; callers (m32c_compare_redundant)
+	 treat FLAGS_N as "no usable flags".  */
+      return FLAGS_N;
+    }
+}
+
+#define DEBUG_CMP 0
+
+/* Returns true if a compare insn is redundant because it would only
+   set flags that are already set correctly.  CMP is the compare
+   insn; operands[0] and operands[1] are its compare operands.
+   NOTE(review): operands[2] is captured into op2 below but never
+   used.  */
+static bool
+m32c_compare_redundant (rtx cmp, rtx *operands)
+{
+  int flags_needed;
+  int pflags;
+  rtx prev, pp, next;
+  rtx op0, op1, op2;
+#if DEBUG_CMP
+  int prev_icode, i;
+#endif
+
+  op0 = operands[0];
+  op1 = operands[1];
+  op2 = operands[2];
+
+#if DEBUG_CMP
+  fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
+  debug_rtx(cmp);
+  for (i=0; i<2; i++)
+    {
+      fprintf(stderr, "operands[%d] = ", i);
+      debug_rtx(operands[i]);
+    }
+#endif
+
+  /* The insn after the compare must consume the flags: either a
+     conditional branch (SET of an IF_THEN_ELSE) or a conditional
+     set.  */
+  next = next_nonnote_insn (cmp);
+  if (!next || !INSN_P (next))
+    {
+#if DEBUG_CMP
+      fprintf(stderr, "compare not followed by insn\n");
+      debug_rtx(next);
+#endif
+      return false;
+    }
+  if (GET_CODE (PATTERN (next)) == SET
+      && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
+    {
+      next = XEXP (XEXP (PATTERN (next), 1), 0);
+    }
+  else if (GET_CODE (PATTERN (next)) == SET)
+    {
+      /* If this is a conditional, flags_needed will be something
+	 other than FLAGS_N, which we test below.  */
+      next = XEXP (PATTERN (next), 1);
+    }
+  else
+    {
+#if DEBUG_CMP
+      fprintf(stderr, "compare not followed by conditional\n");
+      debug_rtx(next);
+#endif
+      return false;
+    }
+#if DEBUG_CMP
+  fprintf(stderr, "conditional is: ");
+  debug_rtx(next);
+#endif
+
+  flags_needed = flags_needed_for_conditional (next);
+  if (flags_needed == FLAGS_N)
+    {
+#if DEBUG_CMP
+      fprintf(stderr, "compare not followed by conditional\n");
+      debug_rtx(next);
+#endif
+      return false;
+    }
+
+  /* Compare doesn't set overflow and carry the same way that
+     arithmetic instructions do, so we can't replace those.  */
+  if (flags_needed & FLAGS_OC)
+    return false;
+
+  /* Walk backwards to the nearest flag-setting insn, bailing out if
+     any intervening non-flag insn mentions op0 (it could change the
+     value being compared).  */
+  prev = cmp;
+  do {
+    prev = prev_nonnote_insn (prev);
+    if (!prev)
+      {
+#if DEBUG_CMP
+	fprintf(stderr, "No previous insn.\n");
+#endif
+	return false;
+      }
+    if (!INSN_P (prev))
+      {
+#if DEBUG_CMP
+	fprintf(stderr, "Previous insn is a non-insn.\n");
+#endif
+	return false;
+      }
+    pp = PATTERN (prev);
+    if (GET_CODE (pp) != SET)
+      {
+#if DEBUG_CMP
+	fprintf(stderr, "Previous insn is not a SET.\n");
+#endif
+	return false;
+      }
+    pflags = get_attr_flags (prev);
+
+    /* Looking up attributes of previous insns corrupted the recog
+       tables.  */
+    INSN_UID (cmp) = -1;
+    recog (PATTERN (cmp), cmp, 0);
+
+    if (pflags == FLAGS_N
+	&& reg_mentioned_p (op0, pp))
+      {
+#if DEBUG_CMP
+	fprintf(stderr, "intermediate non-flags insn uses op:\n");
+	debug_rtx(prev);
+#endif
+	return false;
+      }
+  } while (pflags == FLAGS_N);
+#if DEBUG_CMP
+  fprintf(stderr, "previous flag-setting insn:\n");
+  debug_rtx(prev);
+  debug_rtx(pp);
+#endif
+
+  if (GET_CODE (pp) == SET
+      && GET_CODE (XEXP (pp, 0)) == REG
+      && REGNO (XEXP (pp, 0)) == FLG_REGNO
+      && GET_CODE (XEXP (pp, 1)) == COMPARE)
+    {
+      /* Adjacent cbranches must have the same operands to be
+	 redundant.  */
+      rtx pop0 = XEXP (XEXP (pp, 1), 0);
+      rtx pop1 = XEXP (XEXP (pp, 1), 1);
+#if DEBUG_CMP
+      fprintf(stderr, "adjacent cbranches\n");
+      debug_rtx(pop0);
+      debug_rtx(pop1);
+#endif
+      if (rtx_equal_p (op0, pop0)
+	  && rtx_equal_p (op1, pop1))
+	return true;
+#if DEBUG_CMP
+      fprintf(stderr, "prev cmp not same\n");
+#endif
+      return false;
+    }
+
+  /* Else the previous insn must be a SET, with either the source or
+     dest equal to operands[0], and operands[1] must be zero.  */
+
+  if (!rtx_equal_p (op1, const0_rtx))
+    {
+#if DEBUG_CMP
+      fprintf(stderr, "operands[1] not const0_rtx\n");
+#endif
+      return false;
+    }
+  /* NOTE(review): pp was already verified to be a SET in the loop
+     above, so this check can never fire.  */
+  if (GET_CODE (pp) != SET)
+    {
+#if DEBUG_CMP
+      fprintf (stderr, "pp not set\n");
+#endif
+      return false;
+    }
+  if (!rtx_equal_p (op0, SET_SRC (pp))
+      && !rtx_equal_p (op0, SET_DEST (pp)))
+    {
+#if DEBUG_CMP
+      fprintf(stderr, "operands[0] not found in set\n");
+#endif
+      return false;
+    }
+
+#if DEBUG_CMP
+  fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
+#endif
+  /* Redundant only if the previous insn set every flag the
+     conditional needs.  */
+  if ((pflags & flags_needed) == flags_needed)
+    return true;
+
+  return false;
+}
+
+/* Return the pattern for a compare.  This will be commented out if
+   the compare is redundant, else a normal pattern is returned.  Thus,
+   the assembler output says where the compare would have been.  */
+char *
+m32c_output_compare (rtx insn, rtx *operands)
+{
+  /* The leading ';' comments the template out; returning templ + 1
+     skips it.  The size suffix at index 5 is patched below from the
+     operand's mode size (1->b, 2->w, 4->l).  */
+  static char templ[] = ";cmp.b\t%1,%0";
+  /*                 ^ 5  */
+
+  templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
+  if (m32c_compare_redundant (insn, operands))
+    {
+#if DEBUG_CMP
+      fprintf(stderr, "cbranch: cmp not needed\n");
+#endif
+      return templ;
+    }
+
+#if DEBUG_CMP
+  fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ);
+#endif
+  return templ + 1;
+}
+
+#undef TARGET_ENCODE_SECTION_INFO
+#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
+
/* The Global `targetm' Variable. */
struct gcc_target targetm = TARGET_INITIALIZER;