/* Output routines for GCC for Renesas / SuperH SH.
Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
- 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
Free Software Foundation, Inc.
Contributed by Steve Chamberlain (sac@cygnus.com).
Improved by Jim Wilson (wilson@cygnus.com).
#include "output.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
-#include "toplev.h"
#include "recog.h"
#include "integrate.h"
#include "dwarf2.h"
#include "cfgloop.h"
#include "alloc-pool.h"
#include "tm-constrs.h"
+#include "opts.h"
int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;
static bool shmedia_space_reserved_for_target_registers;
-static bool sh_handle_option (size_t, const char *, int);
static void split_branches (rtx);
static int branch_dest (rtx);
static void force_into (rtx, rtx);
static rtx gen_block_redirect (rtx, int, int);
static void sh_reorg (void);
static void sh_option_override (void);
-static void sh_option_init_struct (struct gcc_options *);
-static void sh_option_default_params (void);
static void output_stack_adjust (int, rtx, int, HARD_REG_SET *, bool);
static rtx frame_insn (rtx);
static rtx push (int);
static void sh_print_operand (FILE *, rtx, int);
static void sh_print_operand_address (FILE *, rtx);
static bool sh_print_operand_punct_valid_p (unsigned char code);
+static bool sh_asm_output_addr_const_extra (FILE *file, rtx x);
static void sh_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void sh_insert_attributes (tree, tree *);
static const char *sh_check_pch_target_flags (int);
static int flow_dependent_p (rtx, rtx);
static void flow_dependent_p_1 (rtx, const_rtx, void *);
static int shiftcosts (rtx);
-static int andcosts (rtx);
+static int and_xor_ior_costs (rtx, int);
static int addsubcosts (rtx);
static int multcosts (rtx);
static bool unspec_caller_rtx_p (rtx);
static bool sh_cannot_copy_insn_p (rtx);
-static bool sh_rtx_costs (rtx, int, int, int *, bool);
+static bool sh_rtx_costs (rtx, int, int, int, int *, bool);
static int sh_address_cost (rtx, bool);
static int sh_pr_n_sets (void);
static rtx sh_allocate_initial_value (rtx);
+static reg_class_t sh_preferred_reload_class (rtx, reg_class_t);
+static reg_class_t sh_secondary_reload (bool, rtx, reg_class_t,
+ enum machine_mode,
+ struct secondary_reload_info *);
static bool sh_legitimate_address_p (enum machine_mode, rtx, bool);
static rtx sh_legitimize_address (rtx, rtx, enum machine_mode);
+static rtx sh_delegitimize_address (rtx);
static int shmedia_target_regs_stack_space (HARD_REG_SET *);
static int shmedia_reserve_space_for_target_registers_p (int, HARD_REG_SET *);
static int shmedia_target_regs_stack_adjust (HARD_REG_SET *);
static rtx sh_libcall_value (enum machine_mode, const_rtx);
static bool sh_return_in_memory (const_tree, const_tree);
static rtx sh_builtin_saveregs (void);
-static void sh_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
-static bool sh_strict_argument_naming (CUMULATIVE_ARGS *);
-static bool sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *);
+static void sh_setup_incoming_varargs (cumulative_args_t, enum machine_mode, tree, int *, int);
+static bool sh_strict_argument_naming (cumulative_args_t);
+static bool sh_pretend_outgoing_varargs_named (cumulative_args_t);
static tree sh_build_builtin_va_list (void);
static void sh_va_start (tree, rtx);
static tree sh_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
int *punsignedp,
const_tree funtype,
int for_return);
-static bool sh_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
+static bool sh_pass_by_reference (cumulative_args_t, enum machine_mode,
const_tree, bool);
-static bool sh_callee_copies (CUMULATIVE_ARGS *, enum machine_mode,
+static bool sh_callee_copies (cumulative_args_t, enum machine_mode,
const_tree, bool);
-static int sh_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
+static int sh_arg_partial_bytes (cumulative_args_t, enum machine_mode,
tree, bool);
-static void sh_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
+static void sh_function_arg_advance (cumulative_args_t, enum machine_mode,
const_tree, bool);
-static rtx sh_function_arg (CUMULATIVE_ARGS *, enum machine_mode,
+static rtx sh_function_arg (cumulative_args_t, enum machine_mode,
const_tree, bool);
static bool sh_scalar_mode_supported_p (enum machine_mode);
static int sh_dwarf_calling_convention (const_tree);
static int sh2a_function_vector_p (tree);
static void sh_trampoline_init (rtx, tree, rtx);
static rtx sh_trampoline_adjust_address (rtx);
+static void sh_conditional_register_usage (void);
+static bool sh_legitimate_constant_p (enum machine_mode, rtx);
+
+static void sh_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
\f
static const struct attribute_spec sh_attribute_table[] =
{
- /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
- { "interrupt_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
- { "sp_switch", 1, 1, true, false, false, sh_handle_sp_switch_attribute },
- { "trap_exit", 1, 1, true, false, false, sh_handle_trap_exit_attribute },
- { "renesas", 0, 0, false, true, false, sh_handle_renesas_attribute },
- { "trapa_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
- { "nosave_low_regs", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
- { "resbank", 0, 0, true, false, false, sh_handle_resbank_handler_attribute },
- { "function_vector", 1, 1, true, false, false, sh2a_handle_function_vector_handler_attribute },
-#ifdef SYMBIAN
- /* Symbian support adds three new attributes:
- dllexport - for exporting a function/variable that will live in a dll
- dllimport - for importing a function/variable from a dll
-
- Microsoft allows multiple declspecs in one __declspec, separating
- them with spaces. We do NOT support this. Instead, use __declspec
- multiple times. */
- { "dllimport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
- { "dllexport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
-#endif
- { NULL, 0, 0, false, false, false, NULL }
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
+ affects_type_identity } */
+ { "interrupt_handler", 0, 0, true, false, false,
+ sh_handle_interrupt_handler_attribute, false },
+ { "sp_switch", 1, 1, true, false, false,
+ sh_handle_sp_switch_attribute, false },
+ { "trap_exit", 1, 1, true, false, false,
+ sh_handle_trap_exit_attribute, false },
+ { "renesas", 0, 0, false, true, false,
+ sh_handle_renesas_attribute, false },
+ { "trapa_handler", 0, 0, true, false, false,
+ sh_handle_interrupt_handler_attribute, false },
+ { "nosave_low_regs", 0, 0, true, false, false,
+ sh_handle_interrupt_handler_attribute, false },
+ { "resbank", 0, 0, true, false, false,
+ sh_handle_resbank_handler_attribute, false },
+ { "function_vector", 1, 1, true, false, false,
+ sh2a_handle_function_vector_handler_attribute, false },
+ { NULL, 0, 0, false, false, false, NULL, false }
};
-
-/* Set default optimization options. */
-static const struct default_options sh_option_optimization_table[] =
- {
- { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
- { OPT_LEVELS_1_PLUS_SPEED_ONLY, OPT_mdiv_, "inv:minlat", 1 },
- { OPT_LEVELS_SIZE, OPT_mdiv_, SH_DIV_STR_FOR_SIZE, 1 },
- { OPT_LEVELS_0_ONLY, OPT_mdiv_, "", 1 },
- { OPT_LEVELS_SIZE, OPT_mcbranchdi, NULL, 0 },
- /* We can't meaningfully test TARGET_SHMEDIA here, because -m
- options haven't been parsed yet, hence we'd read only the
- default. sh_target_reg_class will return NO_REGS if this is
- not SHMEDIA, so it's OK to always set
- flag_branch_target_load_optimize. */
- { OPT_LEVELS_2_PLUS, OPT_fbranch_target_load_optimize, NULL, 1 },
- { OPT_LEVELS_NONE, 0, NULL, 0 }
- };
\f
/* Initialize the GCC target structure. */
#undef TARGET_ATTRIBUTE_TABLE
#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sh_option_override
-#undef TARGET_OPTION_OPTIMIZATION_TABLE
-#define TARGET_OPTION_OPTIMIZATION_TABLE sh_option_optimization_table
-#undef TARGET_OPTION_INIT_STRUCT
-#define TARGET_OPTION_INIT_STRUCT sh_option_init_struct
-#undef TARGET_OPTION_DEFAULT_PARAMS
-#define TARGET_OPTION_DEFAULT_PARAMS sh_option_default_params
#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sh_print_operand
#define TARGET_PRINT_OPERAND_ADDRESS sh_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sh_print_operand_punct_valid_p
-
+#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
+#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA sh_asm_output_addr_const_extra
+
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sh_output_function_epilogue
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
-#undef TARGET_DEFAULT_TARGET_FLAGS
-#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
-#undef TARGET_HANDLE_OPTION
-#define TARGET_HANDLE_OPTION sh_handle_option
-
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sh_register_move_cost
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sh_md_init
+#undef TARGET_DELEGITIMIZE_ADDRESS
+#define TARGET_DELEGITIMIZE_ADDRESS sh_delegitimize_address
+
#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sh_legitimize_address
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO sh_encode_section_info
-#ifdef SYMBIAN
-
-#undef TARGET_ENCODE_SECTION_INFO
-#define TARGET_ENCODE_SECTION_INFO sh_symbian_encode_section_info
-#undef TARGET_STRIP_NAME_ENCODING
-#define TARGET_STRIP_NAME_ENCODING sh_symbian_strip_name_encoding
-#undef TARGET_CXX_IMPORT_EXPORT_CLASS
-#define TARGET_CXX_IMPORT_EXPORT_CLASS sh_symbian_import_export_class
-
-#endif /* SYMBIAN */
-
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sh_secondary_reload
+#undef TARGET_PREFERRED_RELOAD_CLASS
+#define TARGET_PREFERRED_RELOAD_CLASS sh_preferred_reload_class
+
+#undef TARGET_CONDITIONAL_REGISTER_USAGE
+#define TARGET_CONDITIONAL_REGISTER_USAGE sh_conditional_register_usage
+
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sh_legitimate_address_p
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS sh_trampoline_adjust_address
+#undef TARGET_LEGITIMATE_CONSTANT_P
+#define TARGET_LEGITIMATE_CONSTANT_P sh_legitimate_constant_p
+
/* Machine-specific symbol_ref flags. */
#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
-struct gcc_target targetm = TARGET_INITIALIZER;
-\f
-/* Implement TARGET_HANDLE_OPTION. */
+/* The tas.b instruction sets the 7th bit in the byte, i.e. 0x80. This value
+ is used by optabs.c atomic op expansion code as well as in sync.md. */
+#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
+#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0x80
-static bool
-sh_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED,
- int value ATTRIBUTE_UNUSED)
-{
- switch (code)
- {
- case OPT_m1:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH1;
- return true;
-
- case OPT_m2:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2;
- return true;
-
- case OPT_m2a:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A;
- return true;
-
- case OPT_m2a_nofpu:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_NOFPU;
- return true;
-
- case OPT_m2a_single:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE;
- return true;
-
- case OPT_m2a_single_only:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE_ONLY;
- return true;
-
- case OPT_m2e:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2E;
- return true;
-
- case OPT_m3:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3;
- return true;
-
- case OPT_m3e:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3E;
- return true;
-
- case OPT_m4:
- case OPT_m4_100:
- case OPT_m4_200:
- case OPT_m4_300:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4;
- return true;
-
- case OPT_m4_nofpu:
- case OPT_m4_100_nofpu:
- case OPT_m4_200_nofpu:
- case OPT_m4_300_nofpu:
- case OPT_m4_340:
- case OPT_m4_400:
- case OPT_m4_500:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_NOFPU;
- return true;
-
- case OPT_m4_single:
- case OPT_m4_100_single:
- case OPT_m4_200_single:
- case OPT_m4_300_single:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE;
- return true;
-
- case OPT_m4_single_only:
- case OPT_m4_100_single_only:
- case OPT_m4_200_single_only:
- case OPT_m4_300_single_only:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE_ONLY;
- return true;
-
- case OPT_m4a:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A;
- return true;
-
- case OPT_m4a_nofpu:
- case OPT_m4al:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_NOFPU;
- return true;
-
- case OPT_m4a_single:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE;
- return true;
-
- case OPT_m4a_single_only:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE_ONLY;
- return true;
-
- case OPT_m5_32media:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA;
- return true;
-
- case OPT_m5_32media_nofpu:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA_NOFPU;
- return true;
-
- case OPT_m5_64media:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA;
- return true;
-
- case OPT_m5_64media_nofpu:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA_NOFPU;
- return true;
-
- case OPT_m5_compact:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT;
- return true;
-
- case OPT_m5_compact_nofpu:
- target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT_NOFPU;
- return true;
-
- default:
- return true;
- }
-}
+struct gcc_target targetm = TARGET_INITIALIZER;
\f
-/* Implement TARGET_OPTION_INIT_STRUCT. */
-static void
-sh_option_init_struct (struct gcc_options *opts)
-{
- /* We can't meaningfully test TARGET_SH2E / TARGET_IEEE
- here, so leave it to TARGET_OPTION_OVERRIDE to set
- flag_finite_math_only. We set it to 2 here so we know if the user
- explicitly requested this to be on or off. */
- opts->x_flag_finite_math_only = 2;
-}
-
-/* Implement TARGET_OPTION_DEFAULT_PARAMS. */
-static void
-sh_option_default_params (void)
-{
- set_default_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 2);
-}
-
/* Implement TARGET_OPTION_OVERRIDE macro. Validate and override
various options, and do some machine dependent initialization. */
static void
SUBTARGET_OVERRIDE_OPTIONS;
if (optimize > 1 && !optimize_size)
target_flags |= MASK_SAVE_ALL_TARGET_REGS;
- if (flag_finite_math_only == 2)
- flag_finite_math_only
- = !flag_signaling_nans && TARGET_SH2E && ! TARGET_IEEE;
- if (TARGET_SH2E && !flag_finite_math_only)
- target_flags |= MASK_IEEE;
sh_cpu = PROCESSOR_SH1;
assembler_dialect = 0;
if (TARGET_SH2)
else
sh_divsi3_libfunc = "__sdivsi3";
if (sh_branch_cost == -1)
- sh_branch_cost
- = TARGET_SH5 ? 1 : ! TARGET_SH2 || TARGET_HARD_SH4 ? 2 : 1;
+ {
+ sh_branch_cost = 1;
+
+ /* The SH1 does not have delay slots, hence we get a pipeline stall
+ at every branch. The SH4 is superscalar, so the single delay slot
+ is not sufficient to keep both pipelines filled. */
+ if (! TARGET_SH2 || TARGET_HARD_SH4)
+ sh_branch_cost = 2;
+ }
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
if (! VALID_REGISTER_P (regno))
if (! VALID_REGISTER_P (ADDREGNAMES_REGNO (regno)))
sh_additional_register_names[regno][0] = '\0';
- flag_omit_frame_pointer = (PREFERRED_DEBUGGING_TYPE == DWARF2_DEBUG);
-
if ((flag_pic && ! TARGET_PREFERGOT)
|| (TARGET_SHMEDIA && !TARGET_PT_FIXED))
flag_no_function_cse = 1;
flag_schedule_insns = 0;
}
- if ((target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS) == 0)
- target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
-
- /* Unwind info is not correct around the CFG unless either a frame
- pointer is present or M_A_O_A is set. Fixing this requires rewriting
- unwind info generation to be aware of the CFG and propagating states
+ /* Unwind info is not correct around the CFG unless either a frame
+ pointer is present or M_A_O_A is set. Fixing this requires rewriting
+ unwind info generation to be aware of the CFG and propagating states
around edges. */
if ((flag_unwind_tables || flag_asynchronous_unwind_tables
- || flag_exceptions || flag_non_call_exceptions)
- && flag_omit_frame_pointer
- && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
+ || flag_exceptions || flag_non_call_exceptions)
+ && flag_omit_frame_pointer && !TARGET_ACCUMULATE_OUTGOING_ARGS)
{
- if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
- warning (0, "unwind tables currently require either a frame pointer "
- "or -maccumulate-outgoing-args for correctness");
- target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
+ warning (0, "unwind tables currently require either a frame pointer "
+ "or -maccumulate-outgoing-args for correctness");
+ TARGET_ACCUMULATE_OUTGOING_ARGS = 1;
}
/* Unwinding with -freorder-blocks-and-partition does not work on this
{
if (flag_exceptions)
{
- inform (input_location,
+ inform (input_location,
"-freorder-blocks-and-partition does not work with "
"exceptions on this architecture");
flag_reorder_blocks_and_partition = 0;
align_functions = min_align;
}
+ /* If the -mieee option was not explicitly set by the user, turn it on
+ unless -ffinite-math-only was specified. See also PR 33135. */
+ if (! global_options_set.x_TARGET_IEEE)
+ TARGET_IEEE = ! flag_finite_math_only;
+
if (sh_fixed_range_str)
sh_fix_range (sh_fixed_range_str);
/* This target defaults to strict volatile bitfields. */
- if (flag_strict_volatile_bitfields < 0)
+ if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
flag_strict_volatile_bitfields = 1;
}
\f
return (code == '.' || code == '#' || code == '@' || code == ','
|| code == '$' || code == '\'' || code == '>');
}
+
+/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.
+
+   Print the SH-specific UNSPEC wrappers that may appear inside a
+   constant address expression: datalabel references, PIC / GOT / PLT
+   and TLS relocation suffixes, internal PIC call-site labels and the
+   16-bit extraction helpers.  Return true if X was recognized and
+   printed, false so the caller falls back to the generic handling.  */
+
+static bool
+sh_asm_output_addr_const_extra (FILE *file, rtx x)
+{
+  if (GET_CODE (x) == UNSPEC)
+    {
+      switch (XINT (x, 1))
+        {
+        case UNSPEC_DATALABEL:
+          fputs ("datalabel ", file);
+          output_addr_const (file, XVECEXP (x, 0, 0));
+          break;
+        case UNSPEC_PIC:
+          /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
+          output_addr_const (file, XVECEXP (x, 0, 0));
+          break;
+        case UNSPEC_GOT:
+          output_addr_const (file, XVECEXP (x, 0, 0));
+          fputs ("@GOT", file);
+          break;
+        case UNSPEC_GOTOFF:
+          output_addr_const (file, XVECEXP (x, 0, 0));
+          fputs ("@GOTOFF", file);
+          break;
+        case UNSPEC_PLT:
+          output_addr_const (file, XVECEXP (x, 0, 0));
+          fputs ("@PLT", file);
+          break;
+        case UNSPEC_GOTPLT:
+          output_addr_const (file, XVECEXP (x, 0, 0));
+          fputs ("@GOTPLT", file);
+          break;
+        case UNSPEC_DTPOFF:
+          output_addr_const (file, XVECEXP (x, 0, 0));
+          fputs ("@DTPOFF", file);
+          break;
+        case UNSPEC_GOTTPOFF:
+          output_addr_const (file, XVECEXP (x, 0, 0));
+          fputs ("@GOTTPOFF", file);
+          break;
+        case UNSPEC_TPOFF:
+          output_addr_const (file, XVECEXP (x, 0, 0));
+          fputs ("@TPOFF", file);
+          break;
+        case UNSPEC_CALLER:
+          {
+            char name[32];
+            /* LPCS stands for Label for PIC Call Site.  */
+            targetm.asm_out.generate_internal_label (name, "LPCS",
+                                                     INTVAL (XVECEXP (x, 0, 0)));
+            assemble_name (file, name);
+          }
+          break;
+        case UNSPEC_EXTRACT_S16:
+        case UNSPEC_EXTRACT_U16:
+          {
+            rtx val, shift;
+
+            /* Emit "((VAL >> SHIFT) & 65535)".  VAL gets its own
+               parentheses when it is a compound expression, so that
+               assembler operator precedence cannot misparse it.  */
+            val = XVECEXP (x, 0, 0);
+            shift = XVECEXP (x, 0, 1);
+            fputc ('(', file);
+            if (shift != const0_rtx)
+              fputc ('(', file);
+            if (GET_CODE (val) == CONST
+                || GET_RTX_CLASS (GET_CODE (val)) != RTX_OBJ)
+              {
+                fputc ('(', file);
+                output_addr_const (file, val);
+                fputc (')', file);
+              }
+            else
+              output_addr_const (file, val);
+            if (shift != const0_rtx)
+              {
+                fputs (" >> ", file);
+                output_addr_const (file, shift);
+                fputc (')', file);
+              }
+            fputs (" & 65535)", file);
+          }
+          break;
+        case UNSPEC_SYMOFF:
+          /* Difference of two symbols; parenthesize the subtrahend
+             when it is a compound constant.  */
+          output_addr_const (file, XVECEXP (x, 0, 0));
+          fputc ('-', file);
+          if (GET_CODE (XVECEXP (x, 0, 1)) == CONST)
+            {
+              fputc ('(', file);
+              output_addr_const (file, XVECEXP (x, 0, 1));
+              fputc (')', file);
+            }
+          else
+            output_addr_const (file, XVECEXP (x, 0, 1));
+          break;
+        case UNSPEC_PCREL_SYMOFF:
+          /* SYM0 - (SYM1 - .), i.e. a PC-relative symbol offset.  */
+          output_addr_const (file, XVECEXP (x, 0, 0));
+          fputs ("-(", file);
+          output_addr_const (file, XVECEXP (x, 0, 1));
+          fputs ("-.)", file);
+          break;
+        default:
+          return false;
+        }
+      return true;
+    }
+  else
+    return false;
+}
\f
/* Encode symbol attributes of a SYMBOL_REF into its
rtx from = adjust_automodify_address (src, BLKmode,
src_addr, copied);
- set_mem_size (from, GEN_INT (4));
+ set_mem_size (from, 4);
emit_insn (gen_movua (temp, from));
emit_move_insn (src_addr, plus_constant (src_addr, 4));
emit_move_insn (to, temp);
{
rtx tga_op1, tga_ret, tmp, tmp2;
+ if (! flag_pic
+ && (tls_kind == TLS_MODEL_GLOBAL_DYNAMIC
+ || tls_kind == TLS_MODEL_LOCAL_DYNAMIC
+ || tls_kind == TLS_MODEL_INITIAL_EXEC))
+ {
+ /* Don't schedule insns for getting GOT address when
+ the first scheduling is enabled, to avoid spill
+ failures for R0. */
+ if (flag_schedule_insns)
+ emit_insn (gen_blockage ());
+ emit_insn (gen_GOTaddr2picreg ());
+ emit_use (gen_rtx_REG (SImode, PIC_REG));
+ if (flag_schedule_insns)
+ emit_insn (gen_blockage ());
+ }
+
switch (tls_kind)
{
case TLS_MODEL_GLOBAL_DYNAMIC:
tga_ret = gen_rtx_REG (Pmode, R0_REG);
emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
- op1 = tga_ret;
+ tmp = gen_reg_rtx (Pmode);
+ emit_move_insn (tmp, tga_ret);
+ op1 = tmp;
break;
case TLS_MODEL_LOCAL_DYNAMIC:
break;
case TLS_MODEL_INITIAL_EXEC:
- if (! flag_pic)
- {
- /* Don't schedule insns for getting GOT address when
- the first scheduling is enabled, to avoid spill
- failures for R0. */
- if (flag_schedule_insns)
- emit_insn (gen_blockage ());
- emit_insn (gen_GOTaddr2picreg ());
- emit_use (gen_rtx_REG (SImode, PIC_REG));
- if (flag_schedule_insns)
- emit_insn (gen_blockage ());
- }
tga_op1 = !can_create_pseudo_p () ? op0 : gen_reg_rtx (Pmode);
tmp = gen_sym2GOTTPOFF (op1);
emit_insn (gen_tls_initial_exec (tga_op1, tmp));
}
/* ??? How should we distribute probabilities when more than one branch
- is generated. So far we only have soem ad-hoc observations:
+ is generated. So far we only have some ad-hoc observations:
- If the operands are random, they are likely to differ in both parts.
- If comparing items in a hash chain, the operands are random or equal;
operation should be EQ or NE.
else if (op2h != CONST0_RTX (SImode))
msw_taken = LTU;
else
- break;
+ {
+ msw_skip = swap_condition (LTU);
+ break;
+ }
msw_skip = swap_condition (msw_taken);
}
break;
{
operands[1] = op1h;
operands[2] = op2h;
+ if (reload_completed
+ && ! arith_reg_or_0_operand (op2h, SImode)
+ && (true_regnum (op1h) || (comparison != EQ && comparison != NE)))
+ {
+ emit_move_insn (scratch, operands[2]);
+ operands[2] = scratch;
+ }
}
operands[3] = skip_label = gen_label_rtx ();
{
default_file_start ();
-#ifdef SYMBIAN
- /* Declare the .directive section before it is used. */
- fputs ("\t.section .directive, \"SM\", @progbits, 1\n", asm_out_file);
- fputs ("\t.asciz \"#<SYMEDIT>#\\n\"\n", asm_out_file);
-#endif
-
if (TARGET_ELF)
/* We need to show the text section with the proper
attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
{
int value;
+ /* There is no pattern for constant first operand. */
+ if (CONST_INT_P (XEXP (x, 0)))
+ return MAX_COST;
+
if (TARGET_SHMEDIA)
- return 1;
+ return COSTS_N_INSNS (1);
if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
{
if (GET_MODE (x) == DImode
&& CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) == 1)
- return 2;
+ return COSTS_N_INSNS (2);
/* Everything else is invalid, because there is no pattern for it. */
return MAX_COST;
}
/* If shift by a non constant, then this will be expensive. */
if (!CONST_INT_P (XEXP (x, 1)))
- return SH_DYNAMIC_SHIFT_COST;
+ return COSTS_N_INSNS (SH_DYNAMIC_SHIFT_COST);
/* Otherwise, return the true cost in instructions. Cope with out of range
shift counts more or less arbitrarily. */
/* If SH3, then we put the constant in a reg and use shad. */
if (cost > 1 + SH_DYNAMIC_SHIFT_COST)
cost = 1 + SH_DYNAMIC_SHIFT_COST;
- return cost;
+ return COSTS_N_INSNS (cost);
}
else
- return shift_insns[value];
+ return COSTS_N_INSNS (shift_insns[value]);
}
-/* Return the cost of an AND operation. */
+/* Return the cost of an AND/XOR/IOR operation. */
static inline int
-andcosts (rtx x)
+and_xor_ior_costs (rtx x, int code)
{
int i;
- /* Anding with a register is a single cycle and instruction. */
+ /* A logical operation with two registers is a single cycle
+ instruction. */
if (!CONST_INT_P (XEXP (x, 1)))
return 1;
|| satisfies_constraint_J16 (XEXP (x, 1)))
return 1;
else
- return 1 + rtx_cost (XEXP (x, 1), AND, !optimize_size);
+ return 1 + rtx_cost (XEXP (x, 1), AND, 1, !optimize_size);
}
/* These constants are single cycle extu.[bw] instructions. */
- if (i == 0xff || i == 0xffff)
+ if ((i == 0xff || i == 0xffff) && code == AND)
return 1;
- /* Constants that can be used in an and immediate instruction in a single
- cycle, but this requires r0, so make it a little more expensive. */
+ /* Constants that can be used in an instruction as an immediate are
+ a single cycle, but this requires r0, so make it a little more
+ expensive. */
if (CONST_OK_FOR_K08 (i))
return 2;
- /* Constants that can be loaded with a mov immediate and an and.
+ /* Constants that can be loaded with a mov immediate need one more cycle.
This case is probably unnecessary. */
if (CONST_OK_FOR_I08 (i))
return 2;
- /* Any other constants requires a 2 cycle pc-relative load plus an and.
+ /* Any other constant requires an additional 2 cycle pc-relative load.
This case is probably unnecessary. */
return 3;
}
scanned. In either case, *TOTAL contains the cost result. */
static bool
-sh_rtx_costs (rtx x, int code, int outer_code, int *total,
- bool speed ATTRIBUTE_UNUSED)
+sh_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
+ int *total, bool speed ATTRIBUTE_UNUSED)
{
switch (code)
{
*total = 8;
return true;
+ case EQ:
+ /* An and with a constant compared against zero is
+ most likely going to be a TST #imm, R0 instruction.
+ Notice that this does not catch the zero_extract variants from
+ the md file. */
+ if (GET_CODE (XEXP (x, 0)) == AND
+ && CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) == 0)
+ {
+ *total = 1;
+ return true;
+ }
+ else
+ return false;
+
case CONST:
case LABEL_REF:
case SYMBOL_REF:
return true;
case AND:
- *total = COSTS_N_INSNS (andcosts (x));
+ case XOR:
+ case IOR:
+ *total = COSTS_N_INSNS (and_xor_ior_costs (x, code));
return true;
case MULT:
case ASHIFT:
case ASHIFTRT:
case LSHIFTRT:
- *total = COSTS_N_INSNS (shiftcosts (x));
+ *total = shiftcosts (x);
return true;
case DIV:
char func[18];
int value;
- if (TARGET_SH3)
+ if (TARGET_SH3 || TARGET_SH2A)
{
if (!CONST_INT_P (operands[2]))
{
}
}
}
- if (TARGET_SH3)
+ if (TARGET_SH3 || TARGET_SH2A)
{
/* Try to use a dynamic shift. */
cost = shift_insns[32 - insize] + 1 + SH_DYNAMIC_SHIFT_COST;
/* Don't emit a constant table in the middle of global pointer setting,
   since that would move the addressing base GOT into another table.
We need the first mov instruction before the _GLOBAL_OFFSET_TABLE_
- in the pool anyway, so just move up the whole constant pool. */
- if (last_got)
- from = PREV_INSN (last_got);
+ in the pool anyway, so just move up the whole constant pool.
+ However, avoid doing so when the last single GOT mov is the starting
+ insn itself.  Going above the start insn would create a negative
+ offset, causing errors. */
+ if (last_got && last_got != orig)
+ from = PREV_INSN (last_got);
/* Don't insert the constant pool table at the position which
may be the landing pad. */
|| LABEL_P (from))
from = PREV_INSN (from);
+ /* Make sure we do not split between a call and its corresponding
+ CALL_ARG_LOCATION note. */
+ if (CALL_P (from))
+ {
+ rtx next = NEXT_INSN (from);
+ if (next && NOTE_P (next)
+ && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
+ from = next;
+ }
+
from = emit_jump_insn_after (gen_jump (label), from);
JUMP_LABEL (from) = label;
LABEL_NUSES (label) = 1;
}
else
jump = emit_jump_insn_after (gen_return (), insn);
+
/* Emit a barrier so that reorg knows that any following instructions
are not reachable via a fall-through path.
But don't do this when not optimizing, since we wouldn't suppress the
if (optimize)
emit_barrier_after (jump);
emit_label_after (bp->near_label, insn);
- JUMP_LABEL (jump) = bp->far_label;
+
+ if (bp->far_label)
+ JUMP_LABEL (jump) = bp->far_label;
+ else
+ {
+ rtx pat = PATTERN (jump);
+ gcc_assert (ANY_RETURN_P (pat));
+ JUMP_LABEL (jump) = pat;
+ }
+
ok = invert_jump (insn, label, 1);
gcc_assert (ok);
slot = 0;
credit -= get_attr_length (prev);
}
- if (prev
- && JUMP_P (prev)
- && JUMP_LABEL (prev))
+ if (prev && jump_to_label_p (prev))
{
rtx x;
if (jump_to_next
/* If relaxing, generate pseudo-ops to associate function calls with
the symbols they call. It does no harm to not generate these
- pseudo-ops. However, when we can generate them, it enables to
+ pseudo-ops. However, when we can generate them, it enables the
linker to potentially relax the jsr to a bsr, and eliminate the
register load and, possibly, the constant pool entry. */
JUMP_LABEL (insn) = far_label;
LABEL_NUSES (far_label)++;
}
- redirect_jump (insn, NULL_RTX, 1);
+ redirect_jump (insn, ret_rtx, 1);
far_label = 0;
}
}
&& (NPARM_REGS(SImode)
> crtl->args.info.arg_count[(int) SH_ARG_INT]))
pretend_args = 0;
- /* Dwarf2 module doesn't expect frame related insns here. */
+
output_stack_adjust (-pretend_args
- crtl->args.info.stack_regs * 8,
- stack_pointer_rtx, 0, NULL, false);
+ stack_pointer_rtx, 0, NULL, true);
stack_usage = pretend_args + crtl->args.info.stack_regs * 8;
if (TARGET_SHCOMPACT && flag_pic && crtl->args.info.call_cookie)
emit_insn (gen_shcompact_incoming_args ());
}
- if (flag_stack_usage)
+ /* If we are profiling, make sure no instructions are scheduled before
+ the call to mcount. Similarly if some call instructions are swapped
+ before frame related insns, it'll confuse the unwinder because
+ currently SH has no unwind info for function epilogues. */
+ if (crtl->profile || flag_exceptions || flag_unwind_tables)
+ emit_insn (gen_blockage ());
+
+ if (flag_stack_usage_info)
current_function_static_stack_size = stack_usage;
}
nfp = 8 - nfp;
else
nfp = 0;
- u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
- size_int (UNITS_PER_WORD * nfp));
+ u = fold_build_pointer_plus_hwi (u, UNITS_PER_WORD * nfp);
t = build2 (MODIFY_EXPR, ptr_type_node, next_fp_limit, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
nint = 4 - nint;
else
nint = 0;
- u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
- size_int (UNITS_PER_WORD * nint));
+ u = fold_build_pointer_plus_hwi (u, UNITS_PER_WORD * nint);
t = build2 (MODIFY_EXPR, ptr_type_node, next_o_limit, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
HOST_WIDE_INT size, rsize;
tree tmp, pptr_type_node;
tree addr, lab_over = NULL, result = NULL;
- int pass_by_ref = targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
+ bool pass_by_ref;
tree eff_type;
+ if (!VOID_TYPE_P (type))
+ pass_by_ref = targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
+ else
+ pass_by_ref = false;
+
if (pass_by_ref)
type = build_pointer_type (type);
gimplify_assign (unshare_expr (next_fp_tmp), valist, pre_p);
tmp = next_fp_limit;
if (size > 4 && !is_double)
- tmp = build2 (POINTER_PLUS_EXPR, TREE_TYPE (tmp),
- unshare_expr (tmp), size_int (4 - size));
+ tmp = fold_build_pointer_plus_hwi (unshare_expr (tmp), 4 - size);
tmp = build2 (GE_EXPR, boolean_type_node,
unshare_expr (next_fp_tmp), unshare_expr (tmp));
cmp = build3 (COND_EXPR, void_type_node, tmp,
tmp = fold_convert (sizetype, next_fp_tmp);
tmp = build2 (BIT_AND_EXPR, sizetype, tmp,
size_int (UNITS_PER_WORD));
- tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node,
- unshare_expr (next_fp_tmp), tmp);
+ tmp = fold_build_pointer_plus (unshare_expr (next_fp_tmp), tmp);
gimplify_assign (unshare_expr (next_fp_tmp), tmp, pre_p);
}
if (is_double)
}
else
{
- tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node,
- unshare_expr (next_o), size_int (rsize));
+ tmp = fold_build_pointer_plus_hwi (unshare_expr (next_o), rsize);
tmp = build2 (GT_EXPR, boolean_type_node, tmp,
unshare_expr (next_o_limit));
tmp = build3 (COND_EXPR, void_type_node, tmp,
return
gen_rtx_PARALLEL (VOIDmode,
gen_rtvec (2,
- gen_rtx_REG (SFmode,
- DBX_REGISTER_NUMBER (regno+1)),
- gen_rtx_REG (SFmode,
- DBX_REGISTER_NUMBER (regno))));
+ gen_rtx_REG (SFmode, regno + 1),
+ gen_rtx_REG (SFmode, regno)));
}
+/* Implement TARGET_PROMOTE_FUNCTION_MODE.  Arguments of functions that
+   follow the Renesas prototype rules are promoted with promote_mode;
+   everything else is deferred to the default hook so that FOR_RETURN
+   is honored rather than ignored.  */
static enum machine_mode
sh_promote_function_mode (const_tree type, enum machine_mode mode,
int *punsignedp, const_tree funtype,
- int for_return ATTRIBUTE_UNUSED)
+ int for_return)
{
if (sh_promote_prototypes (funtype))
return promote_mode (type, mode, punsignedp);
else
- return mode;
+ return default_promote_function_mode (type, mode, punsignedp, funtype,
+ for_return);
}
static bool
}
static bool
-sh_pass_by_reference (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+sh_pass_by_reference (cumulative_args_t cum_v, enum machine_mode mode,
const_tree type, bool named)
{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+
if (targetm.calls.must_pass_in_stack (mode, type))
return true;
}
+/* Implement TARGET_CALLEE_COPIES: return true for an outgoing argument
+   whose alignment is a multiple of SH_MIN_ALIGN_FOR_CALLEE_COPY.  */
static bool
-sh_callee_copies (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+sh_callee_copies (cumulative_args_t cum, enum machine_mode mode,
const_tree type, bool named ATTRIBUTE_UNUSED)
{
/* ??? How can it possibly be correct to return true only on the
caller side of the equation? Is there someplace else in the
sh backend that's magically producing the copies? */
- return (cum->outgoing
+ return (get_cumulative_args (cum)->outgoing
&& ((mode == BLKmode ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode))
% SH_MIN_ALIGN_FOR_CALLEE_COPY == 0));
}
static int
-sh_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+sh_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
tree type, bool named ATTRIBUTE_UNUSED)
{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
int words = 0;
if (!TARGET_SH5
its data type forbids. */
static rtx
-sh_function_arg (CUMULATIVE_ARGS *ca, enum machine_mode mode,
+sh_function_arg (cumulative_args_t ca_v, enum machine_mode mode,
const_tree type, bool named)
{
+ CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
+
if (! TARGET_SH5 && mode == VOIDmode)
return GEN_INT (ca->renesas_abi ? 1 : 0);
available.) */
static void
-sh_function_arg_advance (CUMULATIVE_ARGS *ca, enum machine_mode mode,
+sh_function_arg_advance (cumulative_args_t ca_v, enum machine_mode mode,
const_tree type, bool named)
{
+ CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
+
if (ca->force_mem)
ca->force_mem = 0;
else if (TARGET_SH5)
later. Fortunately, we already have two flags that are part of struct
function that tell if a function uses varargs or stdarg. */
static void
-sh_setup_incoming_varargs (CUMULATIVE_ARGS *ca,
+sh_setup_incoming_varargs (cumulative_args_t ca,
enum machine_mode mode,
tree type,
int *pretend_arg_size,
{
int named_parm_regs, anon_parm_regs;
- named_parm_regs = (ROUND_REG (*ca, mode)
+ named_parm_regs = (ROUND_REG (*get_cumulative_args (ca), mode)
+ (mode == BLKmode
? ROUND_ADVANCE (int_size_in_bytes (type))
: ROUND_ADVANCE (GET_MODE_SIZE (mode))));
}
+/* Implement TARGET_STRICT_ARGUMENT_NAMING.  True only for the SH5 ABI.  */
static bool
-sh_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
+sh_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
return TARGET_SH5;
}
+/* Implement TARGET_PRETEND_OUTGOING_VARARGS_NAMED.  Disabled for the
+   Hitachi/Renesas ABIs and for SH5.  */
static bool
-sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *ca)
+sh_pretend_outgoing_varargs_named (cumulative_args_t ca_v)
{
+ CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
+
return ! (TARGET_HITACHI || ca->renesas_abi) && ! TARGET_SH5;
}
return 0;
}
-/* Returns the function vector number, if the the attribute
+/* Returns the function vector number, if the attribute
'function_vector' is assigned, otherwise returns zero. */
int
sh2a_get_function_vector_number (rtx x)
return 1;
}
-int
-tertiary_reload_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- enum rtx_code code = GET_CODE (op);
- return code == MEM || (TARGET_SH4 && code == CONST_DOUBLE);
-}
-
/* Return the TLS type for TLS symbols, 0 for otherwise. */
enum tls_model
tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
#if 0
/* If this is a label that existed before reload, then the register
- if dead here. However, if this is a label added by reorg, then
+ is dead here. However, if this is a label added by reorg, then
the register may still be live here. We can't tell the difference,
so we just ignore labels completely. */
if (code == CODE_LABEL)
{
int size;
- /* Check if this the address of an unaligned load / store. */
+ /* Check if this is the address of an unaligned load / store. */
if (mode == VOIDmode)
return CONST_OK_FOR_I06 (INTVAL (op));
return true;
}
+/* In the name of slightly smaller debug output, and to cater to
+ general assembler lossage, recognize various UNSPEC sequences
+ and turn them back into a direct symbol reference. */
+
+static rtx
+sh_delegitimize_address (rtx orig_x)
+{
+ rtx x, y;
+
+ /* Let the generic MEM-attribute based delegitimization run first.  */
+ orig_x = delegitimize_mem_from_attrs (orig_x);
+
+ x = orig_x;
+ if (MEM_P (x))
+ x = XEXP (x, 0);
+ if (GET_CODE (x) == CONST)
+ {
+ y = XEXP (x, 0);
+ if (GET_CODE (y) == UNSPEC)
+ {
+ /* Simple wrappers: the symbol is the first vector element.  */
+ if (XINT (y, 1) == UNSPEC_GOT
+ || XINT (y, 1) == UNSPEC_GOTOFF
+ || XINT (y, 1) == UNSPEC_SYMOFF)
+ return XVECEXP (y, 0, 0);
+ /* A PC-relative PLT reference wraps the symbol in a nested
+ CONST/UNSPEC_PLT; peel off both layers.  */
+ else if (XINT (y, 1) == UNSPEC_PCREL_SYMOFF)
+ {
+ if (GET_CODE (XVECEXP (y, 0, 0)) == CONST)
+ {
+ rtx symplt = XEXP (XVECEXP (y, 0, 0), 0);
+
+ if (GET_CODE (symplt) == UNSPEC
+ && XINT (symplt, 1) == UNSPEC_PLT)
+ return XVECEXP (symplt, 0, 0);
+ }
+ }
+ /* SHmedia 16-bit extract UNSPECs: rebuild the underlying
+ symbol-plus-offset expression.  */
+ else if (TARGET_SHMEDIA
+ && (XINT (y, 1) == UNSPEC_EXTRACT_S16
+ || XINT (y, 1) == UNSPEC_EXTRACT_U16))
+ {
+ rtx offset = XVECEXP (y, 0, 1);
+
+ x = gen_rtx_PLUS (Pmode, XVECEXP (y, 0, 0), offset);
+ if (MEM_P (orig_x))
+ x = replace_equiv_address_nv (orig_x, x);
+ return x;
+ }
+ }
+ }
+
+ return orig_x;
+}
+
/* Mark the use of a constant in the literal table. If the constant
has multiple labels, make it unique. */
static rtx
else
{
int has_result = signature_args[signature][0] != 0;
+ tree args[3];
if ((signature_args[signature][1] & 8)
&& (((signature_args[signature][1] & 1) && TARGET_SHMEDIA32)
if (! TARGET_FPU_ANY
&& FLOAT_MODE_P (insn_data[d->icode].operand[0].mode))
continue;
- type = void_list_node;
+ for (i = 0; i < (int) ARRAY_SIZE (args); i++)
+ args[i] = NULL_TREE;
for (i = 3; ; i--)
{
int arg = signature_args[signature][i];
arg_type = void_type_node;
if (i == 0)
break;
- type = tree_cons (NULL_TREE, arg_type, type);
+ args[i-1] = arg_type;
}
- type = build_function_type (arg_type, type);
+ type = build_function_type_list (arg_type, args[0], args[1],
+ args[2], NULL_TREE);
if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES)
shared[signature] = type;
}
&& REGCLASS_HAS_GENERAL_REG (srcclass))
|| (REGCLASS_HAS_GENERAL_REG (dstclass)
&& REGCLASS_HAS_FP_REG (srcclass)))
- return ((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12)
- * ((GET_MODE_SIZE (mode) + 7) / 8U));
+ {
+ /* Discourage trying to use fp regs for a pointer. This also
+ discourages fp regs with SImode because Pmode is an alias
+ of SImode on this target. See PR target/48596. */
+ int addend = (mode == Pmode) ? 40 : 0;
+
+ return (((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12) + addend)
+ * ((GET_MODE_SIZE (mode) + 7) / 8U));
+ }
if ((dstclass == FPUL_REGS
&& REGCLASS_HAS_GENERAL_REG (srcclass))
{
tree ptype = build_pointer_type (TREE_TYPE (funtype));
- sh_function_arg_advance (&cum, Pmode, ptype, true);
+ sh_function_arg_advance (pack_cumulative_args (&cum), Pmode, ptype, true);
}
- this_rtx = sh_function_arg (&cum, Pmode, ptr_type_node, true);
+ this_rtx
+ = sh_function_arg (pack_cumulative_args (&cum), Pmode, ptr_type_node, true);
/* For SHcompact, we only have r0 for a scratch register: r1 is the
static chain pointer (even if you can't have nested virtual functions
break;
}
if (scratch1 == scratch0)
- error ("Need a second call-clobbered general purpose register");
+ error ("need a second call-clobbered general purpose register");
for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
if (call_used_regs[i] && ! fixed_regs[i])
{
break;
}
if (scratch2 == scratch0)
- error ("Need a call-clobbered target register");
+ error ("need a call-clobbered target register");
}
this_value = plus_constant (this_rtx, delta);
}
sh_reorg ();
-
- if (optimize > 0 && flag_delayed_branch)
- dbr_schedule (insns);
-
shorten_branches (insns);
final_start_function (insns, file, 1);
final (insns, file, 1);
{
pcum->force_mem = ((TARGET_HITACHI || pcum->renesas_abi)
&& aggregate_value_p (TREE_TYPE (fntype), fndecl));
- pcum->prototype_p = TYPE_ARG_TYPES (fntype) ? TRUE : FALSE;
+ pcum->prototype_p = prototype_p (fntype);
pcum->arg_count [(int) SH_ARG_INT]
= TARGET_SH5 && aggregate_value_p (TREE_TYPE (fntype), fndecl);
return fnaddr;
}
-reg_class_t
+/* Implement TARGET_PREFERRED_RELOAD_CLASS. */
+
+static reg_class_t
+sh_preferred_reload_class (rtx x, reg_class_t rclass)
+{
+ /* On SHmedia, reload floating-point constants, symbols and PIC
+ addresses through a general register when no class was preferred.  */
+ if (rclass == NO_REGS
+ && TARGET_SHMEDIA
+ && (CONST_DOUBLE_P (x)
+ || GET_CODE (x) == SYMBOL_REF
+ || PIC_ADDR_P (x)))
+ return GENERAL_REGS;
+
+ return rclass;
+}
+
+/* Implement TARGET_SECONDARY_RELOAD. */
+
+static reg_class_t
sh_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
enum machine_mode mode, secondary_reload_info *sri)
{
if (rclass != GENERAL_REGS && REG_P (x)
&& TARGET_REGISTER_P (REGNO (x)))
return GENERAL_REGS;
+
+ /* If we get here, fall back to loading the FPUL register through
+ general registers. This case can happen when the movsi_ie insn is
+ picked initially to load/store the FPUL register from/to another
+ register, and then the other register is allocated on the stack. */
+ if (rclass == FPUL_REGS && true_regnum (x) == -1)
+ return GENERAL_REGS;
+
return NO_REGS;
}
+/* Implement TARGET_CONDITIONAL_REGISTER_USAGE.  Adjust the fixed and
+   call-clobbered state of the hard registers according to the selected
+   CPU/ABI, PIC-ness and endianness.  */
+static void
+sh_conditional_register_usage (void)
+{
+ int regno;
+ /* Registers that do not exist on the selected CPU are unusable.  */
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno ++)
+ if (! VALID_REGISTER_P (regno))
+ fixed_regs[regno] = call_used_regs[regno] = 1;
+ /* R8 and R9 are call-clobbered on SH5, but not on earlier SH ABIs. */
+ if (TARGET_SH5)
+ {
+ call_used_regs[FIRST_GENERAL_REG + 8]
+ = call_used_regs[FIRST_GENERAL_REG + 9] = 1;
+ call_really_used_regs[FIRST_GENERAL_REG + 8]
+ = call_really_used_regs[FIRST_GENERAL_REG + 9] = 1;
+ }
+ if (TARGET_SHMEDIA)
+ {
+ regno_reg_class[FIRST_GENERAL_REG] = GENERAL_REGS;
+ CLEAR_HARD_REG_SET (reg_class_contents[FP0_REGS]);
+ regno_reg_class[FIRST_FP_REG] = FP_REGS;
+ }
+ if (flag_pic)
+ {
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
+ }
+ /* Renesas saves and restores mac registers on call. */
+ if (TARGET_HITACHI && ! TARGET_NOMACSAVE)
+ {
+ call_really_used_regs[MACH_REG] = 0;
+ call_really_used_regs[MACL_REG] = 0;
+ }
+ /* Put every other FP register into DF_HI_REGS; endianness determines
+ which half of each pair goes in.  */
+ for (regno = FIRST_FP_REG + (TARGET_LITTLE_ENDIAN != 0);
+ regno <= LAST_FP_REG; regno += 2)
+ SET_HARD_REG_BIT (reg_class_contents[DF_HI_REGS], regno);
+ /* SIBCALL_REGS holds the non-fixed, truly call-clobbered registers
+ that a sibling call may use.  */
+ if (TARGET_SHMEDIA)
+ {
+ for (regno = FIRST_TARGET_REG; regno <= LAST_TARGET_REG; regno ++)
+ if (! fixed_regs[regno] && call_really_used_regs[regno])
+ SET_HARD_REG_BIT (reg_class_contents[SIBCALL_REGS], regno);
+ }
+ else
+ for (regno = FIRST_GENERAL_REG; regno <= LAST_GENERAL_REG; regno++)
+ if (! fixed_regs[regno] && call_really_used_regs[regno])
+ SET_HARD_REG_BIT (reg_class_contents[SIBCALL_REGS], regno);
+}
+
+/* Implement TARGET_LEGITIMATE_CONSTANT_P.
+
+ Note that can_store_by_pieces constructs VOIDmode CONST_DOUBLEs,
+ which is why VOIDmode is accepted in the non-SHmedia case below. */
+
+static bool
+sh_legitimate_constant_p (enum machine_mode mode, rtx x)
+{
+ return (TARGET_SHMEDIA
+ ? ((mode != DFmode && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
+ || x == CONST0_RTX (mode)
+ || !TARGET_SHMEDIA_FPU
+ || TARGET_SHMEDIA64)
+ : (GET_CODE (x) != CONST_DOUBLE
+ || mode == DFmode || mode == SFmode
+ || mode == DImode || GET_MODE (x) == VOIDmode));
+}
+
enum sh_divide_strategy_e sh_div_strategy = SH_DIV_STRATEGY_DEFAULT;
+/* Register the out-of-line __sync libcall names for accesses up to
+   UNITS_PER_WORD in size (see init_sync_libfuncs).  */
+static void
+sh_init_sync_libfuncs (void)
+{
+ init_sync_libfuncs (UNITS_PER_WORD);
+}
+
#include "gt-sh.h"