X-Git-Url: http://git.sourceforge.jp/view?p=pf3gnuchains%2Fgcc-fork.git;a=blobdiff_plain;f=gcc%2Fconfig%2Fsparc%2Fsparc.c;h=54c81b191f4cc35af5f0415db49d3028115682bd;hp=f94af2ac5d17423fb930e5e6ac9860fe53cdfaab;hb=d5b52ebcb904321cccc7b7f63d61c584b046ce3e;hpb=723e1902a4a3de3c537888fc3c063a13850f4fc2

diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c
index f94af2ac5d1..54c81b191f4 100644
--- a/gcc/config/sparc/sparc.c
+++ b/gcc/config/sparc/sparc.c
@@ -1,6 +1,6 @@
 /* Subroutines for insn-output.c for SPARC.
    Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
-   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
    Free Software Foundation, Inc.
    Contributed by Michael Tiemann (tiemann@cygnus.com)
    64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
@@ -10,7 +10,7 @@ This file is part of GCC.
 
 GCC is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
+the Free Software Foundation; either version 3, or (at your option)
 any later version.
 
 GCC is distributed in the hope that it will be useful,
@@ -19,9 +19,8 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING.  If not, write to
-the Free Software Foundation, 51 Franklin Street, Fifth Floor,
-Boston, MA 02110-1301, USA.  */
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
 
 #include "config.h"
 #include "system.h"
@@ -52,6 +51,7 @@ Boston, MA 02110-1301, USA.  */
 #include "tree-gimple.h"
 #include "langhooks.h"
 #include "params.h"
+#include "df.h"
 
 /* Processor costs */
 static const
@@ -222,6 +222,30 @@ struct processor_costs niagara_costs = {
   0, /* shift penalty */
 };
 
+static const
+struct processor_costs niagara2_costs = {
+  COSTS_N_INSNS (3), /* int load */
+  COSTS_N_INSNS (3), /* int signed load */
+  COSTS_N_INSNS (3), /* int zeroed load */
+  COSTS_N_INSNS (3), /* float load */
+  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
+  COSTS_N_INSNS (6), /* fadd, fsub */
+  COSTS_N_INSNS (6), /* fcmp */
+  COSTS_N_INSNS (6), /* fmov, fmovr */
+  COSTS_N_INSNS (6), /* fmul */
+  COSTS_N_INSNS (19), /* fdivs */
+  COSTS_N_INSNS (33), /* fdivd */
+  COSTS_N_INSNS (19), /* fsqrts */
+  COSTS_N_INSNS (33), /* fsqrtd */
+  COSTS_N_INSNS (5), /* imul */
+  COSTS_N_INSNS (5), /* imulX */
+  0, /* imul bit factor */
+  COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
+  COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
+  COSTS_N_INSNS (1), /* movcc/movr */
+  0, /* shift penalty */
+};
+
 const struct processor_costs *sparc_costs = &cypress_costs;
 
 #ifdef HAVE_AS_RELAX_OPTION
@@ -251,10 +275,10 @@ static HOST_WIDE_INT actual_fsize;
 static int num_gfregs;
 
 /* The alias set for prologue/epilogue register save/restore.  */
-static GTY(()) int sparc_sr_alias_set;
+static GTY(()) alias_set_type sparc_sr_alias_set;
 
 /* The alias set for the structure return value.  */
-static GTY(()) int struct_value_alias_set;
+static GTY(()) alias_set_type struct_value_alias_set;
 
 /* Save the operands last given to a compare for use when we
    generate a scc or bcc insn.  */
@@ -372,8 +396,8 @@ static int sparc_vis_mul8x16 (int, int);
 static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
 static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
-static bool sparc_can_output_mi_thunk (tree, HOST_WIDE_INT,
-                                       HOST_WIDE_INT, tree);
+static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
+                                       HOST_WIDE_INT, const_tree);
 static struct machine_function * sparc_init_machine_status (void);
 static bool sparc_cannot_force_const_mem (rtx);
 static rtx sparc_tls_get_addr (void);
@@ -381,21 +405,22 @@ static rtx sparc_tls_got (void);
 static const char *get_some_local_dynamic_name (void);
 static int get_some_local_dynamic_name_1 (rtx *, void *);
 static bool sparc_rtx_costs (rtx, int, int, int *);
-static bool sparc_promote_prototypes (tree);
+static bool sparc_promote_prototypes (const_tree);
 static rtx sparc_struct_value_rtx (tree, int);
-static bool sparc_return_in_memory (tree, tree);
+static bool sparc_return_in_memory (const_tree, const_tree);
 static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
+static void sparc_va_start (tree, rtx);
 static tree sparc_gimplify_va_arg (tree, tree, tree *, tree *);
 static bool sparc_vector_mode_supported_p (enum machine_mode);
 static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
-                                     enum machine_mode, tree, bool);
+                                     enum machine_mode, const_tree, bool);
 static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
                                     enum machine_mode, tree, bool);
 static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
 static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
 static void sparc_file_end (void);
 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
-static const char *sparc_mangle_fundamental_type (tree);
+static const char *sparc_mangle_type (const_tree);
 #endif
 #ifdef SUBTARGET_ATTRIBUTE_TABLE
 const struct attribute_spec sparc_attribute_table[];
@@ -494,13 +519,13 @@ static bool fpu_option_set = false;
    no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
    test for this value.  */
 #undef TARGET_PROMOTE_FUNCTION_ARGS
-#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
+#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
 
 /* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
    no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
    test for this value.  */
 #undef TARGET_PROMOTE_FUNCTION_RETURN
-#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
+#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
 
 #undef TARGET_PROMOTE_PROTOTYPES
 #define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes
@@ -521,6 +546,8 @@ static bool fpu_option_set = false;
 #undef TARGET_STRICT_ARGUMENT_NAMING
 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
 
+#undef TARGET_EXPAND_BUILTIN_VA_START
+#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
 
@@ -557,8 +584,8 @@ static bool fpu_option_set = false;
 #define TARGET_ASM_FILE_END sparc_file_end
 
 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
-#undef TARGET_MANGLE_FUNDAMENTAL_TYPE
-#define TARGET_MANGLE_FUNDAMENTAL_TYPE sparc_mangle_fundamental_type
+#undef TARGET_MANGLE_TYPE
+#define TARGET_MANGLE_TYPE sparc_mangle_type
 #endif
 
 struct gcc_target targetm = TARGET_INITIALIZER;
@@ -623,6 +650,7 @@ sparc_override_options (void)
     { TARGET_CPU_ultrasparc, "ultrasparc" },
     { TARGET_CPU_ultrasparc3, "ultrasparc3" },
     { TARGET_CPU_niagara, "niagara" },
+    { TARGET_CPU_niagara2, "niagara2" },
     { 0, 0 }
   };
   const struct cpu_default *def;
@@ -660,6 +688,7 @@ sparc_override_options (void)
     { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
     /* UltraSPARC T1 */
     { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
+    { "niagara2", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9},
     { 0, 0, 0, 0 }
   };
   const struct cpu_table *cpu;
@@ -704,7 +733,7 @@ sparc_override_options (void)
         error ("-mcmodel= is not supported on 32 bit systems");
     }
 
-  fpu = TARGET_FPU; /* save current -mfpu status */
+  fpu = target_flags & MASK_FPU; /* save current -mfpu status */
 
   /* Set the default CPU.  */
   for (def = &cpu_default[0]; def->name; ++def)
@@ -770,7 +799,8 @@ sparc_override_options (void)
   if (align_functions == 0
       && (sparc_cpu == PROCESSOR_ULTRASPARC
          || sparc_cpu == PROCESSOR_ULTRASPARC3
-         || sparc_cpu == PROCESSOR_NIAGARA))
+         || sparc_cpu == PROCESSOR_NIAGARA
+         || sparc_cpu == PROCESSOR_NIAGARA2))
     align_functions = 32;
 
   /* Validate PCC_STRUCT_RETURN.  */
@@ -822,6 +852,9 @@ sparc_override_options (void)
     case PROCESSOR_NIAGARA:
       sparc_costs = &niagara_costs;
       break;
+    case PROCESSOR_NIAGARA2:
+      sparc_costs = &niagara2_costs;
+      break;
     };
 
 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
@@ -832,7 +865,8 @@ sparc_override_options (void)
   if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
     set_param_value ("simultaneous-prefetches",
                      ((sparc_cpu == PROCESSOR_ULTRASPARC
-                       || sparc_cpu == PROCESSOR_NIAGARA)
+                       || sparc_cpu == PROCESSOR_NIAGARA
+                       || sparc_cpu == PROCESSOR_NIAGARA2)
                       ? 2
                       : (sparc_cpu == PROCESSOR_ULTRASPARC3
                          ? 8 : 3)));
@@ -840,7 +874,8 @@
     set_param_value ("l1-cache-line-size",
                      ((sparc_cpu == PROCESSOR_ULTRASPARC
                        || sparc_cpu == PROCESSOR_ULTRASPARC3
-                       || sparc_cpu == PROCESSOR_NIAGARA)
+                       || sparc_cpu == PROCESSOR_NIAGARA
+                       || sparc_cpu == PROCESSOR_NIAGARA2)
                       ? 64 : 32));
 }
 
@@ -3087,7 +3122,7 @@ legitimize_tls_address (rtx addr)
 {
   rtx temp1, temp2, temp3, ret, o0, got, insn;
 
-  gcc_assert (! no_new_pseudos);
+  gcc_assert (can_create_pseudo_p ());
 
   if (GET_CODE (addr) == SYMBOL_REF)
     switch (SYMBOL_REF_TLS_MODEL (addr))
@@ -3742,20 +3777,20 @@ sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
   if (TARGET_ARCH64)
     {
       for (i = 0; i < 8; i++)
-        if (regs_ever_live[i] && ! call_used_regs[i])
+        if (df_regs_ever_live_p (i) && ! call_used_regs[i])
          n_regs += 2;
     }
   else
     {
       for (i = 0; i < 8; i += 2)
-        if ((regs_ever_live[i] && ! call_used_regs[i])
-            || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
+        if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
+            || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
          n_regs += 2;
     }
 
   for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
-    if ((regs_ever_live[i] && ! call_used_regs[i])
-        || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
+    if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
+        || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
       n_regs += 2;
 
   /* Set up values for use in prologue and epilogue.  */
@@ -3798,7 +3833,7 @@ sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
      .register being printed for them already.  */
   for (i = 2; i < 8; i++)
     {
-      if (regs_ever_live [i]
+      if (df_regs_ever_live_p (i)
          && ! sparc_hard_reg_printed [i])
        {
          sparc_hard_reg_printed [i] = 1;
@@ -3829,7 +3864,7 @@ save_or_restore_regs (int low, int high, rtx base, int offset, int action)
     {
       for (i = low; i < high; i++)
        {
-         if (regs_ever_live[i] && ! call_used_regs[i])
+         if (df_regs_ever_live_p (i) && ! call_used_regs[i])
            {
              mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
              set_mem_alias_set (mem, sparc_sr_alias_set);
@@ -3848,8 +3883,8 @@ save_or_restore_regs (int low, int high, rtx base, int offset, int action)
     {
       for (i = low; i < high; i += 2)
        {
-         bool reg0 = regs_ever_live[i] && ! call_used_regs[i];
-         bool reg1 = regs_ever_live[i+1] && ! call_used_regs[i+1];
+         bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
+         bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
          enum machine_mode mode;
          int regno;
 
@@ -4469,7 +4504,7 @@ init_cumulative_args (struct sparc_args *cum, tree fntype,
    When a prototype says `char' or `short', really pass an `int'.  */
 
 static bool
-sparc_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
+sparc_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
 {
   return TARGET_ARCH32 ? true : false;
 }
@@ -4681,17 +4716,17 @@ struct function_arg_record_value_parms
 static void function_arg_record_value_3
  (HOST_WIDE_INT, struct function_arg_record_value_parms *);
 static void function_arg_record_value_2
- (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
+ (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
 static void function_arg_record_value_1
- (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
-static rtx function_arg_record_value (tree, enum machine_mode, int, int, int);
+ (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
+static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
 static rtx function_arg_union_value (int, enum machine_mode, int, int);
 
 /* A subroutine of function_arg_record_value.  Traverse the structure
    recursively and determine how many registers will be required.  */
 
 static void
-function_arg_record_value_1 (tree type, HOST_WIDE_INT startbitpos,
+function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
                              struct function_arg_record_value_parms *parms,
                              bool packed_p)
 {
@@ -4847,7 +4882,7 @@ function_arg_record_value_3 (HOST_WIDE_INT bitpos,
    to make that happen.  */
 
 static void
-function_arg_record_value_2 (tree type, HOST_WIDE_INT startbitpos,
+function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
                              struct function_arg_record_value_parms *parms,
                              bool packed_p)
 {
@@ -4954,7 +4989,7 @@ function_arg_record_value_2 (tree type, HOST_WIDE_INT startbitpos,
    REGBASE is the regno of the base register for the parameter array.  */
 
 static rtx
-function_arg_record_value (tree type, enum machine_mode mode,
+function_arg_record_value (const_tree type, enum machine_mode mode,
                            int slotno, int named, int regbase)
 {
   HOST_WIDE_INT typesize = int_size_in_bytes (type);
@@ -5327,7 +5362,7 @@ sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
 
 static bool
 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
-                         enum machine_mode mode, tree type,
+                         enum machine_mode mode, const_tree type,
                          bool named ATTRIBUTE_UNUSED)
 {
   if (TARGET_ARCH32)
@@ -5424,7 +5459,7 @@ function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
    argument slot.  */
 
 enum direction
-function_arg_padding (enum machine_mode mode, tree type)
+function_arg_padding (enum machine_mode mode, const_tree type)
 {
   if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
     return upward;
@@ -5437,7 +5472,7 @@ function_arg_padding (enum machine_mode mode, tree type)
    Specify whether to return the return value in memory.  */
 
 static bool
-sparc_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
+sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
 {
   if (TARGET_ARCH32)
     /* Original SPARC 32-bit ABI says that structures and unions,
@@ -5545,7 +5580,7 @@ sparc_struct_value_rtx (tree fndecl, int incoming)
    except that up to 32 bytes may be returned in registers.  */
 
 rtx
-function_value (tree type, enum machine_mode mode, int incoming_p)
+function_value (const_tree type, enum machine_mode mode, int incoming_p)
 {
   /* Beware that the two values are swapped here wrt function_arg.  */
   int regbase = (incoming_p
@@ -5664,7 +5699,7 @@ sparc_builtin_saveregs (void)
 
 /* Implement `va_start' for stdarg.  */
 
-void
+static void
 sparc_va_start (tree valist, rtx nextarg)
 {
   nextarg = expand_builtin_saveregs ();
@@ -5715,18 +5750,20 @@ sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
       incr = valist;
       if (align)
        {
-         incr = fold_build2 (PLUS_EXPR, ptr_type_node, incr,
-                             ssize_int (align - 1));
-         incr = fold_build2 (BIT_AND_EXPR, ptr_type_node, incr,
-                             ssize_int (-align));
+         incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
+                             size_int (align - 1));
+         incr = fold_convert (sizetype, incr);
+         incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
+                             size_int (-align));
+         incr = fold_convert (ptr_type_node, incr);
        }
 
       gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
       addr = incr;
 
      if (BYTES_BIG_ENDIAN && size < rsize)
-       addr = fold_build2 (PLUS_EXPR, ptr_type_node, incr,
-                           ssize_int (rsize - size));
+       addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
+                           size_int (rsize - size));
 
      if (indirect)
        {
@@ -5754,7 +5791,7 @@ sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
   else
     addr = fold_convert (ptrtype, addr);
 
-  incr = fold_build2 (PLUS_EXPR, ptr_type_node, incr, ssize_int (rsize));
+  incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
   incr = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, valist, incr);
   gimplify_and_add (incr, post_p);
 
@@ -6509,7 +6546,7 @@ order_regs_for_local_alloc (void)
 {
   static int last_order_nonleaf = 1;
 
-  if (regs_ever_live[15] != last_order_nonleaf)
+  if (df_regs_ever_live_p (15) != last_order_nonleaf)
     {
       last_order_nonleaf = !last_order_nonleaf;
       memcpy ((char *) reg_alloc_order,
@@ -7234,7 +7271,8 @@ sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
   emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
   if (sparc_cpu != PROCESSOR_ULTRASPARC
       && sparc_cpu != PROCESSOR_ULTRASPARC3
-      && sparc_cpu != PROCESSOR_NIAGARA)
+      && sparc_cpu != PROCESSOR_NIAGARA
+      && sparc_cpu != PROCESSOR_NIAGARA2)
     emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
                                                      plus_constant (tramp, 8)))));
 
@@ -7277,7 +7315,8 @@ sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
 
   if (sparc_cpu != PROCESSOR_ULTRASPARC
       && sparc_cpu != PROCESSOR_ULTRASPARC3
-      && sparc_cpu != PROCESSOR_NIAGARA)
+      && sparc_cpu != PROCESSOR_NIAGARA
+      && sparc_cpu != PROCESSOR_NIAGARA2)
     emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
 
   /* Call __enable_execute_stack after writing onto the stack to make sure
@@ -7457,7 +7496,8 @@ sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
 static int
 sparc_use_sched_lookahead (void)
 {
-  if (sparc_cpu == PROCESSOR_NIAGARA)
+  if (sparc_cpu == PROCESSOR_NIAGARA
+      || sparc_cpu == PROCESSOR_NIAGARA2)
     return 0;
   if (sparc_cpu == PROCESSOR_ULTRASPARC
       || sparc_cpu == PROCESSOR_ULTRASPARC3)
@@ -7475,6 +7515,7 @@ sparc_issue_rate (void)
   switch (sparc_cpu)
     {
     case PROCESSOR_NIAGARA:
+    case PROCESSOR_NIAGARA2:
     default:
       return 1;
     case PROCESSOR_V9:
@@ -7673,7 +7714,7 @@ sparc_check_64 (rtx x, rtx insn)
     y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
 
   if (flag_expensive_optimizations
-      && REG_N_SETS (REGNO (y)) == 1)
+      && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
     set_once = 1;
 
   if (insn == 0)
@@ -8028,8 +8069,10 @@ sparc_vis_init_builtins (void)
    Expand builtin functions for sparc intrinsics.  */
 
 static rtx
-sparc_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
-                      enum machine_mode tmode, int ignore ATTRIBUTE_UNUSED)
+sparc_expand_builtin (tree exp, rtx target,
+                      rtx subtarget ATTRIBUTE_UNUSED,
+                      enum machine_mode tmode ATTRIBUTE_UNUSED,
+                      int ignore ATTRIBUTE_UNUSED)
 {
   tree arg;
   call_expr_arg_iterator iter;
@@ -8039,14 +8082,13 @@ sparc_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
   enum machine_mode mode[4];
   int arg_count = 0;
 
-  mode[arg_count] = tmode;
-
-  if (target == 0
-      || GET_MODE (target) != tmode
-      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
-    op[arg_count] = gen_reg_rtx (tmode);
+  mode[0] = insn_data[icode].operand[0].mode;
+  if (!target
+      || GET_MODE (target) != mode[0]
+      || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
+    op[0] = gen_reg_rtx (mode[0]);
   else
-    op[arg_count] = target;
+    op[0] = target;
 
   FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
     {
@@ -8159,11 +8201,11 @@ sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
 {
   tree arg0, arg1, arg2;
   tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
-
-  if (ignore && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrsi_vis
+  if (ignore
+      && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrsi_vis
       && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrdi_vis)
-    return build_int_cst (rtype, 0);
+    return fold_convert (rtype, integer_zero_node);
 
   switch (DECL_FUNCTION_CODE (fndecl))
     {
@@ -8277,6 +8319,7 @@ sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
     default:
       break;
     }
+
   return NULL_TREE;
 }
 
@@ -8593,7 +8636,6 @@ sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
 
   reload_completed = 1;
   epilogue_completed = 1;
-  no_new_pseudos = 1;
 
   emit_note (NOTE_INSN_PROLOGUE_END);
 
@@ -8776,20 +8818,20 @@ sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
   final_start_function (insn, file, 1);
   final (insn, file, 1);
   final_end_function ();
+  free_after_compilation (cfun);
 
   reload_completed = 0;
   epilogue_completed = 0;
-  no_new_pseudos = 0;
 }
 
 /* Return true if sparc_output_mi_thunk would be able to output the
    assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */
 static bool
-sparc_can_output_mi_thunk (tree thunk_fndecl ATTRIBUTE_UNUSED,
+sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
                            HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
                            HOST_WIDE_INT vcall_offset,
-                           tree function ATTRIBUTE_UNUSED)
+                           const_tree function ATTRIBUTE_UNUSED)
 {
   /* Bound the loop used in the default method above.  */
   return (vcall_offset >= -32768 || ! fixed_regs[5]);
@@ -8885,10 +8927,10 @@ sparc_file_end (void)
 }
 
 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
-/* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE.  */
+/* Implement TARGET_MANGLE_TYPE.  */
 
 static const char *
-sparc_mangle_fundamental_type (tree type)
+sparc_mangle_type (const_tree type)
 {
   if (!TARGET_64BIT
       && TYPE_MAIN_VARIANT (type) == long_double_type_node