return dest;
}
}
+
+/* Emit a call sequence with call pattern PATTERN and return the call
+ instruction itself (which is not necessarily the last instruction
+ emitted). LAZY_P is true if the call address is lazily-bound. */
+
+static rtx
+mips_emit_call_insn (rtx pattern, bool lazy_p)
+{
+ rtx insn;
+
+ insn = emit_call_insn (pattern);
+
+ /* Lazy-binding stubs require $gp to be valid on entry. */
+ if (lazy_p)
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
+
+ if (TARGET_USE_GOT)
+ {
+ /* See the comment above load_call<mode> for details. */
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
+ gen_rtx_REG (Pmode, GOT_VERSION_REGNUM));
+ emit_insn (gen_update_got_version ());
+ }
+ return insn;
+}
\f
+/* Return an instruction that copies $gp into register REG. We want
+ GCC to treat the register's value as constant, so that its value
+ can be rematerialized on demand. */
+
+static rtx
+gen_load_const_gp (rtx reg)
+{
+ return (Pmode == SImode
+ ? gen_load_const_gp_si (reg)
+ : gen_load_const_gp_di (reg));
+}
+
/* Return a pseudo register that contains the value of $gp throughout
the current function. Such registers are needed by MIPS16 functions,
for which $gp itself is not a valid base register or addition operand. */
{
rtx insn, scan, after;
- /* We want GCC to treat the register's value as constant, so that
- it can be rematerialized on demand. */
insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
push_topmost_sequence ();
static rtx
mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
{
- rtx insn, loc, tga, a0;
+ rtx insn, loc, a0;
a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
emit_insn (gen_rtx_SET (Pmode, a0,
gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
- tga = gen_const_mem (Pmode, mips_tls_symbol);
- insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
+ insn = mips_expand_call (v0, mips_tls_symbol, const0_rtx, const0_rtx, false);
CONST_OR_PURE_CALL_P (insn) = 1;
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
insn = get_insns ();
if (TREE_CODE (field) != FIELD_DECL)
continue;
- if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
+ if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
return 0;
if (i == 2)
&& GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
}
+/* Return the representation of an FPR return register when the
+ value being returned in FP_RETURN has mode VALUE_MODE and the
+ return type itself has mode TYPE_MODE. On NewABI targets,
+ the two modes may be different for structures like:
+
+ struct __attribute__((packed)) foo { float f; }
+
+ where we return the SFmode value of "f" in FP_RETURN, but where
+ the structure itself has mode BLKmode. */
+
+static rtx
+mips_return_fpr_single (enum machine_mode type_mode,
+ enum machine_mode value_mode)
+{
+ rtx x;
+
+ x = gen_rtx_REG (value_mode, FP_RETURN);
+ if (type_mode != value_mode)
+ {
+ x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
+ x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
+ }
+ return x;
+}
+
/* Return a composite value in a pair of floating-point registers.
MODE1 and OFFSET1 are the mode and byte offset for the first value,
likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
switch (mips_fpr_return_fields (valtype, fields))
{
case 1:
- return gen_rtx_REG (mode, FP_RETURN);
+ return mips_return_fpr_single (mode,
+ TYPE_MODE (TREE_TYPE (fields[0])));
case 2:
return mips_return_fpr_pair (mode,
return ptr_type_node;
}
-/* Implement EXPAND_BUILTIN_VA_START. */
+/* Implement TARGET_EXPAND_BUILTIN_VA_START. */
-void
+static void
mips_va_start (tree valist, rtx nextarg)
{
if (EABI_FLOAT_VARARGS_P)
fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
arg = gen_rtx_REG (return_mode, GP_RETURN);
call = gen_call_value_internal (arg, fn, const0_rtx);
- insn = emit_call_insn (call);
+ insn = mips_emit_call_insn (call, false);
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), arg);
}
arguments and FP_CODE is the code built by mips_function_arg;
see the comment above CUMULATIVE_ARGS for details.
- Return true if a stub was needed, and emit the call if so.
+ If a stub was needed, emit the call and return the call insn itself.
+ Return null otherwise.
A stub is needed for calls to functions that, in normal mode,
receive arguments in FPRs or return values in FPRs. The stub
to be to a non-MIPS16 function, the linker automatically redirects
the JAL to the stub, otherwise the JAL continues to call FN directly. */
-static bool
+static rtx
mips16_build_call_stub (rtx retval, rtx fn, rtx args_size, int fp_code)
{
const char *fnname;
/* We don't need to do anything if we aren't in MIPS16 mode, or if
we were invoked with the -msoft-float option. */
if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
- return false;
+ return NULL_RTX;
/* Figure out whether the value might come back in a floating-point
register. */
arguments and the value will not be returned in a floating-point
register. */
if (fp_code == 0 && !fp_ret_p)
- return false;
+ return NULL_RTX;
/* We don't need to do anything if this is a call to a special
MIPS16 support function. */
if (GET_CODE (fn) == SYMBOL_REF
&& strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
- return false;
+ return NULL_RTX;
/* This code will only work for o32 and o64 abis. The other ABI's
require more sophisticated support. */
insn = gen_call_internal (stub_fn, args_size);
else
insn = gen_call_value_internal (retval, stub_fn, args_size);
- insn = emit_call_insn (insn);
+ insn = mips_emit_call_insn (insn, false);
/* Tell GCC that this call does indeed use the value of $2. */
CALL_INSN_FUNCTION_USAGE (insn) =
gen_rtx_REG (word_mode, 18)),
CALL_INSN_FUNCTION_USAGE (insn));
- return true;
+ return insn;
}
/* We know the function we are going to call. If we have already
insn = gen_call_internal_direct (fn, args_size);
else
insn = gen_call_value_internal_direct (retval, fn, args_size);
- insn = emit_call_insn (insn);
+ insn = mips_emit_call_insn (insn, false);
/* If we are calling a stub which handles a floating-point return
value, we need to arrange to save $18 in the prologue. We do this
gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
CALL_INSN_FUNCTION_USAGE (insn));
- return true;
+ return insn;
}
\f
/* Return true if calls to X can use R_MIPS_CALL* relocations. */
ADDR is the address of the function, ARGS_SIZE is the size of the
arguments and AUX is the value passed to us by mips_function_arg.
SIBCALL_P is true if we are expanding a sibling call, false if we're
- expanding a normal call. */
+ expanding a normal call.
-void
+ Return the call itself. */
+
+rtx
mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, bool sibcall_p)
{
rtx orig_addr, pattern, insn;
lazy_p = mips_load_call_address (addr, orig_addr, sibcall_p);
}
- if (TARGET_MIPS16
- && TARGET_HARD_FLOAT_ABI
- && mips16_build_call_stub (result, addr, args_size,
- aux == 0 ? 0 : (int) GET_MODE (aux)))
+ insn = mips16_build_call_stub (result, addr, args_size,
+ aux == 0 ? 0 : (int) GET_MODE (aux));
+ if (insn)
{
- gcc_assert (!sibcall_p);
- return;
+ gcc_assert (!sibcall_p && !lazy_p);
+ return insn;
}
if (result == 0)
: gen_call_internal (addr, args_size));
else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
{
+ /* Handle return values created by mips_return_fpr_pair. */
rtx reg1, reg2;
reg1 = XEXP (XVECEXP (result, 0, 0), 0);
: gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
}
else
- pattern = (sibcall_p
- ? gen_sibcall_value_internal (result, addr, args_size)
- : gen_call_value_internal (result, addr, args_size));
-
- insn = emit_call_insn (pattern);
-
- /* Lazy-binding stubs require $gp to be valid on entry. We also pretend
- that they use FAKE_CALL_REGNO; see the load_call<mode> patterns for
- details. */
- if (lazy_p)
{
- use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
- use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
- gen_rtx_REG (Pmode, FAKE_CALL_REGNO));
+ /* Handle return values created by mips_return_fpr_single. */
+ if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
+ result = XEXP (XVECEXP (result, 0, 0), 0);
+ pattern = (sibcall_p
+ ? gen_sibcall_value_internal (result, addr, args_size)
+ : gen_call_value_internal (result, addr, args_size));
}
+
+ return mips_emit_call_insn (pattern, lazy_p);
}
/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
&& const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
return false;
- /* When -minterlink-mips16 is in effect, assume that external
- functions could be MIPS16 ones unless an attribute explicitly
- tells us otherwise. */
+ /* When -minterlink-mips16 is in effect, assume that non-locally-binding
+ functions could be MIPS16 ones unless an attribute explicitly tells
+ us otherwise. */
if (TARGET_INTERLINK_MIPS16
&& decl
- && DECL_EXTERNAL (decl)
+ && (DECL_EXTERNAL (decl) || !targetm.binds_local_p (decl))
&& !mips_nomips16_decl_p (decl)
&& const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
return false;
return false;
}
+ /* We have traditionally not treated zero-sized objects as small data,
+ so this is now effectively part of the ABI. */
size = int_size_in_bytes (TREE_TYPE (decl));
- return size <= mips_small_data_threshold;
+ return size > 0 && size <= mips_small_data_threshold;
}
/* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
/* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
-static void
+static void ATTRIBUTE_UNUSED
mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
switch (size)
#ifdef HAVE_AS_GNU_ATTRIBUTE
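+  /* .gnu_attribute 4 records the FP ABI in use: 1 for hard float with
+     64-bit doubles, 2 for hard float with -msingle-float, 3 for soft
+     float, and 4 for hard float with 64-bit FPRs on a 32-bit
+     architecture (the new case added below).  */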
fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
- TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
+ (TARGET_HARD_FLOAT_ABI
+ ? (TARGET_DOUBLE_FLOAT
+ ? ((!TARGET_64BIT && TARGET_FLOAT64) ? 4 : 1) : 2) : 3));
#endif
}
return offset;
}
\f
-/* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
- value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
+/* Implement TARGET_EXTRA_LIVE_ON_ENTRY. */
static void
mips_extra_live_on_entry (bitmap regs)
{
- if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
- bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
+ if (TARGET_USE_GOT)
+ {
+ /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
+ the global pointer. */
+ if (!TARGET_ABSOLUTE_ABICALLS)
+ bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
+
+ /* See the comment above load_call<mode> for details. */
+ bitmap_set_bit (regs, GOT_VERSION_REGNUM);
+ }
}
/* Implement RETURN_ADDR_RTX. We do not support moving back to a
static void
mips_emit_loadgp (void)
{
- rtx addr, offset, incoming_address, base, index;
+ rtx addr, offset, incoming_address, base, index, pic_reg;
+ pic_reg = pic_offset_table_rtx;
switch (mips_current_loadgp_style ())
{
case LOADGP_ABSOLUTE:
mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
}
- emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
+ emit_insn (Pmode == SImode
+ ? gen_loadgp_absolute_si (pic_reg, mips_gnu_local_gp)
+ : gen_loadgp_absolute_di (pic_reg, mips_gnu_local_gp));
break;
case LOADGP_NEWABI:
addr = XEXP (DECL_RTL (current_function_decl), 0);
offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
- emit_insn (gen_loadgp_newabi (offset, incoming_address));
+ emit_insn (Pmode == SImode
+ ? gen_loadgp_newabi_si (pic_reg, offset, incoming_address)
+ : gen_loadgp_newabi_di (pic_reg, offset, incoming_address));
if (!TARGET_EXPLICIT_RELOCS)
emit_insn (gen_loadgp_blockage ());
break;
case LOADGP_RTP:
base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
- emit_insn (gen_loadgp_rtp (base, index));
+ emit_insn (Pmode == SImode
+ ? gen_loadgp_rtp_si (pic_reg, base, index)
+ : gen_loadgp_rtp_di (pic_reg, base, index));
if (!TARGET_EXPLICIT_RELOCS)
emit_insn (gen_loadgp_blockage ());
break;
if (ALL_COP_REG_P (regno))
return class == MODE_INT && size <= UNITS_PER_WORD;
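+  /* The GOT version register is only ever used in SImode; see the
+     comment above load_call<mode>.  */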
+ if (regno == GOT_VERSION_REGNUM)
+ return mode == SImode;
+
return false;
}
dfa_finish ();
}
\f
-/* Subroutine of mips_reorg. If there is a hazard between INSN
- and a previous instruction, avoid it by inserting nops after
+/* This structure records that the current function has a LO_SUM
+   involving SYMBOL_REF or LABEL_REF BASE and that OFFSET is the
+   largest offset applied to BASE by all such LO_SUMs.  */
+struct mips_lo_sum_offset {
+ rtx base;
+ HOST_WIDE_INT offset;
+};
+
+/* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */
+
+static hashval_t
+mips_hash_base (rtx base)
+{
+ int do_not_record_p;
+
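+  /* The do_not_record_p output is not needed for the SYMBOL_REF and
+     LABEL_REF bases hashed here, so it is simply discarded.  */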
+ return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false);
+}
+
+/* Hash-table callbacks for mips_lo_sum_offsets. */
+
+static hashval_t
+mips_lo_sum_offset_hash (const void *entry)
+{
+ return mips_hash_base (((const struct mips_lo_sum_offset *) entry)->base);
+}
+
+static int
+mips_lo_sum_offset_eq (const void *entry, const void *value)
+{
+ return rtx_equal_p (((const struct mips_lo_sum_offset *) entry)->base,
+ (const_rtx) value);
+}
+
+/* Look up symbolic constant X in HTAB, which is a hash table of
+ mips_lo_sum_offsets. If OPTION is NO_INSERT, return true if X can be
+ paired with a recorded LO_SUM, otherwise record X in the table. */
+
+static bool
+mips_lo_sum_offset_lookup (htab_t htab, rtx x, enum insert_option option)
+{
+ rtx base, offset;
+ void **slot;
+ struct mips_lo_sum_offset *entry;
+
+ /* Split X into a base and offset. */
+ split_const (x, &base, &offset);
+ if (UNSPEC_ADDRESS_P (base))
+ base = UNSPEC_ADDRESS (base);
+
+ /* Look up the base in the hash table. */
+ slot = htab_find_slot_with_hash (htab, base, mips_hash_base (base), option);
+ if (slot == NULL)
+ return false;
+
+ entry = (struct mips_lo_sum_offset *) *slot;
+ if (option == INSERT)
+ {
+ if (entry == NULL)
+ {
+ entry = XNEW (struct mips_lo_sum_offset);
+ entry->base = base;
+ entry->offset = INTVAL (offset);
+ *slot = entry;
+ }
+ else
+ {
+ if (INTVAL (offset) > entry->offset)
+ entry->offset = INTVAL (offset);
+ }
+ }
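+  /* With INSERT, ENTRY->offset has just been made at least INTVAL (offset),
+     so the test below is always true.  With NO_INSERT, X can be paired
+     with a recorded LO_SUM only if its offset is within the largest
+     offset recorded for BASE.  */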
+ return INTVAL (offset) <= entry->offset;
+}
+
+/* A for_each_rtx callback for which DATA is a mips_lo_sum_offset hash table.
+ Record every LO_SUM in *LOC. */
+
+static int
+mips_record_lo_sum (rtx *loc, void *data)
+{
+ if (GET_CODE (*loc) == LO_SUM)
+ mips_lo_sum_offset_lookup ((htab_t) data, XEXP (*loc, 1), INSERT);
+ return 0;
+}
+
+/* Return true if INSN is a SET of an orphaned high-part relocation.
+ HTAB is a hash table of mips_lo_sum_offsets that describes all the
+ LO_SUMs in the current function. */
+
+static bool
+mips_orphaned_high_part_p (htab_t htab, rtx insn)
+{
+ enum mips_symbol_type type;
+ rtx x, set;
+
+ set = single_set (insn);
+ if (set)
+ {
+ /* Check for %his. */
+ x = SET_SRC (set);
+ if (GET_CODE (x) == HIGH
+ && absolute_symbolic_operand (XEXP (x, 0), VOIDmode))
+ return !mips_lo_sum_offset_lookup (htab, XEXP (x, 0), NO_INSERT);
+
+ /* Check for local %gots (and %got_pages, which is redundant but OK). */
+ if (GET_CODE (x) == UNSPEC
+ && XINT (x, 1) == UNSPEC_LOAD_GOT
+ && mips_symbolic_constant_p (XVECEXP (x, 0, 1),
+ SYMBOL_CONTEXT_LEA, &type)
+ && type == SYMBOL_GOTOFF_PAGE)
+ return !mips_lo_sum_offset_lookup (htab, XVECEXP (x, 0, 1), NO_INSERT);
+ }
+ return false;
+}
+
+/* Subroutine of mips_reorg_process_insns. If there is a hazard between
+ INSN and a previous instruction, avoid it by inserting nops after
instruction AFTER.
*DELAYED_REG and *HILO_DELAY describe the hazards that apply at
rtx *delayed_reg, rtx lo_reg)
{
rtx pattern, set;
- int nops, ninsns, hazard_set;
-
- if (!INSN_P (insn))
- return;
+ int nops, ninsns;
pattern = PATTERN (insn);
break;
case HAZARD_DELAY:
- hazard_set = (int) get_attr_hazard_set (insn);
- if (hazard_set == 0)
- set = single_set (insn);
- else
- {
- gcc_assert (GET_CODE (PATTERN (insn)) == PARALLEL);
- set = XVECEXP (PATTERN (insn), 0, hazard_set - 1);
- }
- gcc_assert (set && GET_CODE (set) == SET);
+ set = single_set (insn);
+ gcc_assert (set);
*delayed_reg = SET_DEST (set);
break;
}
}
/* Go through the instruction stream and insert nops where necessary.
- See if the whole function can then be put into .set noreorder &
- .set nomacro. */
+ Also delete any high-part relocations whose partnering low parts
+ are now all dead. See if the whole function can then be put into
+ .set noreorder and .set nomacro. */
static void
-mips_avoid_hazards (void)
+mips_reorg_process_insns (void)
{
- rtx insn, last_insn, lo_reg, delayed_reg;
- int hilo_delay, i;
+ rtx insn, last_insn, subinsn, next_insn, lo_reg, delayed_reg;
+ int hilo_delay;
+ htab_t htab;
/* Force all instructions to be split into their final form. */
split_all_insns_noflow ();
cfun->machine->all_noreorder_p = true;
+ /* Code that doesn't use explicit relocs can't be ".set nomacro". */
+ if (!TARGET_EXPLICIT_RELOCS)
+ cfun->machine->all_noreorder_p = false;
+
/* Profiled functions can't be all noreorder because the profiler
support uses assembler macros. */
if (current_function_profile)
if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
cfun->machine->all_noreorder_p = false;
+ htab = htab_create (37, mips_lo_sum_offset_hash,
+ mips_lo_sum_offset_eq, free);
+
+ /* Make a first pass over the instructions, recording all the LO_SUMs. */
+ for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
+ FOR_EACH_SUBINSN (subinsn, insn)
+ if (INSN_P (subinsn))
+ for_each_rtx (&PATTERN (subinsn), mips_record_lo_sum, htab);
+
last_insn = 0;
hilo_delay = 2;
delayed_reg = 0;
lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
- for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
- if (INSN_P (insn))
- {
- if (GET_CODE (PATTERN (insn)) == SEQUENCE)
- for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
- mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
- &hilo_delay, &delayed_reg, lo_reg);
- else
- mips_avoid_hazard (last_insn, insn, &hilo_delay,
- &delayed_reg, lo_reg);
-
- last_insn = insn;
- }
+ /* Make a second pass over the instructions. Delete orphaned
+ high-part relocations or turn them into NOPs. Avoid hazards
+ by inserting NOPs. */
+ for (insn = get_insns (); insn != 0; insn = next_insn)
+ {
+ next_insn = NEXT_INSN (insn);
+ if (INSN_P (insn))
+ {
+ if (GET_CODE (PATTERN (insn)) == SEQUENCE)
+ {
+ /* If we find an orphaned high-part relocation in a delay
+ slot, it's easier to turn that instruction into a NOP than
+ to delete it. The delay slot will be a NOP either way. */
+ FOR_EACH_SUBINSN (subinsn, insn)
+ if (INSN_P (subinsn))
+ {
+ if (mips_orphaned_high_part_p (htab, subinsn))
+ {
+ PATTERN (subinsn) = gen_nop ();
+ INSN_CODE (subinsn) = CODE_FOR_nop;
+ }
+ mips_avoid_hazard (last_insn, subinsn, &hilo_delay,
+ &delayed_reg, lo_reg);
+ }
+ last_insn = insn;
+ }
+ else
+ {
+ /* INSN is a single instruction. Delete it if it's an
+ orphaned high-part relocation. */
+ if (mips_orphaned_high_part_p (htab, insn))
+ delete_insn (insn);
+ else
+ {
+ mips_avoid_hazard (last_insn, insn, &hilo_delay,
+ &delayed_reg, lo_reg);
+ last_insn = insn;
+ }
+ }
+ }
+ }
+
+ htab_delete (htab);
}
/* Implement TARGET_MACHINE_DEPENDENT_REORG. */
mips_reorg (void)
{
mips16_lay_out_constants ();
- if (TARGET_EXPLICIT_RELOCS)
- {
- if (mips_base_delayed_branch)
- dbr_schedule (get_insns ());
- mips_avoid_hazards ();
- if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
- vr4130_align_insns ();
- }
+ if (mips_base_delayed_branch)
+ dbr_schedule (get_insns ());
+ mips_reorg_process_insns ();
+ if (TARGET_EXPLICIT_RELOCS && TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
+ vr4130_align_insns ();
}
\f
/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
final_start_function (insn, file, 1);
final (insn, file, 1);
final_end_function ();
+ free_after_compilation (cfun);
/* Clean up the vars set above. Note that final_end_function resets
the global pointer for us. */
/* Restore base settings of various flags. */
target_flags = mips_base_target_flags;
- flag_delayed_branch = mips_base_delayed_branch;
flag_schedule_insns = mips_base_schedule_insns;
flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
flag_move_loop_invariants = mips_base_move_loop_invariants;
/* Switch to normal (non-MIPS16) mode. */
target_flags &= ~MASK_MIPS16;
- /* When using explicit relocs, we call dbr_schedule from within
- mips_reorg. */
- if (TARGET_EXPLICIT_RELOCS)
- flag_delayed_branch = 0;
-
/* Provide default values for align_* for 64-bit targets. */
if (TARGET_64BIT)
{
/* Now select the ISA mode. */
mips_set_mips16_mode (mips_base_mips16);
+
+ /* We call dbr_schedule from within mips_reorg. */
+ flag_delayed_branch = 0;
}
/* Swap the register information for registers I and I + 1, which
#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
+#undef TARGET_EXPAND_BUILTIN_VA_START
+#define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr