/* The expressions for any register column that is saved. */
cfi_vec reg_save;
-
- /* The value of any DW_CFA_GNU_args_size. */
- HOST_WIDE_INT args_size;
} dw_cfi_row;
/* The caller's ORIG_REG is saved in SAVED_IN_REG. */
/* The row state at the beginning and end of the trace. */
dw_cfi_row *beg_row, *end_row;
- /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS. */
- bool switch_sections;
+ /* Tracking for DW_CFA_GNU_args_size. The "true" sizes are those we find
+ while scanning insns. However, the args_size value is irrelevant at
+   any point except can_throw_internal_p insns. Therefore the "delay"
+   sizes are the values that must actually be emitted for this trace. */
+ HOST_WIDE_INT beg_true_args_size, end_true_args_size;
+ HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;
+
+ /* The first EH insn in the trace, where beg_delay_args_size must be set. */
+ rtx eh_head;
/* The following variables contain data used in interpreting frame related
expressions. These are not part of the "real" row state as defined by
a maximum of 5 entries. */
VEC(reg_saved_in_data, heap) *regs_saved_in_regs;
+ /* An identifier for this trace. Used only for debugging dumps. */
+ unsigned id;
+
+ /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS. */
+ bool switch_sections;
+
+ /* True if we've seen different values incoming to beg_true_args_size. */
+ bool args_size_undefined;
} dw_trace_info;
DEF_VEC_O (dw_trace_info);
/* The current, i.e. most recently generated, row of the CFI table. */
static dw_cfi_row *cur_row;
+/* A copy of the current CFA, for use during the processing of a
+ single insn. */
+static dw_cfa_location *cur_cfa;
+
/* We delay emitting a register save until either (a) we reach the end
of the prologue or (b) the register is clobbered. This clusters
register saves so that there are fewer pc advances. */
static VEC(queued_reg_save, heap) *queued_reg_saves;
-/* The (really) current value for DW_CFA_GNU_args_size. We delay actually
- emitting this data, i.e. updating CUR_ROW, without async unwind. */
-static HOST_WIDE_INT queued_args_size;
-
/* True if any CFI directives were emitted at the current insn. */
static bool any_cfis_emitted;
return a->head == b->head;
}
-static unsigned
-get_trace_index (dw_trace_info *trace)
-{
- return trace - VEC_address (dw_trace_info, trace_info);
-}
-
static dw_trace_info *
get_trace_info (rtx insn)
{
{
dw_cfi_ref cfi = new_cfi ();
+ /* While we can occasionally have args_size < 0 internally, this state
+ should not persist at a point we actually need an opcode. */
+ gcc_assert (size >= 0);
+
cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
else if (!cfa_equal_p (&a->cfa, &b->cfa))
return false;
- /* Logic suggests that we compare args_size here. However, if
- EXIT_IGNORE_STACK we don't bother tracking the args_size after
- the last time it really matters within the function. This does
- in fact lead to paths with differing arg_size, but in cases for
- which it doesn't matter. */
- /* ??? If we really want to sanity check the output of the optimizers,
- find a way to backtrack from epilogues to the last EH site. This
- would allow us to distinguish regions with garbage args_size and
- regions where paths ought to agree. */
-
n_a = VEC_length (dw_cfi_ref, a->reg_save);
n_b = VEC_length (dw_cfi_ref, b->reg_save);
n_max = MAX (n_a, n_b);
if (cfi)
{
cur_row->cfa = *new_cfa;
- if (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression)
- cur_row->cfa_cfi = cfi;
+ cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
+ ? cfi : NULL);
add_cfi (cfi);
}
update_row_reg_save (cur_row, reg, cfi);
}
-/* Given a SET, calculate the amount of stack adjustment it
- contains. */
-
-static HOST_WIDE_INT
-stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
- HOST_WIDE_INT cur_offset)
-{
- const_rtx src = SET_SRC (pattern);
- const_rtx dest = SET_DEST (pattern);
- HOST_WIDE_INT offset = 0;
- enum rtx_code code;
-
- if (dest == stack_pointer_rtx)
- {
- code = GET_CODE (src);
-
- /* Assume (set (reg sp) (reg whatever)) sets args_size
- level to 0. */
- if (code == REG && src != stack_pointer_rtx)
- {
- offset = -cur_args_size;
-#ifndef STACK_GROWS_DOWNWARD
- offset = -offset;
-#endif
- return offset - cur_offset;
- }
-
- if (! (code == PLUS || code == MINUS)
- || XEXP (src, 0) != stack_pointer_rtx
- || !CONST_INT_P (XEXP (src, 1)))
- return 0;
-
- /* (set (reg sp) (plus (reg sp) (const_int))) */
- offset = INTVAL (XEXP (src, 1));
- if (code == PLUS)
- offset = -offset;
- return offset;
- }
-
- if (MEM_P (src) && !MEM_P (dest))
- dest = src;
- if (MEM_P (dest))
- {
- /* (set (mem (pre_dec (reg sp))) (foo)) */
- src = XEXP (dest, 0);
- code = GET_CODE (src);
-
- switch (code)
- {
- case PRE_MODIFY:
- case POST_MODIFY:
- if (XEXP (src, 0) == stack_pointer_rtx)
- {
- rtx val = XEXP (XEXP (src, 1), 1);
- /* We handle only adjustments by constant amount. */
- gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
- && CONST_INT_P (val));
- offset = -INTVAL (val);
- break;
- }
- return 0;
-
- case PRE_DEC:
- case POST_DEC:
- if (XEXP (src, 0) == stack_pointer_rtx)
- {
- offset = GET_MODE_SIZE (GET_MODE (dest));
- break;
- }
- return 0;
-
- case PRE_INC:
- case POST_INC:
- if (XEXP (src, 0) == stack_pointer_rtx)
- {
- offset = -GET_MODE_SIZE (GET_MODE (dest));
- break;
- }
- return 0;
-
- default:
- return 0;
- }
- }
- else
- return 0;
-
- return offset;
-}
-
-/* Add a CFI to update the running total of the size of arguments
- pushed onto the stack. */
+/* A subroutine of scan_trace. Check INSN for a REG_ARGS_SIZE note
+ and adjust data structures to match. */
static void
-dwarf2out_args_size (HOST_WIDE_INT size)
+notice_args_size (rtx insn)
{
- if (size == cur_row->args_size)
- return;
-
- cur_row->args_size = size;
- add_cfi_args_size (size);
-}
+ HOST_WIDE_INT args_size, delta;
+ rtx note;
-/* Record a stack adjustment of OFFSET bytes. */
+ note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
+ if (note == NULL)
+ return;
-static void
-dwarf2out_stack_adjust (HOST_WIDE_INT offset)
-{
- dw_cfa_location loc = cur_row->cfa;
+ args_size = INTVAL (XEXP (note, 0));
+ delta = args_size - cur_trace->end_true_args_size;
+ if (delta == 0)
+ return;
- if (loc.reg == dw_stack_pointer_regnum)
- loc.offset += offset;
+ cur_trace->end_true_args_size = args_size;
- if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
- cur_trace->cfa_store.offset += offset;
+ /* If the CFA is computed off the stack pointer, then we must adjust
+ the computation of the CFA as well. */
+ if (cur_cfa->reg == dw_stack_pointer_regnum)
+ {
+ gcc_assert (!cur_cfa->indirect);
+ /* Convert a change in args_size (always a positive in the
+ direction of stack growth) to a change in stack pointer. */
#ifndef STACK_GROWS_DOWNWARD
- offset = -offset;
+ delta = -delta;
#endif
-
- queued_args_size += offset;
- if (queued_args_size < 0)
- queued_args_size = 0;
-
- /* ??? The assumption seems to be that if A_O_A, the only CFA adjustments
- involving the stack pointer are inside the prologue and marked as
- RTX_FRAME_RELATED_P. That said, should we not verify this assumption
- by *asserting* A_O_A at this point? Why else would we have a change
- to the stack pointer? */
- if (ACCUMULATE_OUTGOING_ARGS)
- return;
-
- def_cfa_1 (&loc);
- if (flag_asynchronous_unwind_tables)
- dwarf2out_args_size (queued_args_size);
+ cur_cfa->offset += delta;
+ }
}
-/* Check INSN to see if it looks like a push or a stack adjustment, and
- make a note of it if it does. EH uses this information to find out
- how much extra space it needs to pop off the stack. */
+/* A subroutine of scan_trace. INSN is can_throw_internal. Update the
+ data within the trace related to EH insns and args_size. */
static void
-dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
+notice_eh_throw (rtx insn)
{
- HOST_WIDE_INT offset;
- int i;
-
- /* Don't handle epilogues at all. Certainly it would be wrong to do so
- with this function. Proper support would require all frame-related
- insns to be marked, and to be able to handle saving state around
- epilogues textually in the middle of the function. */
- if (prologue_epilogue_contains (insn))
- return;
-
- /* If INSN is an instruction from target of an annulled branch, the
- effects are for the target only and so current argument size
- shouldn't change at all. */
- if (final_sequence
- && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
- && INSN_FROM_TARGET_P (insn))
- return;
+ HOST_WIDE_INT args_size;
- /* If only calls can throw, and we have a frame pointer,
- save up adjustments until we see the CALL_INSN. */
- if (!flag_asynchronous_unwind_tables
- && cur_row->cfa.reg != dw_stack_pointer_regnum)
+ args_size = cur_trace->end_true_args_size;
+ if (cur_trace->eh_head == NULL)
{
- if (CALL_P (insn) && !after_p)
- {
- /* Extract the size of the args from the CALL rtx itself. */
- insn = PATTERN (insn);
- if (GET_CODE (insn) == PARALLEL)
- insn = XVECEXP (insn, 0, 0);
- if (GET_CODE (insn) == SET)
- insn = SET_SRC (insn);
- gcc_assert (GET_CODE (insn) == CALL);
- gcc_assert (queued_args_size == INTVAL (XEXP (insn, 1)));
- dwarf2out_args_size (queued_args_size);
- }
- return;
+ cur_trace->eh_head = insn;
+ cur_trace->beg_delay_args_size = args_size;
+ cur_trace->end_delay_args_size = args_size;
}
-
- if (CALL_P (insn) && !after_p)
+ else if (cur_trace->end_delay_args_size != args_size)
{
- if (!flag_asynchronous_unwind_tables)
- dwarf2out_args_size (queued_args_size);
- return;
- }
- else if (BARRIER_P (insn))
- return;
- else if (GET_CODE (PATTERN (insn)) == SET)
- offset = stack_adjust_offset (PATTERN (insn), queued_args_size, 0);
- else if (GET_CODE (PATTERN (insn)) == PARALLEL
- || GET_CODE (PATTERN (insn)) == SEQUENCE)
- {
- /* There may be stack adjustments inside compound insns. Search
- for them. */
- for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
- if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
- offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
- queued_args_size, offset);
- }
- else
- return;
-
- if (offset == 0)
- return;
+ cur_trace->end_delay_args_size = args_size;
- dwarf2out_stack_adjust (offset);
+ /* ??? If the CFA is the stack pointer, search backward for the last
+ CFI note and insert there. Given that the stack changed for the
+ args_size change, there *must* be such a note in between here and
+ the last eh insn. */
+ add_cfi_args_size (args_size);
+ }
}
/* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
- dw_cfa_location loc;
+ memset (cur_cfa, 0, sizeof (*cur_cfa));
- memset (&loc, 0, sizeof (loc));
-
- switch (GET_CODE (pat))
+ if (GET_CODE (pat) == PLUS)
{
- case PLUS:
- loc.reg = dwf_regno (XEXP (pat, 0));
- loc.offset = INTVAL (XEXP (pat, 1));
- break;
-
- case REG:
- loc.reg = dwf_regno (pat);
- break;
-
- case MEM:
- loc.indirect = 1;
+ cur_cfa->offset = INTVAL (XEXP (pat, 1));
+ pat = XEXP (pat, 0);
+ }
+ if (MEM_P (pat))
+ {
+ cur_cfa->indirect = 1;
pat = XEXP (pat, 0);
if (GET_CODE (pat) == PLUS)
{
- loc.base_offset = INTVAL (XEXP (pat, 1));
+ cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
pat = XEXP (pat, 0);
}
- loc.reg = dwf_regno (pat);
- break;
-
- default:
- /* Recurse and define an expression. */
- gcc_unreachable ();
}
-
- def_cfa_1 (&loc);
+ /* ??? If this fails, we could be calling into the _loc functions to
+ define a full expression. So far no port does that. */
+ gcc_assert (REG_P (pat));
+ cur_cfa->reg = dwf_regno (pat);
}
/* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
- dw_cfa_location loc = cur_row->cfa;
rtx src, dest;
gcc_assert (GET_CODE (pat) == SET);
switch (GET_CODE (src))
{
case PLUS:
- gcc_assert (dwf_regno (XEXP (src, 0)) == loc.reg);
- loc.offset -= INTVAL (XEXP (src, 1));
+ gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
+ cur_cfa->offset -= INTVAL (XEXP (src, 1));
break;
case REG:
- break;
+ break;
default:
- gcc_unreachable ();
+ gcc_unreachable ();
}
- loc.reg = dwf_regno (dest);
- gcc_assert (loc.indirect == 0);
-
- def_cfa_1 (&loc);
+ cur_cfa->reg = dwf_regno (dest);
+ gcc_assert (cur_cfa->indirect == 0);
}
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
switch (GET_CODE (addr))
{
case REG:
- gcc_assert (dwf_regno (addr) == cur_row->cfa.reg);
- offset = -cur_row->cfa.offset;
+ gcc_assert (dwf_regno (addr) == cur_cfa->reg);
+ offset = -cur_cfa->offset;
break;
case PLUS:
- gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_row->cfa.reg);
- offset = INTVAL (XEXP (addr, 1)) - cur_row->cfa.offset;
+ gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
+ offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
break;
default:
gcc_unreachable ();
cfa current rule for calculating the CFA. It usually
consists of a register and an offset. This is
- actually stored in cur_row->cfa, but abbreviated
+ actually stored in *cur_cfa, but abbreviated
for the purposes of this documentation.
cfa_store register used by prologue code to save things to the stack
cfa_store.offset is the offset from the value of
static void
dwarf2out_frame_debug_expr (rtx expr)
{
- dw_cfa_location cfa = cur_row->cfa;
rtx src, dest, span;
HOST_WIDE_INT offset;
dw_fde_ref fde;
&& (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
dwarf2out_frame_debug_expr (elem);
- else if (GET_CODE (elem) == SET
- && par_index != 0
- && !RTX_FRAME_RELATED_P (elem))
- {
- /* Stack adjustment combining might combine some post-prologue
- stack adjustment into a prologue stack adjustment. */
- HOST_WIDE_INT offset
- = stack_adjust_offset (elem, queued_args_size, 0);
-
- if (offset != 0)
- dwarf2out_stack_adjust (offset);
- }
}
return;
}
{
/* Setting FP from SP. */
case REG:
- if (cfa.reg == dwf_regno (src))
+ if (cur_cfa->reg == dwf_regno (src))
{
/* Rule 1 */
/* Update the CFA rule wrt SP or FP. Make sure src is
ARM copies SP to a temporary register, and from there to
FP. So we just rely on the backends to only set
RTX_FRAME_RELATED_P on appropriate insns. */
- cfa.reg = dwf_regno (dest);
- cur_trace->cfa_temp.reg = cfa.reg;
- cur_trace->cfa_temp.offset = cfa.offset;
+ cur_cfa->reg = dwf_regno (dest);
+ cur_trace->cfa_temp.reg = cur_cfa->reg;
+ cur_trace->cfa_temp.offset = cur_cfa->offset;
}
else
{
&& REGNO (src) == STACK_POINTER_REGNUM)
gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
&& fde->drap_reg != INVALID_REGNUM
- && cfa.reg != dwf_regno (src));
+ && cur_cfa->reg != dwf_regno (src));
else
queue_reg_save (src, dest, 0);
}
if (XEXP (src, 0) == hard_frame_pointer_rtx)
{
/* Restoring SP from FP in the epilogue. */
- gcc_assert (cfa.reg == dw_frame_pointer_regnum);
- cfa.reg = dw_stack_pointer_regnum;
+ gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
+ cur_cfa->reg = dw_stack_pointer_regnum;
}
else if (GET_CODE (src) == LO_SUM)
/* Assume we've set the source reg of the LO_SUM from sp. */
if (GET_CODE (src) != MINUS)
offset = -offset;
- if (cfa.reg == dw_stack_pointer_regnum)
- cfa.offset += offset;
+ if (cur_cfa->reg == dw_stack_pointer_regnum)
+ cur_cfa->offset += offset;
if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
cur_trace->cfa_store.offset += offset;
}
gcc_assert (frame_pointer_needed);
gcc_assert (REG_P (XEXP (src, 0))
- && dwf_regno (XEXP (src, 0)) == cfa.reg
+ && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
&& CONST_INT_P (XEXP (src, 1)));
offset = INTVAL (XEXP (src, 1));
if (GET_CODE (src) != MINUS)
offset = -offset;
- cfa.offset += offset;
- cfa.reg = dw_frame_pointer_regnum;
+ cur_cfa->offset += offset;
+ cur_cfa->reg = dw_frame_pointer_regnum;
}
else
{
/* Rule 4 */
if (REG_P (XEXP (src, 0))
- && dwf_regno (XEXP (src, 0)) == cfa.reg
+ && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
&& CONST_INT_P (XEXP (src, 1)))
{
/* Setting a temporary CFA register that will be copied
into the FP later on. */
offset = - INTVAL (XEXP (src, 1));
- cfa.offset += offset;
- cfa.reg = dwf_regno (dest);
+ cur_cfa->offset += offset;
+ cur_cfa->reg = dwf_regno (dest);
/* Or used to save regs to the stack. */
- cur_trace->cfa_temp.reg = cfa.reg;
- cur_trace->cfa_temp.offset = cfa.offset;
+ cur_trace->cfa_temp.reg = cur_cfa->reg;
+ cur_trace->cfa_temp.offset = cur_cfa->offset;
}
/* Rule 5 */
{
/* Setting a scratch register that we will use instead
of SP for saving registers to the stack. */
- gcc_assert (cfa.reg == dw_stack_pointer_regnum);
+ gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
cur_trace->cfa_store.reg = dwf_regno (dest);
cur_trace->cfa_store.offset
- = cfa.offset - cur_trace->cfa_temp.offset;
+ = cur_cfa->offset - cur_trace->cfa_temp.offset;
}
/* Rule 9 */
fde->stack_realignment = INTVAL (XEXP (src, 1));
cur_trace->cfa_store.offset = 0;
- if (cfa.reg != dw_stack_pointer_regnum
- && cfa.reg != dw_frame_pointer_regnum)
- fde->drap_reg = cfa.reg;
+ if (cur_cfa->reg != dw_stack_pointer_regnum
+ && cur_cfa->reg != dw_frame_pointer_regnum)
+ fde->drap_reg = cur_cfa->reg;
}
return;
default:
gcc_unreachable ();
}
-
- def_cfa_1 (&cfa);
break;
case MEM:
&& cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
cur_trace->cfa_store.offset += offset;
- if (cfa.reg == dw_stack_pointer_regnum)
- cfa.offset = cur_trace->cfa_store.offset;
+ if (cur_cfa->reg == dw_stack_pointer_regnum)
+ cur_cfa->offset = cur_trace->cfa_store.offset;
if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
offset -= cur_trace->cfa_store.offset;
regiser. */
if (fde
&& fde->stack_realign
- && src == hard_frame_pointer_rtx)
+ && REG_P (src)
+ && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
{
- gcc_assert (cfa.reg != dw_frame_pointer_regnum);
+ gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
cur_trace->cfa_store.offset = 0;
}
- if (cfa.reg == dw_stack_pointer_regnum)
- cfa.offset = cur_trace->cfa_store.offset;
+ if (cur_cfa->reg == dw_stack_pointer_regnum)
+ cur_cfa->offset = cur_trace->cfa_store.offset;
if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
offset += -cur_trace->cfa_store.offset;
regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
- if (cfa.reg == regno)
- offset -= cfa.offset;
+ if (cur_cfa->reg == regno)
+ offset -= cur_cfa->offset;
else if (cur_trace->cfa_store.reg == regno)
offset -= cur_trace->cfa_store.offset;
else
{
unsigned int regno = dwf_regno (XEXP (dest, 0));
- if (cfa.reg == regno)
- offset = -cfa.offset;
+ if (cur_cfa->reg == regno)
+ offset = -cur_cfa->offset;
else if (cur_trace->cfa_store.reg == regno)
offset = -cur_trace->cfa_store.offset;
else
if (REG_P (src)
&& REGNO (src) != STACK_POINTER_REGNUM
&& REGNO (src) != HARD_FRAME_POINTER_REGNUM
- && dwf_regno (src) == cfa.reg)
+ && dwf_regno (src) == cur_cfa->reg)
{
/* We're storing the current CFA reg into the stack. */
- if (cfa.offset == 0)
+ if (cur_cfa->offset == 0)
{
/* Rule 19 */
/* If stack is aligned, putting CFA reg into stack means
value. */
if (fde
&& fde->stack_realign
- && cfa.indirect == 0
- && cfa.reg != dw_frame_pointer_regnum)
+ && cur_cfa->indirect == 0
+ && cur_cfa->reg != dw_frame_pointer_regnum)
{
- dw_cfa_location cfa_exp;
+ gcc_assert (fde->drap_reg == cur_cfa->reg);
- gcc_assert (fde->drap_reg == cfa.reg);
-
- cfa_exp.indirect = 1;
- cfa_exp.reg = dw_frame_pointer_regnum;
- cfa_exp.base_offset = offset;
- cfa_exp.offset = 0;
+ cur_cfa->indirect = 1;
+ cur_cfa->reg = dw_frame_pointer_regnum;
+ cur_cfa->base_offset = offset;
+ cur_cfa->offset = 0;
fde->drap_reg_saved = 1;
-
- def_cfa_1 (&cfa_exp);
break;
}
/* If the source register is exactly the CFA, assume
we're saving SP like any other register; this happens
on the ARM. */
- def_cfa_1 (&cfa);
queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
break;
}
x = XEXP (x, 0);
gcc_assert (REG_P (x));
- cfa.reg = dwf_regno (x);
- cfa.base_offset = offset;
- cfa.indirect = 1;
- def_cfa_1 (&cfa);
+ cur_cfa->reg = dwf_regno (x);
+ cur_cfa->base_offset = offset;
+ cur_cfa->indirect = 1;
break;
}
}
- def_cfa_1 (&cfa);
-
span = NULL;
if (REG_P (src))
span = targetm.dwarf_register_span (src);
}
}
-/* Record call frame debugging information for INSN, which either
- sets SP or FP (adjusting how we calculate the frame address) or saves a
- register to the stack. If INSN is NULL_RTX, initialize our state.
-
- If AFTER_P is false, we're being called before the insn is emitted,
- otherwise after. Call instructions get invoked twice. */
+/* Record call frame debugging information for INSN, which either sets
+ SP or FP (adjusting how we calculate the frame address) or saves a
+ register to the stack. */
static void
-dwarf2out_frame_debug (rtx insn, bool after_p)
+dwarf2out_frame_debug (rtx insn)
{
rtx note, n;
bool handled_one = false;
- bool need_flush = false;
-
- if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
- dwarf2out_flush_queued_reg_saves ();
-
- if (!RTX_FRAME_RELATED_P (insn))
- {
- /* ??? This should be done unconditionally since stack adjustments
- matter if the stack pointer is not the CFA register anymore but
- is still used to save registers. */
- if (!ACCUMULATE_OUTGOING_ARGS)
- dwarf2out_notice_stack_adjust (insn, after_p);
- return;
- }
-
- any_cfis_emitted = false;
for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
switch (REG_NOTE_KIND (note))
break;
case REG_CFA_FLUSH_QUEUE:
- /* The actual flush happens below. */
- need_flush = true;
+ /* The actual flush happens elsewhere. */
handled_one = true;
break;
break;
}
- if (handled_one)
- {
- /* Minimize the number of advances by emitting the entire queue
- once anything is emitted. */
- need_flush |= any_cfis_emitted;
- }
- else
+ if (!handled_one)
{
insn = PATTERN (insn);
do_frame_expr:
/* Check again. A parallel can save and update the same register.
We could probably check just once, here, but this is safer than
removing the check at the start of the function. */
- if (any_cfis_emitted || clobbers_queued_reg_save (insn))
- need_flush = true;
+ if (clobbers_queued_reg_save (insn))
+ dwarf2out_flush_queued_reg_saves ();
}
-
- if (need_flush)
- dwarf2out_flush_queued_reg_saves ();
}
/* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
add_cfi (cfi);
}
- if (old_row->args_size != new_row->args_size)
- add_cfi_args_size (new_row->args_size);
-
n_old = VEC_length (dw_cfi_ref, old_row->reg_save);
n_new = VEC_length (dw_cfi_ref, new_row->reg_save);
n_max = MAX (n_old, n_new);
if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
{
bool required = cfi_label_required_p (NOTE_CFI (insn));
- while (next && NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
- {
- required |= cfi_label_required_p (NOTE_CFI (next));
+ while (next)
+ if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
+ {
+ required |= cfi_label_required_p (NOTE_CFI (next));
+ next = NEXT_INSN (next);
+ }
+ else if (active_insn_p (next)
+ || (NOTE_P (next) && (NOTE_KIND (next)
+ == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
+ break;
+ else
next = NEXT_INSN (next);
- }
if (required)
{
int num = dwarf2out_cfi_label_num;
do
{
- VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, NOTE_CFI (insn));
+ if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
+ VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi,
+ NOTE_CFI (insn));
insn = NEXT_INSN (insn);
}
while (insn != next);
maybe_record_trace_start (rtx start, rtx origin)
{
dw_trace_info *ti;
-
- /* Sync queued data before propagating to a destination,
- lest we propagate out-of-date data. */
- dwarf2out_flush_queued_reg_saves ();
- dwarf2out_args_size (queued_args_size);
+ HOST_WIDE_INT args_size;
ti = get_trace_info (start);
gcc_assert (ti != NULL);
if (dump_file)
{
fprintf (dump_file, " saw edge from trace %u to %u (via %s %d)\n",
- get_trace_index (cur_trace), get_trace_index (ti),
+ cur_trace->id, ti->id,
(origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
(origin ? INSN_UID (origin) : 0));
}
+ args_size = cur_trace->end_true_args_size;
if (ti->beg_row == NULL)
{
/* This is the first time we've encountered this trace. Propagate
state across the edge and push the trace onto the work list. */
ti->beg_row = copy_cfi_row (cur_row);
+ ti->beg_true_args_size = args_size;
+
ti->cfa_store = cur_trace->cfa_store;
ti->cfa_temp = cur_trace->cfa_temp;
ti->regs_saved_in_regs = VEC_copy (reg_saved_in_data, heap,
VEC_safe_push (dw_trace_info_ref, heap, trace_work_list, ti);
if (dump_file)
- fprintf (dump_file, "\tpush trace %u to worklist\n",
- get_trace_index (ti));
+ fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
}
else
{
+
/* We ought to have the same state incoming to a given trace no
matter how we arrive at the trace. Anything else means we've
got some kind of optimization error. */
gcc_checking_assert (cfi_row_equal_p (cur_row, ti->beg_row));
+
+ /* The args_size is allowed to conflict if it isn't actually used. */
+ if (ti->beg_true_args_size != args_size)
+ ti->args_size_undefined = true;
}
}
+/* Similarly, but handle the args_size and CFA reset across EH
+   and non-local goto edges.  On such edges the unwinder discards the
+   pushed call arguments, so propagate state to START as if args_size
+   were zero (adjusting the CFA offset to match when the CFA is
+   computed from the stack pointer), then restore the current trace
+   state so scanning can continue with the true values.  */
+
+static void
+maybe_record_trace_start_abnormal (rtx start, rtx origin)
+{
+  HOST_WIDE_INT save_args_size, delta;
+  dw_cfa_location save_cfa;
+
+  /* If there are no pushed arguments, the normal path suffices.  */
+  save_args_size = cur_trace->end_true_args_size;
+  if (save_args_size == 0)
+    {
+      maybe_record_trace_start (start, origin);
+      return;
+    }
+
+  /* Pretend the outgoing arguments have been popped for the target
+     of the abnormal edge.  */
+  delta = -save_args_size;
+  cur_trace->end_true_args_size = 0;
+
+  save_cfa = cur_row->cfa;
+  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
+    {
+      /* Convert a change in args_size (always a positive in the
+         direction of stack growth) to a change in stack pointer.  */
+#ifndef STACK_GROWS_DOWNWARD
+      delta = -delta;
+#endif
+      cur_row->cfa.offset += delta;
+    }
+
+  maybe_record_trace_start (start, origin);
+
+  /* Undo the temporary adjustments; the current trace continues with
+     the true args_size and CFA.  */
+  cur_trace->end_true_args_size = save_args_size;
+  cur_row->cfa = save_cfa;
+}
+
/* Propagate CUR_TRACE state to the destinations implied by INSN. */
/* ??? Sadly, this is in large part a duplicate of make_edges. */
if (JUMP_P (insn))
{
if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
- ;
- else if (tablejump_p (insn, NULL, &tmp))
+ return;
+
+ if (tablejump_p (insn, NULL, &tmp))
{
rtvec vec;
/* Process non-local goto edges. */
if (can_nonlocal_goto (insn))
for (lab = nonlocal_goto_handler_labels; lab; lab = XEXP (lab, 1))
- maybe_record_trace_start (XEXP (lab, 0), insn);
+ maybe_record_trace_start_abnormal (XEXP (lab, 0), insn);
+ }
+ else if (GET_CODE (PATTERN (insn)) == SEQUENCE)
+ {
+ rtx seq = PATTERN (insn);
+ int i, n = XVECLEN (seq, 0);
+ for (i = 0; i < n; ++i)
+ create_trace_edges (XVECEXP (seq, 0, i));
+ return;
}
/* Process EH edges. */
{
eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
if (lp)
- maybe_record_trace_start (lp->landing_pad, insn);
+ maybe_record_trace_start_abnormal (lp->landing_pad, insn);
}
}
+/* A subroutine of scan_trace.  Do what needs to be done "after" INSN:
+   process its frame-related pattern and notes, if any, and record any
+   REG_ARGS_SIZE adjustment it makes.  */
+
+static void
+scan_insn_after (rtx insn)
+{
+  if (RTX_FRAME_RELATED_P (insn))
+    dwarf2out_frame_debug (insn);
+  notice_args_size (insn);
+}
+
/* Scan the trace beginning at INSN and create the CFI notes for the
instructions therein. */
static void
scan_trace (dw_trace_info *trace)
{
- rtx insn = trace->head;
+ rtx prev, insn = trace->head;
+ dw_cfa_location this_cfa;
if (dump_file)
fprintf (dump_file, "Processing trace %u : start at %s %d\n",
- get_trace_index (trace), rtx_name[(int) GET_CODE (insn)],
+ trace->id, rtx_name[(int) GET_CODE (insn)],
INSN_UID (insn));
trace->end_row = copy_cfi_row (trace->beg_row);
+ trace->end_true_args_size = trace->beg_true_args_size;
cur_trace = trace;
cur_row = trace->end_row;
- queued_args_size = cur_row->args_size;
- for (insn = NEXT_INSN (insn); insn ; insn = NEXT_INSN (insn))
+ this_cfa = cur_row->cfa;
+ cur_cfa = &this_cfa;
+
+ for (prev = insn, insn = NEXT_INSN (insn);
+ insn;
+ prev = insn, insn = NEXT_INSN (insn))
{
- rtx pat;
+ rtx control;
- add_cfi_insn = PREV_INSN (insn);
+ /* Do everything that happens "before" the insn. */
+ add_cfi_insn = prev;
/* Notice the end of a trace. */
- if (BARRIER_P (insn) || save_point_p (insn))
+ if (BARRIER_P (insn))
+ {
+ /* Don't bother saving the unneeded queued registers at all. */
+ VEC_truncate (queued_reg_save, queued_reg_saves, 0);
+ break;
+ }
+ if (save_point_p (insn))
{
- dwarf2out_flush_queued_reg_saves ();
- dwarf2out_args_size (queued_args_size);
-
/* Propagate across fallthru edges. */
- if (!BARRIER_P (insn))
- maybe_record_trace_start (insn, NULL);
+ dwarf2out_flush_queued_reg_saves ();
+ maybe_record_trace_start (insn, NULL);
break;
}
if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
continue;
- pat = PATTERN (insn);
- if (asm_noperands (pat) >= 0)
+ /* Handle all changes to the row state. Sequences require special
+ handling for the positioning of the notes. */
+ if (GET_CODE (PATTERN (insn)) == SEQUENCE)
{
- dwarf2out_frame_debug (insn, false);
+ rtx elt, pat = PATTERN (insn);
+ int i, n = XVECLEN (pat, 0);
+
+ control = XVECEXP (pat, 0, 0);
+ if (can_throw_internal (control))
+ notice_eh_throw (control);
+ dwarf2out_flush_queued_reg_saves ();
+
+ if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
+ {
+ /* ??? Hopefully multiple delay slots are not annulled. */
+ gcc_assert (n == 2);
+ gcc_assert (!RTX_FRAME_RELATED_P (control));
+ gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));
+
+ elt = XVECEXP (pat, 0, 1);
+
+ /* If ELT is an instruction from target of an annulled branch,
+ the effects are for the target only and so the args_size
+ and CFA along the current path shouldn't change. */
+ if (INSN_FROM_TARGET_P (elt))
+ {
+ HOST_WIDE_INT restore_args_size;
+ cfi_vec save_row_reg_save;
+
+ add_cfi_insn = NULL;
+ restore_args_size = cur_trace->end_true_args_size;
+ cur_cfa = &cur_row->cfa;
+ save_row_reg_save = VEC_copy (dw_cfi_ref, gc, cur_row->reg_save);
+
+ scan_insn_after (elt);
+
+ /* ??? Should we instead save the entire row state? */
+ gcc_assert (!VEC_length (queued_reg_save, queued_reg_saves));
+
+ create_trace_edges (control);
+
+ cur_trace->end_true_args_size = restore_args_size;
+ cur_row->cfa = this_cfa;
+ cur_row->reg_save = save_row_reg_save;
+ cur_cfa = &this_cfa;
+ continue;
+ }
+ }
+
+ /* The insns in the delay slot should all be considered to happen
+ "before" a call insn. Consider a call with a stack pointer
+ adjustment in the delay slot. The backtrace from the callee
+ should include the sp adjustment. Unfortunately, that leaves
+ us with an unavoidable unwinding error exactly at the call insn
+ itself. For jump insns we'd prefer to avoid this error by
+ placing the notes after the sequence. */
+ if (JUMP_P (control))
+ add_cfi_insn = insn;
+
+ for (i = 1; i < n; ++i)
+ {
+ elt = XVECEXP (pat, 0, i);
+ scan_insn_after (elt);
+ }
+
+ /* Make sure any register saves are visible at the jump target. */
+ dwarf2out_flush_queued_reg_saves ();
+ any_cfis_emitted = false;
+
+ /* However, if there is some adjustment on the call itself, e.g.
+ a call_pop, that action should be considered to happen after
+ the call returns. */
add_cfi_insn = insn;
+ scan_insn_after (control);
}
else
{
- if (GET_CODE (pat) == SEQUENCE)
+ /* Flush data before calls and jumps, and of course if necessary. */
+ if (can_throw_internal (insn))
{
- int i, n = XVECLEN (pat, 0);
- for (i = 1; i < n; ++i)
- dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
+ notice_eh_throw (insn);
+ dwarf2out_flush_queued_reg_saves ();
}
-
- if (CALL_P (insn))
- dwarf2out_frame_debug (insn, false);
- else if (find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL)
- || (cfun->can_throw_non_call_exceptions
- && can_throw_internal (insn)))
+ else if (!NONJUMP_INSN_P (insn)
+ || clobbers_queued_reg_save (insn)
+ || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
dwarf2out_flush_queued_reg_saves ();
+ any_cfis_emitted = false;
- /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
- Putting the note after the VEC should be ok. */
- if (!tablejump_p (insn, NULL, &add_cfi_insn))
- add_cfi_insn = insn;
-
- dwarf2out_frame_debug (insn, true);
+ add_cfi_insn = insn;
+ scan_insn_after (insn);
+ control = insn;
}
+ /* Between frame-related-p and args_size we might have otherwise
+ emitted two cfa adjustments. Do it now. */
+ def_cfa_1 (&this_cfa);
+
+ /* Minimize the number of advances by emitting the entire queue
+ once anything is emitted. */
+ if (any_cfis_emitted
+ || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
+ dwarf2out_flush_queued_reg_saves ();
+
/* Note that a test for control_flow_insn_p does exactly the
same tests as are done to actually create the edges. So
always call the routine and let it not create edges for
non-control-flow insns. */
- create_trace_edges (insn);
+ create_trace_edges (control);
}
add_cfi_insn = NULL;
cur_row = NULL;
cur_trace = NULL;
+ cur_cfa = NULL;
}
/* Scan the function and create the initial set of CFI notes. */
VEC_free (dw_trace_info_ref, heap, trace_work_list);
}
+/* Return the insn before the first NOTE_INSN_CFI at or after START.
+   START itself counts: if START is already a CFI note, START is
+   returned.  A CFI note must exist downstream of START; otherwise we
+   abort.  */
+
+static rtx
+before_next_cfi_note (rtx start)
+{
+  rtx prev = start;
+  while (start)
+    {
+      if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
+	return prev;
+      prev = start;
+      start = NEXT_INSN (start);
+    }
+  /* Callers guarantee a CFI note follows; reaching here is a bug.  */
+  gcc_unreachable ();
+}
+
/* Insert CFI notes between traces to properly change state between them. */
-/* ??? TODO: Make use of remember/restore_state. */
static void
connect_traces (void)
unsigned i, n = VEC_length (dw_trace_info, trace_info);
dw_trace_info *prev_ti, *ti;
- prev_ti = VEC_index (dw_trace_info, trace_info, 0);
+ /* ??? Ideally, we should have both queued and processed every trace.
+ However the current representation of constant pools on various targets
+ is indistinguishable from unreachable code. Assume for the moment that
+ we can simply skip over such traces. */
+ /* ??? Consider creating a DATA_INSN rtx code to indicate that
+ these are not "real" instructions, and should not be considered.
+ This could be generically useful for tablejump data as well. */
+ /* Remove all unprocessed traces from the list. */
+ for (i = n - 1; i > 0; --i)
+ {
+ ti = VEC_index (dw_trace_info, trace_info, i);
+ if (ti->beg_row == NULL)
+ {
+ VEC_ordered_remove (dw_trace_info, trace_info, i);
+ n -= 1;
+ }
+ else
+ gcc_assert (ti->end_row != NULL);
+ }
- for (i = 1; i < n; ++i, prev_ti = ti)
+ /* Work from the end back to the beginning. This lets us easily insert
+ remember/restore_state notes in the correct order wrt other notes. */
+ prev_ti = VEC_index (dw_trace_info, trace_info, n - 1);
+ for (i = n - 1; i > 0; --i)
{
dw_cfi_row *old_row;
- ti = VEC_index (dw_trace_info, trace_info, i);
+ ti = prev_ti;
+ prev_ti = VEC_index (dw_trace_info, trace_info, i - 1);
- /* We must have both queued and processed every trace. */
- gcc_assert (ti->beg_row && ti->end_row);
+ add_cfi_insn = ti->head;
/* In dwarf2out_switch_text_section, we'll begin a new FDE
for the portion of the function in the alternate text
if (ti->switch_sections)
old_row = cie_cfi_row;
else
- old_row = prev_ti->end_row;
+ {
+ old_row = prev_ti->end_row;
+ /* If there's no change from the previous end state, fine. */
+ if (cfi_row_equal_p (old_row, ti->beg_row))
+ ;
+ /* Otherwise check for the common case of sharing state with
+ the beginning of an epilogue, but not the end. Insert
+ remember/restore opcodes in that case. */
+ else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
+ {
+ dw_cfi_ref cfi;
+
+	      /* Note that if we blindly insert the remember at the
+		 start of the trace, we can wind up increasing the
+		 size of the unwind info due to extra advance opcodes.
+		 Instead, put the remember immediately before the next
+		 state change.  We know there must be one, because the
+		 state at the beginning and at the end of the previous
+		 trace differ.  */
+ add_cfi_insn = before_next_cfi_note (prev_ti->head);
+ cfi = new_cfi ();
+ cfi->dw_cfi_opc = DW_CFA_remember_state;
+ add_cfi (cfi);
+
+ add_cfi_insn = ti->head;
+ cfi = new_cfi ();
+ cfi->dw_cfi_opc = DW_CFA_restore_state;
+ add_cfi (cfi);
+
+ old_row = prev_ti->beg_row;
+ }
+ /* Otherwise, we'll simply change state from the previous end. */
+ }
- add_cfi_insn = ti->head;
change_cfi_row (old_row, ti->beg_row);
if (dump_file && add_cfi_insn != ti->head)
{
rtx note;
- fprintf (dump_file, "Fixup between trace %u and %u:\n", i - 1, i);
+ fprintf (dump_file, "Fixup between trace %u and %u:\n",
+ prev_ti->id, ti->id);
note = ti->head;
do
while (note != add_cfi_insn);
}
}
+
+ /* Connect args_size between traces that have can_throw_internal insns. */
+ if (cfun->eh->lp_array != NULL)
+ {
+ HOST_WIDE_INT prev_args_size = 0;
+
+ for (i = 0; i < n; ++i)
+ {
+ ti = VEC_index (dw_trace_info, trace_info, i);
+
+ if (ti->switch_sections)
+ prev_args_size = 0;
+ if (ti->eh_head == NULL)
+ continue;
+ gcc_assert (!ti->args_size_undefined);
+
+ if (ti->beg_delay_args_size != prev_args_size)
+ {
+ /* ??? Search back to previous CFI note. */
+ add_cfi_insn = PREV_INSN (ti->eh_head);
+ add_cfi_args_size (ti->beg_delay_args_size);
+ }
+
+ prev_args_size = ti->end_delay_args_size;
+ }
+ }
}
/* Set up the pseudo-cfg of instruction traces, as described at the
memset (ti, 0, sizeof (*ti));
ti->head = insn;
ti->switch_sections = switch_sections;
+ ti->id = VEC_length (dw_trace_info, trace_info) - 1;
saw_barrier = false;
switch_sections = false;
if (dwarf2out_do_cfi_asm ())
output_cfi_directive (asm_out_file, cfi);
}
+
+/* Dump the state of ROW to F as a sequence of CFI directives.  If the
+   row has no cached DW_CFA_def_cfa* instruction, synthesize one from
+   the raw CFA location so that the CFA is always printed.  */
+
+static void
+dump_cfi_row (FILE *f, dw_cfi_row *row)
+{
+  dw_cfi_ref cfi;
+  unsigned i;
+
+  cfi = row->cfa_cfi;
+  if (!cfi)
+    {
+      /* def_cfa_0 diffs against an "old" location; feed it an invalid
+	 one so the full CFA is emitted.  */
+      dw_cfa_location dummy;
+      memset (&dummy, 0, sizeof (dummy));
+      dummy.reg = INVALID_REGNUM;
+      cfi = def_cfa_0 (&dummy, &row->cfa);
+    }
+  output_cfi_directive (f, cfi);
+
+  FOR_EACH_VEC_ELT (dw_cfi_ref, row->reg_save, i, cfi)
+    if (cfi)
+      output_cfi_directive (f, cfi);
+}
+
+/* Entry point for dumping ROW to stderr, intended to be called by hand
+   from the debugger.  The forward declaration gives the definition a
+   prototype (presumably to silence a missing-prototype warning for this
+   otherwise-unreferenced extern symbol — confirm).  */
+void debug_cfi_row (dw_cfi_row *row);
+
+void
+debug_cfi_row (dw_cfi_row *row)
+{
+  dump_cfi_row (stderr, row);
+}
\f
/* Save the result of dwarf2out_do_frame across PCH.