/* Common subexpression elimination library for GNU compiler.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
- Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
+ 2012 Free Software Foundation, Inc.
This file is part of GCC.
static bool cselib_record_memory;
static bool cselib_preserve_constants;
+static bool cselib_any_perm_equivs;
static int entry_and_rtx_equal_p (const void *, const void *);
static hashval_t get_value_hash (const void *);
static struct elt_list *new_elt_list (struct elt_list *, cselib_val *);
-static struct elt_loc_list *new_elt_loc_list (struct elt_loc_list *, rtx);
+static void new_elt_loc_list (cselib_val *, rtx);
static void unchain_one_value (cselib_val *);
static void unchain_one_elt_list (struct elt_list **);
static void unchain_one_elt_loc_list (struct elt_loc_list **);
that is constant through the whole function and should never be
eliminated. */
static cselib_val *cfa_base_preserved_val;
-static unsigned int cfa_base_preserved_regno;
+static unsigned int cfa_base_preserved_regno = INVALID_REGNUM;
/* Used to list all values that contain memory reference.
May or may not contain the useless values - the list is compacted
return el;
}
-/* Allocate a struct elt_loc_list and fill in its two elements with the
- arguments. */
+/* Allocate a struct elt_loc_list with LOC and prepend it to VAL's loc
+ list. */
-static inline struct elt_loc_list *
-new_elt_loc_list (struct elt_loc_list *next, rtx loc)
+static inline void
+new_elt_loc_list (cselib_val *val, rtx loc)
{
- struct elt_loc_list *el;
- el = (struct elt_loc_list *) pool_alloc (elt_loc_list_pool);
- el->next = next;
- el->loc = loc;
- el->setting_insn = cselib_current_insn;
- gcc_assert (!next || !next->setting_insn
- || !DEBUG_INSN_P (next->setting_insn));
+ struct elt_loc_list *el, *next = val->locs;
+
+ gcc_checking_assert (!next || !next->setting_insn
+ || !DEBUG_INSN_P (next->setting_insn)
+ || cselib_current_insn == next->setting_insn);
/* If we're creating the first loc in a debug insn context, we've
just created a debug value. Count it. */
if (!next && cselib_current_insn && DEBUG_INSN_P (cselib_current_insn))
n_debug_values++;
- return el;
+ val = canonical_cselib_val (val);
+ next = val->locs;
+
+ if (GET_CODE (loc) == VALUE)
+ {
+ loc = canonical_cselib_val (CSELIB_VAL_PTR (loc))->val_rtx;
+
+ gcc_checking_assert (PRESERVED_VALUE_P (loc)
+ == PRESERVED_VALUE_P (val->val_rtx));
+
+ if (val->val_rtx == loc)
+ return;
+ else if (val->uid > CSELIB_VAL_PTR (loc)->uid)
+ {
+ /* Reverse the insertion. */
+ new_elt_loc_list (CSELIB_VAL_PTR (loc), val->val_rtx);
+ return;
+ }
+
+ gcc_checking_assert (val->uid < CSELIB_VAL_PTR (loc)->uid);
+
+ if (CSELIB_VAL_PTR (loc)->locs)
+ {
+ /* Bring all locs from LOC to VAL. */
+ for (el = CSELIB_VAL_PTR (loc)->locs; el->next; el = el->next)
+ {
+ /* Adjust values that have LOC as canonical so that VAL
+ becomes their canonical. */
+ if (el->loc && GET_CODE (el->loc) == VALUE)
+ {
+ gcc_checking_assert (CSELIB_VAL_PTR (el->loc)->locs->loc
+ == loc);
+ CSELIB_VAL_PTR (el->loc)->locs->loc = val->val_rtx;
+ }
+ }
+ el->next = val->locs;
+ next = val->locs = CSELIB_VAL_PTR (loc)->locs;
+ }
+
+ if (CSELIB_VAL_PTR (loc)->addr_list)
+ {
+ /* Bring in addr_list into canonical node. */
+ struct elt_list *last = CSELIB_VAL_PTR (loc)->addr_list;
+ while (last->next)
+ last = last->next;
+ last->next = val->addr_list;
+ val->addr_list = CSELIB_VAL_PTR (loc)->addr_list;
+ CSELIB_VAL_PTR (loc)->addr_list = NULL;
+ }
+
+ if (CSELIB_VAL_PTR (loc)->next_containing_mem != NULL
+ && val->next_containing_mem == NULL)
+ {
+ /* Add VAL to the containing_mem list after LOC. LOC will
+ be removed when we notice it doesn't contain any
+ MEMs. */
+ val->next_containing_mem = CSELIB_VAL_PTR (loc)->next_containing_mem;
+ CSELIB_VAL_PTR (loc)->next_containing_mem = val;
+ }
+
+ /* Chain LOC back to VAL. */
+ el = (struct elt_loc_list *) pool_alloc (elt_loc_list_pool);
+ el->loc = val->val_rtx;
+ el->setting_insn = cselib_current_insn;
+ el->next = NULL;
+ CSELIB_VAL_PTR (loc)->locs = el;
+ }
+
+ el = (struct elt_loc_list *) pool_alloc (elt_loc_list_pool);
+ el->loc = loc;
+ el->setting_insn = cselib_current_insn;
+ el->next = next;
+ val->locs = el;
}
/* Promote loc L to a nondebug cselib_current_insn if L is marked as
{
n_debug_values--;
l->setting_insn = cselib_current_insn;
- gcc_assert (!l->next);
+ if (cselib_preserve_constants && l->next)
+ {
+ gcc_assert (l->next->setting_insn
+ && DEBUG_INSN_P (l->next->setting_insn)
+ && !l->next->next);
+ l->next->setting_insn = cselib_current_insn;
+ }
+ else
+ gcc_assert (!l->next);
}
}
cselib_reset_table (1);
}
-/* Remove from hash table all VALUEs except constants. */
+/* Return TRUE if V is a constant, a function invariant or a VALUE
+ equivalence; FALSE otherwise. */
-static int
-preserve_only_constants (void **x, void *info ATTRIBUTE_UNUSED)
+static bool
+invariant_or_equiv_p (cselib_val *v)
{
- cselib_val *v = (cselib_val *)*x;
+ struct elt_loc_list *l;
+
+ if (v == cfa_base_preserved_val)
+ return true;
+
+ /* Keep VALUE equivalences around. */
+ for (l = v->locs; l; l = l->next)
+ if (GET_CODE (l->loc) == VALUE)
+ return true;
if (v->locs != NULL
&& v->locs->next == NULL)
if (CONSTANT_P (v->locs->loc)
&& (GET_CODE (v->locs->loc) != CONST
|| !references_value_p (v->locs->loc, 0)))
- return 1;
- if (cfa_base_preserved_val)
- {
- if (v == cfa_base_preserved_val)
- return 1;
- if (GET_CODE (v->locs->loc) == PLUS
- && CONST_INT_P (XEXP (v->locs->loc, 1))
- && XEXP (v->locs->loc, 0) == cfa_base_preserved_val->val_rtx)
- return 1;
- }
+ return true;
+ /* Although a debug expr may be bound to different expressions,
+ we can preserve it as if it was constant, to get unification
+ and proper merging within var-tracking. */
+ if (GET_CODE (v->locs->loc) == DEBUG_EXPR
+ || GET_CODE (v->locs->loc) == DEBUG_IMPLICIT_PTR
+ || GET_CODE (v->locs->loc) == ENTRY_VALUE
+ || GET_CODE (v->locs->loc) == DEBUG_PARAMETER_REF)
+ return true;
+
+ /* (plus (value V) (const_int C)) is invariant iff V is invariant. */
+ if (GET_CODE (v->locs->loc) == PLUS
+ && CONST_INT_P (XEXP (v->locs->loc, 1))
+ && GET_CODE (XEXP (v->locs->loc, 0)) == VALUE
+ && invariant_or_equiv_p (CSELIB_VAL_PTR (XEXP (v->locs->loc, 0))))
+ return true;
}
- htab_clear_slot (cselib_hash_table, x);
+ return false;
+}
+
+/* Remove from hash table all VALUEs except constants, function
+ invariants and VALUE equivalences. */
+
+static int
+preserve_constants_and_equivs (void **x, void *info ATTRIBUTE_UNUSED)
+{
+ cselib_val *v = (cselib_val *)*x;
+
+ if (!invariant_or_equiv_p (v))
+ htab_clear_slot (cselib_hash_table, x);
return 1;
}
}
if (cselib_preserve_constants)
- htab_traverse (cselib_hash_table, preserve_only_constants, NULL);
+ htab_traverse (cselib_hash_table, preserve_constants_and_equivs, NULL);
else
- htab_empty (cselib_hash_table);
+ {
+ htab_empty (cselib_hash_table);
+ gcc_checking_assert (!cselib_any_perm_equivs);
+ }
n_useless_values = 0;
n_useless_debug_values = 0;
int i, j;
if (GET_CODE (x) == VALUE
- && (! only_useless || CSELIB_VAL_PTR (x)->locs == 0))
+ && (! only_useless ||
+ (CSELIB_VAL_PTR (x)->locs == 0 && !PRESERVED_VALUE_P (x))))
return 1;
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
p = &first_containing_mem;
for (v = *p; v != &dummy_val; v = v->next_containing_mem)
- if (v->locs)
+ if (v->locs && v == canonical_cselib_val (v))
{
*p = v;
p = &(*p)->next_containing_mem;
if (x == y)
return 1;
- if (GET_CODE (x) == VALUE && GET_CODE (y) == VALUE)
- return CSELIB_VAL_PTR (x) == CSELIB_VAL_PTR (y);
-
if (GET_CODE (x) == VALUE)
{
- cselib_val *e = CSELIB_VAL_PTR (x);
+ cselib_val *e = canonical_cselib_val (CSELIB_VAL_PTR (x));
struct elt_loc_list *l;
+ if (GET_CODE (y) == VALUE)
+ return e == canonical_cselib_val (CSELIB_VAL_PTR (y));
+
for (l = e->locs; l; l = l->next)
{
rtx t = l->loc;
- /* Avoid infinite recursion. */
- if (REG_P (t) || MEM_P (t))
+ /* Avoid infinite recursion. We know we have the canonical
+ value, so we can just skip any values in the equivalence
+ list. */
+ if (REG_P (t) || MEM_P (t) || GET_CODE (t) == VALUE)
continue;
else if (rtx_equal_for_cselib_1 (t, y, memmode))
return 1;
return 0;
}
-
- if (GET_CODE (y) == VALUE)
+ else if (GET_CODE (y) == VALUE)
{
- cselib_val *e = CSELIB_VAL_PTR (y);
+ cselib_val *e = canonical_cselib_val (CSELIB_VAL_PTR (y));
struct elt_loc_list *l;
for (l = e->locs; l; l = l->next)
{
rtx t = l->loc;
- if (REG_P (t) || MEM_P (t))
+ if (REG_P (t) || MEM_P (t) || GET_CODE (t) == VALUE)
continue;
else if (rtx_equal_for_cselib_1 (x, t, memmode))
return 1;
return DEBUG_IMPLICIT_PTR_DECL (x)
== DEBUG_IMPLICIT_PTR_DECL (y);
+ case DEBUG_PARAMETER_REF:
+ return DEBUG_PARAMETER_REF_DECL (x)
+ == DEBUG_PARAMETER_REF_DECL (y);
+
+ case ENTRY_VALUE:
+ /* ENTRY_VALUEs are function invariant; it is thus undesirable to
+ use rtx_equal_for_cselib_1 to compare the operands. */
+ return rtx_equal_p (ENTRY_VALUE_EXP (x), ENTRY_VALUE_EXP (y));
+
case LABEL_REF:
return XEXP (x, 0) == XEXP (y, 0);
switch (code)
{
+ case VALUE:
+ e = CSELIB_VAL_PTR (x);
+ return e->hash;
+
case MEM:
case REG:
e = cselib_lookup (x, GET_MODE (x), create, memmode);
+ DECL_UID (DEBUG_IMPLICIT_PTR_DECL (x));
return hash ? hash : (unsigned int) DEBUG_IMPLICIT_PTR;
+ case DEBUG_PARAMETER_REF:
+ hash += ((unsigned) DEBUG_PARAMETER_REF << 7)
+ + DECL_UID (DEBUG_PARAMETER_REF_DECL (x));
+ return hash ? hash : (unsigned int) DEBUG_PARAMETER_REF;
+
+ case ENTRY_VALUE:
+ /* ENTRY_VALUEs are function invariant, thus try to avoid
+ recursing on the argument if the ENTRY_VALUE is one of the
+ forms emitted by expand_debug_expr, otherwise
+ ENTRY_VALUE hash would depend on the current value
+ in some register or memory. */
+ if (REG_P (ENTRY_VALUE_EXP (x)))
+ hash += (unsigned int) REG
+ + (unsigned int) GET_MODE (ENTRY_VALUE_EXP (x))
+ + (unsigned int) REGNO (ENTRY_VALUE_EXP (x));
+ else if (MEM_P (ENTRY_VALUE_EXP (x))
+ && REG_P (XEXP (ENTRY_VALUE_EXP (x), 0)))
+ hash += (unsigned int) MEM
+ + (unsigned int) GET_MODE (XEXP (ENTRY_VALUE_EXP (x), 0))
+ + (unsigned int) REGNO (XEXP (ENTRY_VALUE_EXP (x), 0));
+ else
+ hash += cselib_hash_rtx (ENTRY_VALUE_EXP (x), create, memmode);
+ return hash ? hash : (unsigned int) ENTRY_VALUE;
+
case CONST_INT:
hash += ((unsigned) CONST_INT << 7) + INTVAL (x);
return hash ? hash : (unsigned int) CONST_INT;
{
struct elt_loc_list *l;
+ addr_elt = canonical_cselib_val (addr_elt);
+ mem_elt = canonical_cselib_val (mem_elt);
+
/* Avoid duplicates. */
for (l = mem_elt->locs; l; l = l->next)
if (MEM_P (l->loc)
}
addr_elt->addr_list = new_elt_list (addr_elt->addr_list, mem_elt);
- mem_elt->locs
- = new_elt_loc_list (mem_elt->locs,
- replace_equiv_address_nv (x, addr_elt->val_rtx));
+ new_elt_loc_list (mem_elt,
+ replace_equiv_address_nv (x, addr_elt->val_rtx));
if (mem_elt->next_containing_mem == NULL)
{
mem_elt->next_containing_mem = first_containing_mem;
if (! addr)
return 0;
+ addr = canonical_cselib_val (addr);
/* Find a value that describes a value of our mode at that address. */
for (l = addr->addr_list; l; l = l->next)
if (GET_MODE (l->elt->val_rtx) == mode)
unsigned int regno = UINT_MAX;
struct elt_loc_list *p_in = p;
- for (; p; p = p -> next)
+ for (; p; p = p->next)
{
+ /* Return these right away to avoid returning stack pointer based
+ expressions for frame pointer and vice versa, which is something
+ that would confuse DSE. See the comment in cselib_expand_value_rtx_1
+ for more details. */
+ if (REG_P (p->loc)
+ && (REGNO (p->loc) == STACK_POINTER_REGNUM
+ || REGNO (p->loc) == FRAME_POINTER_REGNUM
+ || REGNO (p->loc) == HARD_FRAME_POINTER_REGNUM
+ || REGNO (p->loc) == cfa_base_preserved_regno))
+ return p->loc;
/* Avoid infinite recursion trying to expand a reg into a
the same reg. */
if ((REG_P (p->loc))
if (GET_MODE (l->elt->val_rtx) == GET_MODE (orig))
{
rtx result;
- int regno = REGNO (orig);
+ unsigned regno = REGNO (orig);
/* The only thing that we are not willing to do (this
is requirement of dse and if others potential uses
make the frame assumptions. */
if (regno == STACK_POINTER_REGNUM
|| regno == FRAME_POINTER_REGNUM
- || regno == HARD_FRAME_POINTER_REGNUM)
+ || regno == HARD_FRAME_POINTER_REGNUM
+ || regno == cfa_base_preserved_regno)
return orig;
bitmap_set_bit (evd->regs_active, regno);
}
return e->val_rtx;
+ case ENTRY_VALUE:
+ e = cselib_lookup (x, GET_MODE (x), 0, memmode);
+ if (! e)
+ break;
+ return e->val_rtx;
+
case CONST_DOUBLE:
case CONST_VECTOR:
case CONST_INT:
return copy;
}
+/* Wrapper for cselib_subst_to_values, that indicates X is in INSN. */
+
+rtx
+cselib_subst_to_values_from_insn (rtx x, enum machine_mode memmode, rtx insn)
+{
+ rtx ret;
+ gcc_assert (!cselib_current_insn);
+ cselib_current_insn = insn;
+ ret = cselib_subst_to_values (x, memmode);
+ cselib_current_insn = NULL;
+ return ret;
+}
+
/* Look up the rtl expression X in our tables and return the value it
has. If CREATE is zero, we return NULL if we don't know the value.
Otherwise, we create a new one if possible, using mode MODE if X
}
e = new_cselib_val (next_uid, GET_MODE (x), x);
- e->locs = new_elt_loc_list (e->locs, x);
+ new_elt_loc_list (e, x);
if (REG_VALUES (i) == 0)
{
/* Maintain the invariant that the first entry of
used_regs[n_used_regs++] = i;
REG_VALUES (i) = new_elt_list (REG_VALUES (i), NULL);
}
+ else if (cselib_preserve_constants
+ && GET_MODE_CLASS (mode) == MODE_INT)
+ {
+ /* During var-tracking, try harder to find equivalences
+ for SUBREGs. If a setter sets, say, a DImode register
+ and the user uses that register only in SImode, add a lowpart
+ subreg location. */
+ struct elt_list *lwider = NULL;
+ l = REG_VALUES (i);
+ if (l && l->elt == NULL)
+ l = l->next;
+ for (; l; l = l->next)
+ if (GET_MODE_CLASS (GET_MODE (l->elt->val_rtx)) == MODE_INT
+ && GET_MODE_SIZE (GET_MODE (l->elt->val_rtx))
+ > GET_MODE_SIZE (mode)
+ && (lwider == NULL
+ || GET_MODE_SIZE (GET_MODE (l->elt->val_rtx))
+ < GET_MODE_SIZE (GET_MODE (lwider->elt->val_rtx))))
+ {
+ struct elt_loc_list *el;
+ if (i < FIRST_PSEUDO_REGISTER
+ && hard_regno_nregs[i][GET_MODE (l->elt->val_rtx)] != 1)
+ continue;
+ for (el = l->elt->locs; el; el = el->next)
+ if (!REG_P (el->loc))
+ break;
+ if (el)
+ lwider = l;
+ }
+ if (lwider)
+ {
+ rtx sub = lowpart_subreg (mode, lwider->elt->val_rtx,
+ GET_MODE (lwider->elt->val_rtx));
+ if (sub)
+ new_elt_loc_list (e, sub);
+ }
+ }
REG_VALUES (i)->next = new_elt_list (REG_VALUES (i)->next, e);
slot = cselib_find_slot (x, e->hash, INSERT, memmode);
*slot = e;
the hash table is inconsistent until we do so, and
cselib_subst_to_values will need to do lookups. */
*slot = (void *) e;
- e->locs = new_elt_loc_list (e->locs,
- cselib_subst_to_values (x, memmode));
+ new_elt_loc_list (e, cselib_subst_to_values (x, memmode));
return e;
}
else
unchain_one_elt_list (l);
+ v = canonical_cselib_val (v);
+
had_locs = v->locs != NULL;
setting_insn = v->locs ? v->locs->setting_insn : NULL;
}
}
\f
-/* Return 1 if X has a value that can vary even between two
- executions of the program. 0 means X can be compared reliably
- against certain constants or near-constants. */
-
-static bool
-cselib_rtx_varies_p (const_rtx x ATTRIBUTE_UNUSED, bool from_alias ATTRIBUTE_UNUSED)
-{
- /* We actually don't need to verify very hard. This is because
- if X has actually changed, we invalidate the memory anyway,
- so assume that all common memory addresses are
- invariant. */
- return 0;
-}
-
/* Invalidate any locations in the table which are changed because of a
store to MEM_RTX. If this is called because of a non-const call
instruction, MEM_RTX is (mem:BLK const0_rtx). */
continue;
}
if (num_mems < PARAM_VALUE (PARAM_MAX_CSELIB_MEMORY_LOCATIONS)
- && ! canon_true_dependence (mem_rtx, GET_MODE (mem_rtx), mem_addr,
- x, NULL_RTX, cselib_rtx_varies_p))
+ && ! canon_true_dependence (mem_rtx, GET_MODE (mem_rtx),
+ mem_addr, x, NULL_RTX))
{
has_mem = true;
num_mems++;
/* We must have a mapping from this MEM's address to the
value (E). Remove that, too. */
addr = cselib_lookup (XEXP (x, 0), VOIDmode, 0, GET_MODE (x));
+ addr = canonical_cselib_val (addr);
+ gcc_checking_assert (v == canonical_cselib_val (v));
mem_chain = &addr->addr_list;
for (;;)
{
- if ((*mem_chain)->elt == v)
+ cselib_val *canon = canonical_cselib_val ((*mem_chain)->elt);
+
+ if (canon == v)
{
unchain_one_elt_list (mem_chain);
break;
}
+ /* Record canonicalized elt. */
+ (*mem_chain)->elt = canon;
+
mem_chain = &(*mem_chain)->next;
}
if (src_elt->locs == 0 && !PRESERVED_VALUE_P (src_elt->val_rtx))
n_useless_values--;
- src_elt->locs = new_elt_loc_list (src_elt->locs, dest);
+ new_elt_loc_list (src_elt, dest);
}
else if (MEM_P (dest) && dest_addr_elt != 0
&& cselib_record_memory)
}
}
+/* Make ELT and X's VALUE equivalent to each other at INSN. */
+
+void
+cselib_add_permanent_equiv (cselib_val *elt, rtx x, rtx insn)
+{
+ cselib_val *nelt;
+ rtx save_cselib_current_insn = cselib_current_insn;
+
+ gcc_checking_assert (elt);
+ gcc_checking_assert (PRESERVED_VALUE_P (elt->val_rtx));
+ gcc_checking_assert (!side_effects_p (x));
+
+ cselib_current_insn = insn;
+
+ nelt = cselib_lookup (x, GET_MODE (elt->val_rtx), 1, VOIDmode);
+
+ if (nelt != elt)
+ {
+ cselib_any_perm_equivs = true;
+
+ if (!PRESERVED_VALUE_P (nelt->val_rtx))
+ cselib_preserve_value (nelt);
+
+ new_elt_loc_list (nelt, elt->val_rtx);
+ }
+
+ cselib_current_insn = save_cselib_current_insn;
+}
+
+/* Return TRUE if any permanent equivalences have been recorded since
+ the table was last initialized. */
+bool
+cselib_have_permanent_equivalences (void)
+{
+ return cselib_any_perm_equivs;
+}
+
/* There is no good way to determine how many elements there can be
in a PARALLEL. Since it's fairly cheap, use a really large number. */
#define MAX_SETS (FIRST_PSEUDO_REGISTER * 2)
value_pool = create_alloc_pool ("value", RTX_CODE_SIZE (VALUE), 100);
cselib_record_memory = record_what & CSELIB_RECORD_MEMORY;
cselib_preserve_constants = record_what & CSELIB_PRESERVE_CONSTANTS;
+ cselib_any_perm_equivs = false;
/* (mem:BLK (scratch)) is a special mechanism to conflict with everything,
see canon_true_dependence. This is only created once. */
if (!reg_values || reg_values_size < cselib_nregs
|| (reg_values_size > 10 && reg_values_size > cselib_nregs * 4))
{
- if (reg_values)
- free (reg_values);
+ free (reg_values);
/* Some space for newly emit instructions so we don't end up
reallocating in between passes. */
reg_values_size = cselib_nregs + (63 + cselib_nregs) / 16;
{
cselib_discard_hook = NULL;
cselib_preserve_constants = false;
+ cselib_any_perm_equivs = false;
cfa_base_preserved_val = NULL;
cfa_base_preserved_regno = INVALID_REGNUM;
free_alloc_pool (elt_list_pool);
fputs (" locs:", out);
do
{
- fprintf (out, "\n from insn %i ",
- INSN_UID (l->setting_insn));
+ if (l->setting_insn)
+ fprintf (out, "\n from insn %i ",
+ INSN_UID (l->setting_insn));
+ else
+ fprintf (out, "\n ");
print_inline_rtx (out, l->loc, 4);
}
while ((l = l->next));