+ else
+ {
+ /* Object is now smaller than the cache size.  Invalidate every
+ cache entry whose bounds exactly match this object's, so stale
+ hits cannot occur after the object goes away. */
+ unsigned entry_low_idx = __MF_CACHE_INDEX (low);
+ unsigned entry_high_idx = __MF_CACHE_INDEX (high);
+ if (entry_low_idx <= entry_high_idx)
+ {
+ entry = & __mf_lookup_cache [entry_low_idx];
+ for (i = entry_low_idx; i <= entry_high_idx; i++, entry++)
+ {
+ /* NB: the "||" in the following test permits this code to
+ tolerate the situation introduced by __mf_check over
+ contiguous objects, where a cache entry spans several
+ objects. */
+ if (entry->low == low || entry->high == high)
+ {
+ entry->low = MAXPTR;
+ entry->high = MINPTR;
+ }
+ }
+ }
+ else
+ {
+ /* The object's index range wrapped around the end of the cache
+ table.  Search from entry_low_idx to the end of the cache, and
+ then from index 0 up to entry_high_idx. */
+ entry = & __mf_lookup_cache [entry_low_idx];
+ for (i = entry_low_idx; i <= __mf_lc_mask; i++, entry++)
+ {
+ /* NB: the "||" in the following test permits this code to
+ tolerate the situation introduced by __mf_check over
+ contiguous objects, where a cache entry spans several
+ objects. */
+ if (entry->low == low || entry->high == high)
+ {
+ entry->low = MAXPTR;
+ entry->high = MINPTR;
+ }
+ }
+ entry = & __mf_lookup_cache [0];
+ for (i = 0; i <= entry_high_idx; i++, entry++)
+ {
+ /* NB: the "||" in the following test permits this code to
+ tolerate the situation introduced by __mf_check over
+ contiguous objects, where a cache entry spans several
+ objects. */
+ if (entry->low == low || entry->high == high)
+ {
+ entry->low = MAXPTR;
+ entry->high = MINPTR;
+ }
+ }
+ }
+ }