OSDN Git Service

Fix problem found by CFLAGS=-fmudflap build.
[pf3gnuchains/gcc-fork.git] / boehm-gc / allchblk.c
index 7a5a3a1..f9c31e0 100644 (file)
@@ -1,7 +1,8 @@
 /* 
  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
  * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1998 by Silicon Graphics.  All rights reserved.
+ * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
+ * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
  *
  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
  * provided the above notices are retained, and a notice that the code was
  * modified is included with the above copyright notice.
  */
-/* Boehm, August 9, 1995 5:08 pm PDT */
 
-#define DEBUG
-#undef DEBUG
+/* #define DEBUG */
 #include <stdio.h>
-#include "gc_priv.h"
+#include "private/gc_priv.h"
 
+GC_bool GC_use_entire_heap = 0;
 
 /*
- * allocate/free routines for heap blocks
- * Note that everything called from outside the garbage collector
- * should be prepared to abort at any point as the result of a signal.
+ * Free heap blocks are kept on one of several free lists,
+ * depending on the size of the block.  Each free list is doubly linked.
+ * Adjacent free blocks are coalesced.
  */
 
-/*
- * Free heap blocks are kept on a list sorted by address.
- * The hb_hdr.hbh_sz field of a free heap block contains the length
- * (in bytes) of the entire block.
- * Neighbors are coalesced.
- */
  
 # define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
                /* largest block we will allocate starting on a black   */
                /* listed block.  Must be >= HBLKSIZE.                  */
 
-struct hblk * GC_hblkfreelist = 0;
 
-struct hblk *GC_savhbp = (struct hblk *)0;  /* heap block preceding next */
-                                        /* block to be examined by   */
-                                        /* GC_allochblk.                */
+# define UNIQUE_THRESHOLD 32
+       /* Sizes up to this many HBLKs each have their own free list    */
+# define HUGE_THRESHOLD 256
+       /* Sizes of at least this many heap blocks are mapped to a      */
+       /* single free list.                                            */
+# define FL_COMPRESSION 8
+       /* In between sizes map this many distinct sizes to a single    */
+       /* bin.                                                         */
+
+# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
+                                + UNIQUE_THRESHOLD
+
+struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
+
+#ifndef USE_MUNMAP
+
+  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
+       /* Number of free bytes on each list.   */
+
+  /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS      */
+  /* > GC_max_large_allocd_bytes?                                      */
+# ifdef __GNUC__
+  __inline__
+# endif
+  static GC_bool GC_enough_large_bytes_left(bytes,n)
+  word bytes;
+  int n;
+  {
+    int i;
+    for (i = N_HBLK_FLS; i >= n; --i) {
+       bytes += GC_free_bytes[i];
+       if (bytes > GC_max_large_allocd_bytes) return TRUE;
+    }
+    return FALSE;
+  }
+
+# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);
+
+# define FREE_ASSERT(e) GC_ASSERT(e)
+
+#else /* USE_MUNMAP */
+
+# define INCR_FREE_BYTES(n, b)
+# define FREE_ASSERT(e)
+
+#endif /* USE_MUNMAP */
+
+/* Map a number of blocks to the appropriate large block free list index. */
+int GC_hblk_fl_from_blocks(blocks_needed)
+word blocks_needed;
+{
+    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
+    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
+    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
+                                       + UNIQUE_THRESHOLD;
+    
+}
+
+# define PHDR(hhdr) HDR(hhdr -> hb_prev)
+# define NHDR(hhdr) HDR(hhdr -> hb_next)
+
+# ifdef USE_MUNMAP
+#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
+# else  /* !USE_MMAP */
+#   define IS_MAPPED(hhdr) 1
+# endif /* USE_MUNMAP */
 
 # if !defined(NO_DEBUGGING)
 void GC_print_hblkfreelist()
 {
-    struct hblk * h = GC_hblkfreelist;
+    struct hblk * h;
     word total_free = 0;
-    hdr * hhdr = HDR(h);
+    hdr * hhdr;
     word sz;
+    int i;
     
-    while (h != 0) {
+    for (i = 0; i <= N_HBLK_FLS; ++i) {
+      h = GC_hblkfreelist[i];
+#     ifdef USE_MUNMAP
+        if (0 != h) GC_printf1("Free list %ld:\n",
+                              (unsigned long)i);
+#     else
+        if (0 != h) GC_printf2("Free list %ld (Total size %ld):\n",
+                              (unsigned long)i,
+                              (unsigned long)GC_free_bytes[i]);
+#     endif
+      while (h != 0) {
+        hhdr = HDR(h);
         sz = hhdr -> hb_sz;
-       GC_printf2("0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
+       GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
        total_free += sz;
         if (GC_is_black_listed(h, HBLKSIZE) != 0) {
              GC_printf0("start black listed\n");
@@ -63,11 +131,92 @@ void GC_print_hblkfreelist()
              GC_printf0("not black listed\n");
         }
         h = hhdr -> hb_next;
-        hhdr = HDR(h);
+      }
     }
+#   ifndef USE_MUNMAP
+      if (total_free != GC_large_free_bytes) {
+       GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
+                  (unsigned long) GC_large_free_bytes);
+      }
+#   endif
     GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
 }
 
+/* Return the free list index on which the block described by the header */
+/* appears, or -1 if it appears nowhere.                                */
+int free_list_index_of(wanted)
+hdr * wanted;
+{
+    struct hblk * h;
+    hdr * hhdr;
+    int i;
+    
+    for (i = 0; i <= N_HBLK_FLS; ++i) {
+      h = GC_hblkfreelist[i];
+      while (h != 0) {
+        hhdr = HDR(h);
+       if (hhdr == wanted) return i;
+        h = hhdr -> hb_next;
+      }
+    }
+    return -1;
+}
+
+void GC_dump_regions()
+{
+    unsigned i;
+    ptr_t start, end;
+    ptr_t p;
+    size_t bytes;
+    hdr *hhdr;
+    for (i = 0; i < GC_n_heap_sects; ++i) {
+       start = GC_heap_sects[i].hs_start;
+       bytes = GC_heap_sects[i].hs_bytes;
+       end = start + bytes;
+       /* Merge in contiguous sections.        */
+         while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
+           ++i;
+           end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
+         }
+       GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
+       for (p = start; p < end;) {
+           hhdr = HDR(p);
+           GC_printf1("\t0x%lx ", (unsigned long)p);
+           if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+               GC_printf1("Missing header!!(%ld)\n", hhdr);
+               p += HBLKSIZE;
+               continue;
+           }
+           if (HBLK_IS_FREE(hhdr)) {
+                int correct_index = GC_hblk_fl_from_blocks(
+                                       divHBLKSZ(hhdr -> hb_sz));
+               int actual_index;
+               
+               GC_printf1("\tfree block of size 0x%lx bytes",
+                          (unsigned long)(hhdr -> hb_sz));
+               if (IS_MAPPED(hhdr)) {
+                   GC_printf0("\n");
+               } else {
+                   GC_printf0("(unmapped)\n");
+               }
+               actual_index = free_list_index_of(hhdr);
+               if (-1 == actual_index) {
+                   GC_printf1("\t\tBlock not on free list %ld!!\n",
+                               correct_index);
+               } else if (correct_index != actual_index) {
+                   GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
+                              actual_index, correct_index);
+               }
+               p += hhdr -> hb_sz;
+           } else {
+               GC_printf1("\tused for blocks of size 0x%lx bytes\n",
+                          (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
+               p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
+           }
+       }
+    }
+}
+
 # endif /* NO_DEBUGGING */
 
 /* Initialize hdr for a block containing the indicated size and        */
@@ -100,69 +249,372 @@ unsigned char flags;
     return(TRUE);
 }
 
-#ifdef EXACT_FIRST
-#   define LAST_TRIP 2
-#else
-#   define LAST_TRIP 1
-#endif
+#define FL_UNKNOWN -1
+/*
+ * Remove hhdr from the appropriate free list.
+ * We assume it is on the nth free list, or on the size
+ * appropriate free list if n is FL_UNKNOWN.
+ */
+void GC_remove_from_fl(hhdr, n)
+hdr * hhdr;
+int n;
+{
+    int index;
+
+    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
+#   ifndef USE_MUNMAP
+      /* We always need index to maintain free counts. */
+      if (FL_UNKNOWN == n) {
+          index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
+      } else {
+         index = n;
+      }
+#   endif
+    if (hhdr -> hb_prev == 0) {
+#      ifdef USE_MUNMAP
+         if (FL_UNKNOWN == n) {
+            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
+         } else {
+           index = n;
+         }
+#      endif
+       GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
+       GC_hblkfreelist[index] = hhdr -> hb_next;
+    } else {
+       hdr *phdr;
+       GET_HDR(hhdr -> hb_prev, phdr);
+       phdr -> hb_next = hhdr -> hb_next;
+    }
+    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
+    FREE_ASSERT(GC_free_bytes[index] >= 0);
+    if (0 != hhdr -> hb_next) {
+       hdr * nhdr;
+       GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
+       GET_HDR(hhdr -> hb_next, nhdr);
+       nhdr -> hb_prev = hhdr -> hb_prev;
+    }
+}
+
+/*
+ * Return a pointer to the free block ending just before h, if any.
+ */
+struct hblk * GC_free_block_ending_at(h)
+struct hblk *h;
+{
+    struct hblk * p = h - 1;
+    hdr * phdr;
+
+    GET_HDR(p, phdr);
+    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
+       p = FORWARDED_ADDR(p,phdr);
+       phdr = HDR(p);
+    }
+    if (0 != phdr) {
+        if(HBLK_IS_FREE(phdr)) {
+           return p;
+       } else {
+           return 0;
+       }
+    }
+    p = GC_prev_block(h - 1);
+    if (0 != p) {
+      phdr = HDR(p);
+      if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
+       return p;
+      }
+    }
+    return 0;
+}
+
+/*
+ * Add hhdr to the appropriate free list.
+ * We maintain individual free lists sorted by address.
+ */
+void GC_add_to_fl(h, hhdr)
+struct hblk *h;
+hdr * hhdr;
+{
+    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
+    struct hblk *second = GC_hblkfreelist[index];
+    hdr * second_hdr;
+#   ifdef GC_ASSERTIONS
+      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
+      hdr * nexthdr = HDR(next);
+      struct hblk *prev = GC_free_block_ending_at(h);
+      hdr * prevhdr = HDR(prev);
+      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
+      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
+#   endif
+    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
+    GC_hblkfreelist[index] = h;
+    INCR_FREE_BYTES(index, hhdr -> hb_sz);
+    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes)
+    hhdr -> hb_next = second;
+    hhdr -> hb_prev = 0;
+    if (0 != second) {
+      GET_HDR(second, second_hdr);
+      second_hdr -> hb_prev = h;
+    }
+    GC_invalidate_map(hhdr);
+}
+
+#ifdef USE_MUNMAP
+
+/* Unmap blocks that haven't been recently touched.  This is the only way */
+/* blocks are ever unmapped.                                             */
+void GC_unmap_old(void)
+{
+    struct hblk * h;
+    hdr * hhdr;
+    word sz;
+    unsigned short last_rec, threshold;
+    int i;
+#   define UNMAP_THRESHOLD 6
+    
+    for (i = 0; i <= N_HBLK_FLS; ++i) {
+      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
+        hhdr = HDR(h);
+       if (!IS_MAPPED(hhdr)) continue;
+       threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
+       last_rec = hhdr -> hb_last_reclaimed;
+       if ((last_rec > GC_gc_no || last_rec < threshold)
+           && threshold < GC_gc_no /* not recently wrapped */) {
+          sz = hhdr -> hb_sz;
+         GC_unmap((ptr_t)h, sz);
+         hhdr -> hb_flags |= WAS_UNMAPPED;
+       }
+      }
+    }  
+}
+
+/* Merge all unmapped blocks that are adjacent to other free           */
+/* blocks.  This may involve remapping, since all blocks are either    */
+/* fully mapped or fully unmapped.                                     */
+void GC_merge_unmapped(void)
+{
+    struct hblk * h, *next;
+    hdr * hhdr, *nexthdr;
+    word size, nextsize;
+    int i;
+    
+    for (i = 0; i <= N_HBLK_FLS; ++i) {
+      h = GC_hblkfreelist[i];
+      while (h != 0) {
+       GET_HDR(h, hhdr);
+       size = hhdr->hb_sz;
+       next = (struct hblk *)((word)h + size);
+       GET_HDR(next, nexthdr);
+       /* Coalesce with successor, if possible */
+         if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
+           nextsize = nexthdr -> hb_sz;
+           if (IS_MAPPED(hhdr)) {
+             GC_ASSERT(!IS_MAPPED(nexthdr));
+             /* make both consistent, so that we can merge */
+               if (size > nextsize) {
+                 GC_remap((ptr_t)next, nextsize);
+               } else {
+                 GC_unmap((ptr_t)h, size);
+                 hhdr -> hb_flags |= WAS_UNMAPPED;
+               }
+           } else if (IS_MAPPED(nexthdr)) {
+             GC_ASSERT(!IS_MAPPED(hhdr));
+             if (size > nextsize) {
+               GC_unmap((ptr_t)next, nextsize);
+             } else {
+               GC_remap((ptr_t)h, size);
+               hhdr -> hb_flags &= ~WAS_UNMAPPED;
+               hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
+             }
+           } else {
+             /* Unmap any gap in the middle */
+               GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
+           }
+           /* If they are both unmapped, we merge, but leave unmapped. */
+           GC_remove_from_fl(hhdr, i);
+           GC_remove_from_fl(nexthdr, FL_UNKNOWN);
+           hhdr -> hb_sz += nexthdr -> hb_sz; 
+           GC_remove_header(next);
+           GC_add_to_fl(h, hhdr); 
+           /* Start over at beginning of list */
+           h = GC_hblkfreelist[i];
+         } else /* not mergeable with successor */ {
+           h = hhdr -> hb_next;
+         }
+      } /* while (h != 0) ... */
+    } /* for ... */
+}
+
+#endif /* USE_MUNMAP */
+
+/*
+ * Return a pointer to a block starting at h of length bytes.
+ * Memory for the block is mapped.
+ * Remove the block from its free list, and return the remainder (if any)
+ * to its appropriate free list.
+ * May fail by returning 0.
+ * The header for the returned block must be set up by the caller.
+ * If the return value is not 0, then hhdr is the header for it.
+ */
+struct hblk * GC_get_first_part(h, hhdr, bytes, index)
+struct hblk *h;
+hdr * hhdr;
+word bytes;
+int index;
+{
+    word total_size = hhdr -> hb_sz;
+    struct hblk * rest;
+    hdr * rest_hdr;
+
+    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
+    GC_remove_from_fl(hhdr, index);
+    if (total_size == bytes) return h;
+    rest = (struct hblk *)((word)h + bytes);
+    rest_hdr = GC_install_header(rest);
+    if (0 == rest_hdr) {
+       /* This may be very bad news ... */
+       WARN("Header allocation failed: Dropping block.\n", 0);
+       return(0);
+    }
+    rest_hdr -> hb_sz = total_size - bytes;
+    rest_hdr -> hb_flags = 0;
+#   ifdef GC_ASSERTIONS
+      /* Mark h not free, to avoid assertion about adjacent free blocks. */
+        hhdr -> hb_map = 0;
+#   endif
+    GC_add_to_fl(rest, rest_hdr);
+    return h;
+}
+
+/*
+ * H is a free block.  N points at an address inside it.
+ * A new header for n has already been set up.  Fix up h's header
+ * to reflect the fact that it is being split, move it to the
+ * appropriate free list.
+ * N replaces h in the original free list.
+ *
+ * Nhdr is not completely filled in, since it is about to be allocated.
+ * It may in fact end up on the wrong free list for its size.
+ * (Hence adding it to a free list is silly.  But this path is hopefully
+ * rare enough that it doesn't matter.  The code is cleaner this way.)
+ */
+void GC_split_block(h, hhdr, n, nhdr, index)
+struct hblk *h;
+hdr * hhdr;
+struct hblk *n;
+hdr * nhdr;
+int index;     /* Index of free list */
+{
+    word total_size = hhdr -> hb_sz;
+    word h_size = (word)n - (word)h;
+    struct hblk *prev = hhdr -> hb_prev;
+    struct hblk *next = hhdr -> hb_next;
+
+    /* Replace h with n on its freelist */
+      nhdr -> hb_prev = prev;
+      nhdr -> hb_next = next;
+      nhdr -> hb_sz = total_size - h_size;
+      nhdr -> hb_flags = 0;
+      if (0 != prev) {
+       HDR(prev) -> hb_next = n;
+      } else {
+        GC_hblkfreelist[index] = n;
+      }
+      if (0 != next) {
+       HDR(next) -> hb_prev = n;
+      }
+      INCR_FREE_BYTES(index, -(signed_word)h_size);
+      FREE_ASSERT(GC_free_bytes[index] > 0);
+#     ifdef GC_ASSERTIONS
+       nhdr -> hb_map = 0;     /* Don't fail test for consecutive      */
+                               /* free blocks in GC_add_to_fl.         */
+#     endif
+#   ifdef USE_MUNMAP
+      hhdr -> hb_last_reclaimed = GC_gc_no;
+#   endif
+    hhdr -> hb_sz = h_size;
+    GC_add_to_fl(h, hhdr);
+    GC_invalidate_map(nhdr);
+}
        
+struct hblk * GC_allochblk_nth();
+
 /*
  * Allocate (and return pointer to) a heap block
- *   for objects of size sz words.
+ *   for objects of size sz words, searching the nth free list.
  *
  * NOTE: We set obj_map field in header correctly.
- *       Caller is resposnsible for building an object freelist in block.
+ *       Caller is responsible for building an object freelist in block.
  *
- * We clear the block if it is destined for large objects, and if
- * kind requires that newly allocated objects be cleared.
+ * Unlike older versions of the collectors, the client is responsible
+ * for clearing the block, if necessary.
  */
 struct hblk *
 GC_allochblk(sz, kind, flags)
 word sz;
 int kind;
+unsigned flags;  /* IGNORE_OFF_PAGE or 0 */
+{
+    word blocks = OBJ_SZ_TO_BLOCKS(sz);
+    int start_list = GC_hblk_fl_from_blocks(blocks);
+    int i;
+    for (i = start_list; i <= N_HBLK_FLS; ++i) {
+       struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
+       if (0 != result) {
+           return result;
+       }
+    }
+    return 0;
+}
+/*
+ * The same, but with search restricted to nth free list.
+ */
+struct hblk *
+GC_allochblk_nth(sz, kind, flags, n)
+word sz;
+int kind;
 unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
+int n;
 {
-    register struct hblk *thishbp;
-    register hdr * thishdr;            /* Header corr. to thishbp */
     register struct hblk *hbp;
     register hdr * hhdr;               /* Header corr. to hbp */
-    struct hblk *prevhbp;
-    register hdr * phdr;               /* Header corr. to prevhbp */
+    register struct hblk *thishbp;
+    register hdr * thishdr;            /* Header corr. to hbp */
     signed_word size_needed;    /* number of bytes in requested objects */
     signed_word size_avail;    /* bytes available in this block        */
-    int trip_count = 0;
 
     size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
 
     /* search for a big enough block in free list */
-       hbp = GC_savhbp;
-       hhdr = HDR(hbp);
-       for(;;) {
-
-           prevhbp = hbp;
-           phdr = hhdr;
-           hbp = (prevhbp == 0? GC_hblkfreelist : phdr->hb_next);
-           hhdr = HDR(hbp);
-
-           if( prevhbp == GC_savhbp) {
-               if (trip_count == LAST_TRIP) return(0);
-               ++trip_count;
-           }
-
-           if( hbp == 0 ) continue;
-
+       hbp = GC_hblkfreelist[n];
+       for(; 0 != hbp; hbp = hhdr -> hb_next) {
+           GET_HDR(hbp, hhdr);
            size_avail = hhdr->hb_sz;
-#          ifdef EXACT_FIRST
-               if (trip_count <= 1 && size_avail != size_needed) continue;
-#          endif
            if (size_avail < size_needed) continue;
-#          ifdef PRESERVE_LAST
-               if (size_avail != size_needed
-                   && !GC_incremental
-                   && GC_in_last_heap_sect(hbp) && GC_should_collect()) {
+           if (size_avail != size_needed
+               && !GC_use_entire_heap
+               && !GC_dont_gc
+               && USED_HEAP_SIZE >= GC_requested_heapsize
+               && !TRUE_INCREMENTAL && GC_should_collect()) {
+#              ifdef USE_MUNMAP
                    continue;
-               } 
-#          endif
+#              else
+                   /* If we have enough large blocks left to cover any */
+                   /* previous request for large blocks, we go ahead   */
+                   /* and split.  Assuming a steady state, that should */
+                   /* be safe.  It means that we can use the full      */
+                   /* heap if we allocate only small objects.          */
+                   if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
+                     continue;
+                   } 
+                   /* If we are deallocating lots of memory from       */
+                   /* finalizers, fail and collect sooner rather       */
+                   /* than later.                                      */
+                   if (WORDS_TO_BYTES(GC_finalizer_mem_freed)
+                       > (GC_heapsize >> 4))  {
+                     continue;
+                   }
+#              endif /* !USE_MUNMAP */
+           }
            /* If the next heap block is obviously better, go on.       */
            /* This prevents us from disassembling a single large block */
            /* to get tiny blocks.                                      */
@@ -170,13 +622,14 @@ unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
              signed_word next_size;
              
              thishbp = hhdr -> hb_next;
-             if (thishbp == 0) thishbp = GC_hblkfreelist; 
-             thishdr = HDR(thishbp);
-             next_size = (signed_word)(thishdr -> hb_sz);
-             if (next_size < size_avail
+             if (thishbp != 0) {
+               GET_HDR(thishbp, thishdr);
+               next_size = (signed_word)(thishdr -> hb_sz);
+               if (next_size < size_avail
                  && next_size >= size_needed
                  && !GC_is_black_listed(thishbp, (word)size_needed)) {
                  continue;
+               }
              }
            }
            if ( !IS_UNCOLLECTABLE(kind) &&
@@ -191,39 +644,46 @@ unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
              
              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
-                                                     (word)eff_size_needed))) {
+                                                     (word)eff_size_needed))
+                       != 0) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              thishbp = lasthbp;
              if (size_avail >= size_needed) {
-               if (thishbp != hbp && GC_install_header(thishbp)) {
+               if (thishbp != hbp &&
+                   0 != (thishdr = GC_install_header(thishbp))) {
+                 /* Make sure it's mapped before we mangle it. */
+#                  ifdef USE_MUNMAP
+                     if (!IS_MAPPED(hhdr)) {
+                       GC_remap((ptr_t)hbp, hhdr -> hb_sz);
+                       hhdr -> hb_flags &= ~WAS_UNMAPPED;
+                     }
+#                  endif
                  /* Split the block at thishbp */
-                     thishdr = HDR(thishbp);
-                     /* GC_invalidate_map not needed, since we will    */
-                     /* allocate this block.                           */
-                     thishdr -> hb_next = hhdr -> hb_next;
-                     thishdr -> hb_sz = size_avail;
-                     hhdr -> hb_sz = (ptr_t)thishbp - (ptr_t)hbp;
-                     hhdr -> hb_next = thishbp;
+                     GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp */
-                     prevhbp = hbp;
-                     phdr = hhdr;
                      hbp = thishbp;
                      hhdr = thishdr;
+                     /* We must now allocate thishbp, since it may     */
+                     /* be on the wrong free list.                     */
                }
              } else if (size_needed > (signed_word)BL_LIMIT
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
-               WARN("Needed to allocate blacklisted block at 0x%lx\n",
-                    (word)hbp);
-               thishbp = hbp;
+               if (++GC_large_alloc_warn_suppressed
+                   >= GC_large_alloc_warn_interval) {
+                 WARN("Repeated allocation of very large block "
+                      "(appr. size %ld):\n"
+                      "\tMay lead to memory leak and poor performance.\n",
+                      size_needed);
+                 GC_large_alloc_warn_suppressed = 0;
+               }
                size_avail = orig_avail;
-             } else if (size_avail == 0
-                        && size_needed == HBLKSIZE
-                        && prevhbp != 0) {
-#              ifndef FIND_LEAK
+             } else if (size_avail == 0 && size_needed == HBLKSIZE
+                        && IS_MAPPED(hhdr)) {
+               if (!GC_find_leak) {
                  static unsigned count = 0;
                  
                  /* The block is completely blacklisted.  We need      */
@@ -235,84 +695,66 @@ unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
                    /* Allocate and drop the block in small chunks, to  */
                    /* maximize the chance that we will recover some    */
                    /* later.                                           */
-                     struct hblk * limit = hbp + (hhdr->hb_sz/HBLKSIZE);
+                     word total_size = hhdr -> hb_sz;
+                     struct hblk * limit = hbp + divHBLKSZ(total_size);
                      struct hblk * h;
+                     struct hblk * prev = hhdr -> hb_prev;
                      
-                     GC_words_wasted += hhdr->hb_sz;
-                     phdr -> hb_next = hhdr -> hb_next;
+                     GC_words_wasted += BYTES_TO_WORDS(total_size);
+                     GC_large_free_bytes -= total_size;
+                     GC_remove_from_fl(hhdr, n);
                      for (h = hbp; h < limit; h++) {
-                       if (h == hbp || GC_install_header(h)) {
-                         hhdr = HDR(h);
+                       if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                          (void) setup_header(
                                  hhdr,
-                                 BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES),
+                                 BYTES_TO_WORDS(HBLKSIZE),
                                  PTRFREE, 0); /* Cant fail */
                          if (GC_debugging_started) {
-                           BZERO(hbp + HDR_BYTES, HBLKSIZE - HDR_BYTES);
+                           BZERO(h, HBLKSIZE);
                          }
                        }
                      }
                    /* Restore hbp to point at free block */
-                     if (GC_savhbp == hbp) GC_savhbp = prevhbp;
-                     hbp = prevhbp;
-                     hhdr = phdr;
-                     if (hbp == GC_savhbp) --trip_count;
+                     hbp = prev;
+                     if (0 == hbp) {
+                       return GC_allochblk_nth(sz, kind, flags, n);
+                     }
+                     hhdr = HDR(hbp);
                  }
-#              endif
+               }
              }
            }
            if( size_avail >= size_needed ) {
-               /* found a big enough block       */
-               /* let thishbp --> the block      */
-               /* set prevhbp, hbp to bracket it */
-                   thishbp = hbp;
-                   thishdr = hhdr;
-                   if( size_avail == size_needed ) {
-                       hbp = hhdr->hb_next;
-                       hhdr = HDR(hbp);
-                   } else {
-                       hbp = (struct hblk *)
-                           (((word)thishbp) + size_needed);
-                       if (!GC_install_header(hbp)) {
-                           hbp = thishbp;
-                           continue;
-                       }
-                       hhdr = HDR(hbp);
-                       GC_invalidate_map(hhdr);
-                       hhdr->hb_next = thishdr->hb_next;
-                       hhdr->hb_sz = size_avail - size_needed;
-                   }
-               /* remove *thishbp from hblk freelist */
-                   if( prevhbp == 0 ) {
-                       GC_hblkfreelist = hbp;
-                   } else {
-                       phdr->hb_next = hbp;
-                   }
-               /* save current list search position */
-                   GC_savhbp = hbp;
+#              ifdef USE_MUNMAP
+                 if (!IS_MAPPED(hhdr)) {
+                   GC_remap((ptr_t)hbp, hhdr -> hb_sz);
+                   hhdr -> hb_flags &= ~WAS_UNMAPPED;
+                 }
+#              endif
+               /* hbp may be on the wrong freelist; the parameter n    */
+               /* is important.                                        */
+               hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
                break;
            }
        }
+
+    if (0 == hbp) return 0;
        
-    /* Notify virtual dirty bit implementation that we are about to write. */
-       GC_write_hint(thishbp);
-    
     /* Add it to map of valid blocks */
-       if (!GC_install_counts(thishbp, (word)size_needed)) return(0);
+       if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */
                
     /* Set up header */
-        if (!setup_header(thishdr, sz, kind, flags)) {
-            GC_remove_counts(thishbp, (word)size_needed);
+        if (!setup_header(hhdr, sz, kind, flags)) {
+            GC_remove_counts(hbp, (word)size_needed);
             return(0); /* ditto */
         }
-        
-    /* Clear block if necessary */
-       if (GC_debugging_started
-           || sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init) {
-           BZERO(thishbp + HDR_BYTES,  size_needed - HDR_BYTES);
-       }
 
+    /* Notify virtual dirty bit implementation that we are about to write.  */
+    /* Ensure that pointerfree objects are not protected if it's avoidable. */
+       GC_remove_protection(hbp, divHBLKSZ(size_needed),
+                            (hhdr -> hb_descr == 0) /* pointer-free */);
+        
     /* We just successfully allocated a block.  Restart count of       */
     /* consecutive failures.                                           */
     {
@@ -320,8 +762,11 @@ unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
        
        GC_fail_count = 0;
     }
+
+    GC_large_free_bytes -= size_needed;
     
-    return( thishbp );
+    GC_ASSERT(IS_MAPPED(hhdr));
+    return( hbp );
 }
  
 struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */
@@ -334,75 +779,60 @@ struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */
  * All mark words are assumed to be cleared.
  */
 void
-GC_freehblk(p)
-register struct hblk *p;
+GC_freehblk(hbp)
+struct hblk *hbp;
 {
-register hdr *phdr;    /* Header corresponding to p */
-register struct hblk *hbp, *prevhbp;
-register hdr *hhdr, *prevhdr;
-register signed_word size;
+struct hblk *next, *prev;
+hdr *hhdr, *prevhdr, *nexthdr;
+signed_word size;
 
-    /* GC_savhbp may become invalid due to coalescing.  Clear it. */
-       GC_savhbp = (struct hblk *)0;
 
-    phdr = HDR(p);
-    size = phdr->hb_sz;
+    GET_HDR(hbp, hhdr);
+    size = hhdr->hb_sz;
     size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
-    GC_remove_counts(p, (word)size);
-    phdr->hb_sz = size;
-    GC_invalidate_map(phdr);
-    prevhbp = 0;
-    
-    /* The following optimization was suggested by David Detlefs.      */
-    /* Note that the header cannot be NIL, since there cannot be an    */
-    /* intervening  call to GC_freehblk without resetting              */
-    /* GC_freehblk_ptr.                                                        */
-    if (GC_freehblk_ptr != 0 &&
-       HDR(GC_freehblk_ptr)->hb_map == GC_invalid_map &&
-       (ptr_t)GC_freehblk_ptr < (ptr_t)p) {
-      hbp = GC_freehblk_ptr;
-    } else {
-      hbp = GC_hblkfreelist;
-    };
-    hhdr = HDR(hbp);
-
-    while( (hbp != 0) && (hbp < p) ) {
-       prevhbp = hbp;
-       prevhdr = hhdr;
-       hbp = hhdr->hb_next;
-       hhdr = HDR(hbp);
-    }
-    GC_freehblk_ptr = prevhbp;
+    GC_remove_counts(hbp, (word)size);
+    hhdr->hb_sz = size;
+#   ifdef USE_MUNMAP
+      hhdr -> hb_last_reclaimed = GC_gc_no;
+#   endif
     
     /* Check for duplicate deallocation in the easy case */
-      if (hbp != 0 && (ptr_t)p + size > (ptr_t)hbp
-        || prevhbp != 0 && (ptr_t)prevhbp + prevhdr->hb_sz > (ptr_t)p) {
+      if (HBLK_IS_FREE(hhdr)) {
         GC_printf1("Duplicate large block deallocation of 0x%lx\n",
-                  (unsigned long) p);
-        GC_printf2("Surrounding free blocks are 0x%lx and 0x%lx\n",
-                  (unsigned long) prevhbp, (unsigned long) hbp);
+                  (unsigned long) hbp);
+       ABORT("Duplicate large block deallocation");
       }
 
+    GC_ASSERT(IS_MAPPED(hhdr));
+    GC_invalidate_map(hhdr);
+    next = (struct hblk *)((word)hbp + size);
+    GET_HDR(next, nexthdr);
+    prev = GC_free_block_ending_at(hbp);
     /* Coalesce with successor, if possible */
-      if( (((word)p)+size) == ((word)hbp) ) {
-       phdr->hb_next = hhdr->hb_next;
-       phdr->hb_sz += hhdr->hb_sz;
-       GC_remove_header(hbp);
-      } else {
-       phdr->hb_next = hbp;
+      if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
+       GC_remove_from_fl(nexthdr, FL_UNKNOWN);
+       hhdr -> hb_sz += nexthdr -> hb_sz; 
+       GC_remove_header(next);
+      }
+    /* Coalesce with predecessor, if possible. */
+      if (0 != prev) {
+       prevhdr = HDR(prev);
+       if (IS_MAPPED(prevhdr)) {
+         GC_remove_from_fl(prevhdr, FL_UNKNOWN);
+         prevhdr -> hb_sz += hhdr -> hb_sz;
+#        ifdef USE_MUNMAP
+           prevhdr -> hb_last_reclaimed = GC_gc_no;
+#        endif
+         GC_remove_header(hbp);
+         hbp = prev;
+         hhdr = prevhdr;
+       }
       }
+    /* FIXME: It is not clear we really always want to do these merges */
+    /* with -DUSE_MUNMAP, since it updates ages and hence prevents     */
+    /* unmapping.                                                      */
 
-    
-    if( prevhbp == 0 ) {
-       GC_hblkfreelist = p;
-    } else if( (((word)prevhbp) + prevhdr->hb_sz)
-              == ((word)p) ) {
-      /* Coalesce with predecessor */
-       prevhdr->hb_next = phdr->hb_next;
-       prevhdr->hb_sz += phdr->hb_sz;
-       GC_remove_header(p);
-    } else {
-       prevhdr->hb_next = p;
-    }
+    GC_large_free_bytes += size;
+    GC_add_to_fl(hbp, hhdr);    
 }