2006-03-24 Andreas Tobler <a.tobler@schweiz.ch>
[pf3gnuchains/gcc-fork.git]
diff --git a/boehm-gc/allchblk.c b/boehm-gc/allchblk.c
index 189b942..1a1efc6 100644
  * modified is included with the above copyright notice.
  */
 
-#define DEBUG
-#undef DEBUG
+/* #define DEBUG */
 #include <stdio.h>
-#include "gc_priv.h"
+#include "private/gc_priv.h"
 
+GC_bool GC_use_entire_heap = 0;
 
 /*
  * Free heap blocks are kept on one of several free lists,
 
 struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
 
+#ifndef USE_MUNMAP
+
+  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
+       /* Number of free bytes on each list.   */
+
+  /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS      */
+  /* > GC_max_large_allocd_bytes?                                      */
+# ifdef __GNUC__
+  __inline__
+# endif
+  static GC_bool GC_enough_large_bytes_left(bytes,n)
+  word bytes;
+  int n;
+  {
+    int i;
+    for (i = N_HBLK_FLS; i >= n; --i) {
+       bytes += GC_free_bytes[i];
+       if (bytes > GC_max_large_allocd_bytes) return TRUE;
+    }
+    return FALSE;
+  }
+
+# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);
+
+# define FREE_ASSERT(e) GC_ASSERT(e)
+
+#else /* USE_MUNMAP */
+
+# define INCR_FREE_BYTES(n, b)
+# define FREE_ASSERT(e)
+
+#endif /* USE_MUNMAP */
+
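
The hunk above adds per-size-class accounting of free bytes when USE_MUNMAP is off. A minimal standalone sketch of the same logic, using stand-in types and values (word, N_HBLK_FLS, and the 4096-byte threshold here are placeholders, not the GC's real definitions):

    #include <stdio.h>

    #define N_HBLK_FLS 3                   /* stand-in for the real constant */
    typedef unsigned long word;

    static word free_bytes[N_HBLK_FLS+1];  /* mirrors GC_free_bytes */
    static word max_large_allocd = 4096;   /* mirrors GC_max_large_allocd_bytes */

    /* TRUE iff bytes plus the free bytes on lists n..N_HBLK_FLS exceed */
    /* the largest recent large-object request.                         */
    static int enough_large_bytes_left(word bytes, int n)
    {
        int i;
        for (i = N_HBLK_FLS; i >= n; --i) {
            bytes += free_bytes[i];
            if (bytes > max_large_allocd) return 1;
        }
        return 0;
    }

    int main(void)
    {
        free_bytes[2] = 3000; free_bytes[3] = 2000;
        printf("%d\n", enough_large_bytes_left(0, 2));  /* 1: 5000 > 4096 */
        printf("%d\n", enough_large_bytes_left(0, 3));  /* 0: 2000 <= 4096 */
        return 0;
    }
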
 /* Map a number of blocks to the appropriate large block free list index. */
 int GC_hblk_fl_from_blocks(blocks_needed)
 word blocks_needed;
@@ -57,7 +90,6 @@ word blocks_needed;
     
 }
 
-# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
 # define PHDR(hhdr) HDR(hhdr -> hb_prev)
 # define NHDR(hhdr) HDR(hhdr -> hb_next)
 
@@ -78,7 +110,14 @@ void GC_print_hblkfreelist()
     
     for (i = 0; i <= N_HBLK_FLS; ++i) {
       h = GC_hblkfreelist[i];
-      if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
+#     ifdef USE_MUNMAP
+        if (0 != h) GC_printf1("Free list %ld:\n",
+                              (unsigned long)i);
+#     else
+        if (0 != h) GC_printf2("Free list %ld (Total size %ld):\n",
+                              (unsigned long)i,
+                              (unsigned long)GC_free_bytes[i]);
+#     endif
       while (h != 0) {
         hhdr = HDR(h);
         sz = hhdr -> hb_sz;
@@ -94,10 +133,12 @@ void GC_print_hblkfreelist()
         h = hhdr -> hb_next;
       }
     }
-    if (total_free != GC_large_free_bytes) {
+#   ifndef USE_MUNMAP
+      if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
-    }
+      }
+#   endif
     GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
 }
 
@@ -142,7 +183,7 @@ void GC_dump_regions()
            hhdr = HDR(p);
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
-               GC_printf1("Missing header!!\n", hhdr);
+               GC_printf1("Missing header!!(%ld)\n", hhdr);
                p += HBLKSIZE;
                continue;
            }
@@ -218,22 +259,39 @@ void GC_remove_from_fl(hhdr, n)
 hdr * hhdr;
 int n;
 {
+    int index;
+
     GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
+#   ifndef USE_MUNMAP
+      /* We always need index to maintain free counts. */
+      if (FL_UNKNOWN == n) {
+          index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
+      } else {
+         index = n;
+      }
+#   endif
     if (hhdr -> hb_prev == 0) {
-        int index;
-       if (FL_UNKNOWN == n) {
+#      ifdef USE_MUNMAP
+         if (FL_UNKNOWN == n) {
             index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
-       } else {
+         } else {
            index = n;
-       }
+         }
+#      endif
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
     } else {
-       PHDR(hhdr) -> hb_next = hhdr -> hb_next;
+       hdr *phdr;
+       GET_HDR(hhdr -> hb_prev, phdr);
+       phdr -> hb_next = hhdr -> hb_next;
     }
+    FREE_ASSERT(GC_free_bytes[index] >= hhdr -> hb_sz);
+    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
     if (0 != hhdr -> hb_next) {
+       hdr * nhdr;
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
-       NHDR(hhdr) -> hb_prev = hhdr -> hb_prev;
+       GET_HDR(hhdr -> hb_next, nhdr);
+       nhdr -> hb_prev = hhdr -> hb_prev;
     }
 }
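
GC_remove_from_fl above now computes the free-list index up front (so GC_free_bytes can be adjusted) and replaces the PHDR/NHDR shortcuts with explicit GET_HDR lookups. The unlink itself is ordinary doubly-linked-list surgery; a self-contained sketch with a hypothetical node type:

    /* Hypothetical flattened version of the unlink in GC_remove_from_fl. */
    struct node { struct node *prev, *next; };

    static struct node *list_head;  /* stands in for GC_hblkfreelist[index] */

    static void unlink_node(struct node *p)
    {
        if (p->prev == 0) {
            list_head = p->next;    /* p was the head of its list */
        } else {
            p->prev->next = p->next;
        }
        if (p->next != 0) {
            p->next->prev = p->prev;
        }
    }

    int main(void)
    {
        struct node a = {0}, b = {0};
        a.next = &b; b.prev = &a; list_head = &a;
        unlink_node(&a);            /* head moves to b, b.prev becomes 0 */
        return list_head == &b ? 0 : 1;
    }
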
 
@@ -244,13 +302,20 @@ struct hblk * GC_free_block_ending_at(h)
 struct hblk *h;
 {
     struct hblk * p = h - 1;
-    hdr * phdr = HDR(p);
+    hdr * phdr;
 
+    GET_HDR(p, phdr);
     while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
     }
-    if (0 != phdr && HBLK_IS_FREE(phdr)) return p;
+    if (0 != phdr) {
+        if(HBLK_IS_FREE(phdr)) {
+           return p;
+       } else {
+           return 0;
+       }
+    }
     p = GC_prev_block(h - 1);
     if (0 != p) {
       phdr = HDR(p);
@@ -271,6 +336,7 @@ hdr * hhdr;
 {
     int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
     struct hblk *second = GC_hblkfreelist[index];
+    hdr * second_hdr;
 #   ifdef GC_ASSERTIONS
       struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
       hdr * nexthdr = HDR(next);
@@ -281,9 +347,14 @@ hdr * hhdr;
 #   endif
     GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
     GC_hblkfreelist[index] = h;
+    INCR_FREE_BYTES(index, hhdr -> hb_sz);
+    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes)
     hhdr -> hb_next = second;
     hhdr -> hb_prev = 0;
-    if (0 != second) HDR(second) -> hb_prev = h;
+    if (0 != second) {
+      GET_HDR(second, second_hdr);
+      second_hdr -> hb_prev = h;
+    }
     GC_invalidate_map(hhdr);
 }
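
GC_add_to_fl pushes the block onto the front of its size-class list and, with the new INCR_FREE_BYTES call, credits the block's size to that list's byte count. A sketch of that push-front plus accounting (types and names are stand-ins):

    #include <stdio.h>

    struct blk { struct blk *prev, *next; unsigned long sz; };

    static struct blk *head;            /* stands in for GC_hblkfreelist[index] */
    static unsigned long free_bytes_i;  /* stands in for GC_free_bytes[index]   */

    static void add_block(struct blk *b)
    {
        b->next = head;
        b->prev = 0;
        if (head != 0) head->prev = b;
        head = b;
        free_bytes_i += b->sz;          /* INCR_FREE_BYTES(index, b->sz) */
    }

    int main(void)
    {
        struct blk b1 = { 0, 0, 4096 }, b2 = { 0, 0, 8192 };
        add_block(&b1);
        add_block(&b2);                 /* b2 becomes the new head */
        printf("%lu\n", free_bytes_i);  /* 12288 */
        return 0;
    }
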
 
@@ -306,9 +377,8 @@ void GC_unmap_old(void)
        if (!IS_MAPPED(hhdr)) continue;
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
-       if (last_rec > GC_gc_no
-           || last_rec < threshold && threshold < GC_gc_no
-                                      /* not recently wrapped */) {
+       if ((last_rec > GC_gc_no || last_rec < threshold)
+           && threshold < GC_gc_no /* not recently wrapped */) {
           sz = hhdr -> hb_sz;
          GC_unmap((ptr_t)h, sz);
          hhdr -> hb_flags |= WAS_UNMAPPED;
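
The rewritten test above fixes an operator-precedence bug: && binds tighter than ||, so the old condition parsed as last_rec > GC_gc_no || (last_rec < threshold && threshold < GC_gc_no), letting the wrapped-counter case bypass the "not recently wrapped" guard. A standalone check of the regrouped condition (the UNMAP_THRESHOLD value is a placeholder):

    #include <stdio.h>

    #define UNMAP_THRESHOLD 6           /* placeholder value */

    static int old_enough(unsigned long gc_no, unsigned short last_rec)
    {
        unsigned short threshold = (unsigned short)(gc_no - UNMAP_THRESHOLD);
        return (last_rec > gc_no || last_rec < threshold)
               && threshold < gc_no;    /* not recently wrapped */
    }

    int main(void)
    {
        /* gc_no = 3: threshold wraps to 65533, so nothing is old enough. */
        /* The old grouping would have unmapped the first block anyway.   */
        printf("%d\n", old_enough(3UL, 10));    /* 0 */
        printf("%d\n", old_enough(100UL, 10));  /* 1: 10 < 94 && 94 < 100 */
        return 0;
    }
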
@@ -330,10 +400,10 @@ void GC_merge_unmapped(void)
     for (i = 0; i <= N_HBLK_FLS; ++i) {
       h = GC_hblkfreelist[i];
       while (h != 0) {
-       hhdr = HDR(h);
+       GET_HDR(h, hhdr);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
-       nexthdr = HDR(next);
+       GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible */
          if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
            nextsize = nexthdr -> hb_sz;
@@ -353,6 +423,7 @@ void GC_merge_unmapped(void)
              } else {
                GC_remap((ptr_t)h, size);
                hhdr -> hb_flags &= ~WAS_UNMAPPED;
+               hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
              }
            } else {
              /* Unmap any gap in the middle */
@@ -398,12 +469,16 @@ int index;
     GC_remove_from_fl(hhdr, index);
     if (total_size == bytes) return h;
     rest = (struct hblk *)((word)h + bytes);
-    if (!GC_install_header(rest)) return(0);
-    rest_hdr = HDR(rest);
+    rest_hdr = GC_install_header(rest);
+    if (0 == rest_hdr) {
+       /* This may be very bad news ... */
+       WARN("Header allocation failed: Dropping block.\n", 0);
+       return(0);
+    }
     rest_hdr -> hb_sz = total_size - bytes;
     rest_hdr -> hb_flags = 0;
 #   ifdef GC_ASSERTIONS
-      // Mark h not free, to avoid assertion about adjacent free blocks.
+      /* Mark h not free, to avoid assertion about adjacent free blocks. */
         hhdr -> hb_map = 0;
 #   endif
     GC_add_to_fl(rest, rest_hdr);
@@ -447,6 +522,8 @@ int index;  /* Index of free list */
       if (0 != next) {
        HDR(next) -> hb_prev = n;
       }
+      INCR_FREE_BYTES(index, -(signed_word)h_size);
+      FREE_ASSERT(GC_free_bytes[index] > 0);
 #     ifdef GC_ASSERTIONS
        nhdr -> hb_map = 0;     /* Don't fail test for consecutive      */
                                /* free blocks in GC_add_to_fl.         */
@@ -468,20 +545,23 @@ struct hblk * GC_allochblk_nth();
  * NOTE: We set obj_map field in header correctly.
  *       Caller is responsible for building an object freelist in block.
  *
- * We clear the block if it is destined for large objects, and if
- * kind requires that newly allocated objects be cleared.
+ * Unlike older versions of the collectors, the client is responsible
+ * for clearing the block, if necessary.
  */
 struct hblk *
 GC_allochblk(sz, kind, flags)
 word sz;
 int kind;
-unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
+unsigned flags;  /* IGNORE_OFF_PAGE or 0 */
 {
-    int start_list = GC_hblk_fl_from_blocks(OBJ_SZ_TO_BLOCKS(sz));
+    word blocks = OBJ_SZ_TO_BLOCKS(sz);
+    int start_list = GC_hblk_fl_from_blocks(blocks);
     int i;
     for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
-       if (0 != result) return result;
+       if (0 != result) {
+           return result;
+       }
     }
     return 0;
 }
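
GC_allochblk now computes the block count once and then, as before, tries each free list from the smallest adequate size class upward. The search pattern, reduced to a standalone sketch (try_list is a stub standing in for GC_allochblk_nth):

    #define N_FLS 8                     /* stand-in for N_HBLK_FLS */

    static void *lists[N_FLS+1];        /* heads of the size-class lists */

    static void *try_list(int i)        /* stands in for GC_allochblk_nth */
    {
        return lists[i];
    }

    static void *alloc_from_lists(int start_list)
    {
        int i;
        for (i = start_list; i <= N_FLS; ++i) {
            void *result = try_list(i);
            if (result != 0) return result;
        }
        return 0;                       /* caller must grow the heap */
    }

    int main(void)
    {
        static int block;
        lists[5] = &block;              /* only list 5 has a block */
        return alloc_from_lists(2) == &block ? 0 : 1;
    }
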
@@ -506,16 +586,35 @@ int n;
 
     /* search for a big enough block in free list */
        hbp = GC_hblkfreelist[n];
-       hhdr = HDR(hbp);
-       for(; 0 != hbp; hbp = hhdr -> hb_next, hhdr = HDR(hbp)) {
+       for(; 0 != hbp; hbp = hhdr -> hb_next) {
+           GET_HDR(hbp, hhdr);
            size_avail = hhdr->hb_sz;
            if (size_avail < size_needed) continue;
-#          ifdef PRESERVE_LAST
-               if (size_avail != size_needed
-                   && !GC_incremental && GC_should_collect()) {
+           if (size_avail != size_needed
+               && !GC_use_entire_heap
+               && !GC_dont_gc
+               && USED_HEAP_SIZE >= GC_requested_heapsize
+               && !TRUE_INCREMENTAL && GC_should_collect()) {
+#              ifdef USE_MUNMAP
                    continue;
-               } 
-#          endif
+#              else
+                   /* If we have enough large blocks left to cover any */
+                   /* previous request for large blocks, we go ahead   */
+                   /* and split.  Assuming a steady state, that should */
+                   /* be safe.  It means that we can use the full      */
+                   /* heap if we allocate only small objects.          */
+                   if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
+                     continue;
+                   } 
+                   /* If we are deallocating lots of memory from       */
+                   /* finalizers, fail and collect sooner rather       */
+                   /* than later.                                      */
+                   if (WORDS_TO_BYTES(GC_finalizer_mem_freed)
+                       > (GC_heapsize >> 4))  {
+                     continue;
+                   }
+#              endif /* !USE_MUNMAP */
+           }
            /* If the next heap block is obviously better, go on.       */
            /* This prevents us from disassembling a single large block */
            /* to get tiny blocks.                                      */
@@ -524,7 +623,7 @@ int n;
              
              thishbp = hhdr -> hb_next;
              if (thishbp != 0) {
-               thishdr = HDR(thishbp);
+               GET_HDR(thishbp, thishdr);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                  && next_size >= size_needed
@@ -545,22 +644,23 @@ int n;
              
              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
-                                                     (word)eff_size_needed))) {
+                                                     (word)eff_size_needed))
+                       != 0) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              thishbp = lasthbp;
              if (size_avail >= size_needed) {
-               if (thishbp != hbp && GC_install_header(thishbp)) {
+               if (thishbp != hbp &&
+                   0 != (thishdr = GC_install_header(thishbp))) {
                  /* Make sure it's mapped before we mangle it. */
 #                  ifdef USE_MUNMAP
                      if (!IS_MAPPED(hhdr)) {
-                       GC_remap((ptr_t)hbp, size_avail);
+                       GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                        hhdr -> hb_flags &= ~WAS_UNMAPPED;
                      }
 #                  endif
                  /* Split the block at thishbp */
-                     thishdr = HDR(thishbp);
                      GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp */
                      hbp = thishbp;
@@ -572,8 +672,14 @@ int n;
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
-               WARN("Needed to allocate blacklisted block at 0x%lx\n",
-                    (word)hbp);
+               if (++GC_large_alloc_warn_suppressed
+                   >= GC_large_alloc_warn_interval) {
+                 WARN("Repeated allocation of very large block "
+                      "(appr. size %ld):\n"
+                      "\tMay lead to memory leak and poor performance.\n",
+                      size_needed);
+                 GC_large_alloc_warn_suppressed = 0;
+               }
                size_avail = orig_avail;
              } else if (size_avail == 0 && size_needed == HBLKSIZE
                         && IS_MAPPED(hhdr)) {
@@ -594,18 +700,17 @@ int n;
                      struct hblk * h;
                      struct hblk * prev = hhdr -> hb_prev;
                      
-                     GC_words_wasted += total_size;
+                     GC_words_wasted += BYTES_TO_WORDS(total_size);
                      GC_large_free_bytes -= total_size;
                      GC_remove_from_fl(hhdr, n);
                      for (h = hbp; h < limit; h++) {
-                       if (h == hbp || GC_install_header(h)) {
-                         hhdr = HDR(h);
+                       if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                          (void) setup_header(
                                  hhdr,
-                                 BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES),
+                                 BYTES_TO_WORDS(HBLKSIZE),
                                  PTRFREE, 0); /* Cant fail */
                          if (GC_debugging_started) {
-                           BZERO(h + HDR_BYTES, HBLKSIZE - HDR_BYTES);
+                           BZERO(h, HBLKSIZE);
                          }
                        }
                      }
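
Two things change in the hunk above: GC_words_wasted is now incremented in words rather than bytes (via BYTES_TO_WORDS), and the in-block header slot (HDR_BYTES) is gone, so whole blocks are set up and cleared. The units fix in isolation, with a stand-in BYTES_TO_WORDS definition:

    #include <stdio.h>

    typedef unsigned long word;
    #define BYTES_TO_WORDS(x) ((x) / sizeof(word))   /* stand-in definition */

    int main(void)
    {
        word words_wasted = 0;
        word total_size = 4096;              /* bytes: one 4K heap block */
        words_wasted += BYTES_TO_WORDS(total_size);
        printf("%lu\n", words_wasted);       /* 512 on an LP64 target */
        return 0;
    }
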
@@ -622,7 +727,7 @@ int n;
            if( size_avail >= size_needed ) {
 #              ifdef USE_MUNMAP
                  if (!IS_MAPPED(hhdr)) {
-                   GC_remap((ptr_t)hbp, size_avail);
+                   GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                  }
 #              endif
@@ -635,9 +740,6 @@ int n;
 
     if (0 == hbp) return 0;
        
-    /* Notify virtual dirty bit implementation that we are about to write. */
-       GC_write_hint(hbp);
-    
     /* Add it to map of valid blocks */
        if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */
@@ -647,13 +749,12 @@ int n;
             GC_remove_counts(hbp, (word)size_needed);
             return(0); /* ditto */
         }
-        
-    /* Clear block if necessary */
-       if (GC_debugging_started
-           || sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init) {
-           BZERO(hbp + HDR_BYTES,  size_needed - HDR_BYTES);
-       }
 
+    /* Notify virtual dirty bit implementation that we are about to write.  */
+    /* Ensure that pointerfree objects are not protected if it's avoidable. */
+       GC_remove_protection(hbp, divHBLKSZ(size_needed),
+                            (hhdr -> hb_descr == 0) /* pointer-free */);
+        
     /* We just successfully allocated a block.  Restart count of       */
     /* consecutive failures.                                           */
     {
@@ -686,22 +787,26 @@ hdr *hhdr, *prevhdr, *nexthdr;
 signed_word size;
 
 
-    hhdr = HDR(hbp);
+    GET_HDR(hbp, hhdr);
     size = hhdr->hb_sz;
     size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
     GC_remove_counts(hbp, (word)size);
     hhdr->hb_sz = size;
+#   ifdef USE_MUNMAP
+      hhdr -> hb_last_reclaimed = GC_gc_no;
+#   endif
     
     /* Check for duplicate deallocation in the easy case */
       if (HBLK_IS_FREE(hhdr)) {
         GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
+       ABORT("Duplicate large block deallocation");
       }
 
     GC_ASSERT(IS_MAPPED(hhdr));
     GC_invalidate_map(hhdr);
     next = (struct hblk *)((word)hbp + size);
-    nexthdr = HDR(next);
+    GET_HDR(next, nexthdr);
     prev = GC_free_block_ending_at(hbp);
     /* Coalesce with successor, if possible */
       if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
@@ -715,11 +820,17 @@ signed_word size;
        if (IS_MAPPED(prevhdr)) {
          GC_remove_from_fl(prevhdr, FL_UNKNOWN);
          prevhdr -> hb_sz += hhdr -> hb_sz;
+#        ifdef USE_MUNMAP
+           prevhdr -> hb_last_reclaimed = GC_gc_no;
+#        endif
          GC_remove_header(hbp);
          hbp = prev;
          hhdr = prevhdr;
        }
       }
+    /* FIXME: It is not clear we really always want to do these merges */
+    /* with -DUSE_MUNMAP, since it updates ages and hence prevents     */
+    /* unmapping.                                                      */
 
     GC_large_free_bytes += size;
     GC_add_to_fl(hbp, hhdr);
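
GC_freehblk now aborts on a duplicate deallocation, stamps hb_last_reclaimed under USE_MUNMAP, and, as before, coalesces the freed block with mapped free neighbors. The predecessor merge, reduced to a self-contained sketch with hypothetical block descriptors:

    #include <stdio.h>

    struct blk { unsigned long start, sz; int is_free; };

    /* Fold b into prev when prev is free and ends exactly where b begins, */
    /* as GC_freehblk does via prevhdr -> hb_sz += hhdr -> hb_sz.          */
    static void coalesce(struct blk *prev, struct blk *b)
    {
        if (prev->is_free && prev->start + prev->sz == b->start) {
            prev->sz += b->sz;
            b->sz = 0;                  /* b's header would be removed */
        }
    }

    int main(void)
    {
        struct blk a = { 0x10000, 0x2000, 1 };
        struct blk b = { 0x12000, 0x1000, 1 };
        coalesce(&a, &b);
        printf("0x%lx\n", a.sz);        /* 0x3000: one contiguous free block */
        return 0;
    }
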