* Makefile.am: Add dummy install-pdf target.
diff --git a/boehm-gc/allchblk.c b/boehm-gc/allchblk.c
index 5b7bcff..1a1efc6 100644
--- a/boehm-gc/allchblk.c
+++ b/boehm-gc/allchblk.c
@@ -47,12 +47,16 @@ GC_bool GC_use_entire_heap = 0;
 struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
 
 #ifndef USE_MUNMAP
+
   word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
        /* Number of free bytes on each list.   */
 
   /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS      */
   /* > GC_max_large_allocd_bytes?                                      */
-  GC_bool GC_enough_large_bytes_left(bytes,n)
+# ifdef __GNUC__
+  __inline__
+# endif
+  static GC_bool GC_enough_large_bytes_left(bytes,n)
   word bytes;
   int n;
   {
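
The hunk above makes GC_enough_large_bytes_left static (and inline under gcc); its body, which the hunk elides, accumulates the per-list free byte counts from list n upward, per the comment just before it. A standalone sketch of that predicate, with the GC's word and GC_bool types and its globals replaced by plain-C stand-ins:

    /* Stand-ins for the collector's internals; the real definitions  */
    /* live in gc_priv.h.                                             */
    #define N_HBLK_FLS 28                 /* assumed list count       */
    static unsigned long GC_free_bytes[N_HBLK_FLS+1];
    static unsigned long GC_max_large_allocd_bytes;

    /* Does bytes plus the free bytes on lists n..N_HBLK_FLS exceed   */
    /* GC_max_large_allocd_bytes?                                     */
    static int enough_large_bytes_left(unsigned long bytes, int n)
    {
        int i;
        for (i = n; i <= N_HBLK_FLS; ++i) {
            bytes += GC_free_bytes[i];
            if (bytes > GC_max_large_allocd_bytes) return 1;
        }
        return 0;
    }
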
@@ -86,7 +90,6 @@ word blocks_needed;
     
 }
 
-# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
 # define PHDR(hhdr) HDR(hhdr -> hb_prev)
 # define NHDR(hhdr) HDR(hhdr -> hb_next)
 
@@ -108,7 +111,7 @@ void GC_print_hblkfreelist()
     for (i = 0; i <= N_HBLK_FLS; ++i) {
       h = GC_hblkfreelist[i];
 #     ifdef USE_MUNMAP
-        if (0 != h) GC_printf1("Free list %ld (Total size %ld):\n",
+        if (0 != h) GC_printf1("Free list %ld:\n",
                               (unsigned long)i);
 #     else
         if (0 != h) GC_printf2("Free list %ld (Total size %ld):\n",
@@ -130,10 +133,12 @@ void GC_print_hblkfreelist()
         h = hhdr -> hb_next;
       }
     }
-    if (total_free != GC_large_free_bytes) {
+#   ifndef USE_MUNMAP
+      if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
-    }
+      }
+#   endif
     GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
 }
 
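The new #ifndef reflects that the check is only meaningful when nothing is unmapped: the block sizes summed across every free list must then equal GC_large_free_bytes. A toy model of that invariant check (simplified structures, not the collector's code):

    #include <assert.h>

    #define N_FLS 3                           /* toy list count       */
    struct blk { unsigned long sz; struct blk *next; };
    static struct blk *freelist[N_FLS + 1];
    static unsigned long large_free_bytes;

    /* Sum block sizes over every list and compare with the counter. */
    static void check_large_free_bytes(void)
    {
        unsigned long total = 0;
        int i;
        for (i = 0; i <= N_FLS; ++i) {
            struct blk *b;
            for (b = freelist[i]; b != 0; b = b->next)
                total += b->sz;
        }
        assert(total == large_free_bytes);
    }
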
@@ -178,7 +183,7 @@ void GC_dump_regions()
            hhdr = HDR(p);
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
-               GC_printf1("Missing header!!\n", hhdr);
+               GC_printf1("Missing header!!(%ld)\n", hhdr);
                p += HBLKSIZE;
                continue;
            }
@@ -280,8 +285,8 @@ int n;
        GET_HDR(hhdr -> hb_prev, phdr);
        phdr -> hb_next = hhdr -> hb_next;
     }
+    FREE_ASSERT(GC_free_bytes[index] >= hhdr -> hb_sz);
     INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
-    FREE_ASSERT(GC_free_bytes[index] >= 0);
     if (0 != hhdr -> hb_next) {
        hdr * nhdr;
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
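
The assertion moves above the decrement because GC_free_bytes[] holds unsigned words: after INCR_FREE_BYTES subtracts hb_sz, testing >= 0 is vacuously true even on underflow, so only the check GC_free_bytes[index] >= hhdr -> hb_sz taken beforehand can ever fire. A tiny demonstration of the pitfall:

    #include <assert.h>

    static unsigned long free_bytes = 100;

    static void remove_block(unsigned long sz)
    {
        assert(free_bytes >= sz);  /* meaningful: catches underflow  */
        free_bytes -= sz;
        /* assert(free_bytes >= 0) here would never fail: the value  */
        /* is unsigned, so it wraps instead of going negative.       */
    }
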
@@ -372,9 +377,8 @@ void GC_unmap_old(void)
        if (!IS_MAPPED(hhdr)) continue;
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
-       if (last_rec > GC_gc_no
-           || last_rec < threshold && threshold < GC_gc_no
-                                      /* not recently wrapped */) {
+       if ((last_rec > GC_gc_no || last_rec < threshold)
+           && threshold < GC_gc_no /* not recently wrapped */) {
           sz = hhdr -> hb_sz;
          GC_unmap((ptr_t)h, sz);
          hhdr -> hb_flags |= WAS_UNMAPPED;
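
The old condition parsed as last_rec > GC_gc_no || (last_rec < threshold && threshold < GC_gc_no), since && binds tighter than ||, so the "not recently wrapped" guard never applied to the first stale case. The fix groups both stale cases and guards them together. A standalone model of the corrected test (UNMAP_THRESHOLD value assumed):

    #define UNMAP_THRESHOLD 6   /* assumed; tunable in the collector */

    /* gc_no is the full GC cycle counter; last_rec is the block's   */
    /* 16-bit hb_last_reclaimed stamp, which wraps far sooner.       */
    static int old_enough_to_unmap(unsigned long gc_no,
                                   unsigned short last_rec)
    {
        unsigned short threshold =
            (unsigned short)(gc_no - UNMAP_THRESHOLD);
        return (last_rec > gc_no || last_rec < threshold)
               && threshold < gc_no /* not recently wrapped */;
    }
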
@@ -419,6 +423,7 @@ void GC_merge_unmapped(void)
              } else {
                GC_remap((ptr_t)h, size);
                hhdr -> hb_flags &= ~WAS_UNMAPPED;
+               hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
              }
            } else {
              /* Unmap any gap in the middle */
@@ -465,7 +470,11 @@ int index;
     if (total_size == bytes) return h;
     rest = (struct hblk *)((word)h + bytes);
     rest_hdr = GC_install_header(rest);
-    if (0 == rest_hdr) return(0);
+    if (0 == rest_hdr) {
+       /* This may be very bad news ... */
+       WARN("Header allocation failed: Dropping block.\n", 0);
+       return(0);
+    }
     rest_hdr -> hb_sz = total_size - bytes;
     rest_hdr -> hb_flags = 0;
 #   ifdef GC_ASSERTIONS
@@ -581,14 +590,15 @@ int n;
            GET_HDR(hbp, hhdr);
            size_avail = hhdr->hb_sz;
            if (size_avail < size_needed) continue;
-           if (!GC_use_entire_heap
-               && size_avail != size_needed
+           if (size_avail != size_needed
+               && !GC_use_entire_heap
+               && !GC_dont_gc
                && USED_HEAP_SIZE >= GC_requested_heapsize
-               && !GC_incremental && GC_should_collect()) {
+               && !TRUE_INCREMENTAL && GC_should_collect()) {
 #              ifdef USE_MUNMAP
                    continue;
 #              else
-                   /* If we enough large blocks left to cover any      */
+                   /* If we have enough large blocks left to cover any */
                    /* previous request for large blocks, we go ahead   */
                    /* and split.  Assuming a steady state, that should */
                    /* be safe.  It means that we can use the full      */
@@ -596,6 +606,13 @@ int n;
                    if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
                      continue;
                    } 
+                   /* If we are deallocating lots of memory from       */
+                   /* finalizers, fail and collect sooner rather       */
+                   /* than later.                                      */
+                   if (WORDS_TO_BYTES(GC_finalizer_mem_freed)
+                       > (GC_heapsize >> 4))  {
+                     continue;
+                   }
 #              endif /* !USE_MUNMAP */
            }
            /* If the next heap block is obviously better, go on.       */
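
The added test makes the allocator give up on splitting, and thus collect sooner, once finalizers have freed more than one sixteenth of the heap. Reduced to plain arithmetic (the real code first converts GC_finalizer_mem_freed from words to bytes):

    /* Returns nonzero when a collection should be preferred over    */
    /* splitting another large block.                                 */
    static int prefer_collection(unsigned long finalizer_bytes_freed,
                                 unsigned long heapsize)
    {
        return finalizer_bytes_freed > (heapsize >> 4);  /* > 1/16th */
    }
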
@@ -627,7 +644,8 @@ int n;
              
              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
-                                                     (word)eff_size_needed))) {
+                                                     (word)eff_size_needed))
+                       != 0) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
@@ -654,9 +672,13 @@ int n;
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
-               if (0 != GETENV("GC_NO_BLACKLIST_WARNING")) {
-                 WARN("Needed to allocate blacklisted block at 0x%lx\n",
-                      (word)hbp);
+               if (++GC_large_alloc_warn_suppressed
+                   >= GC_large_alloc_warn_interval) {
+                 WARN("Repeated allocation of very large block "
+                      "(appr. size %ld):\n"
+                      "\tMay lead to memory leak and poor performance.\n",
+                      size_needed);
+                 GC_large_alloc_warn_suppressed = 0;
                }
                size_avail = orig_avail;
              } else if (size_avail == 0 && size_needed == HBLKSIZE
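
Instead of the old opt-in via a GC_NO_BLACKLIST_WARNING environment variable, the warning is now rate limited: one message per GC_large_alloc_warn_interval blacklisted large allocations. The counter pattern in isolation (stand-in names and default value):

    #include <stdio.h>

    static long warn_interval = 5;   /* assumed default               */
    static long warn_suppressed = 0;

    static void maybe_warn_large_alloc(unsigned long size_needed)
    {
        if (++warn_suppressed >= warn_interval) {
            fprintf(stderr,
                    "Repeated allocation of very large block "
                    "(appr. size %lu):\n"
                    "\tMay lead to memory leak and poor performance.\n",
                    size_needed);
            warn_suppressed = 0;  /* restart the suppression window  */
        }
    }
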
@@ -678,7 +700,7 @@ int n;
                      struct hblk * h;
                      struct hblk * prev = hhdr -> hb_prev;
                      
-                     GC_words_wasted += total_size;
+                     GC_words_wasted += BYTES_TO_WORDS(total_size);
                      GC_large_free_bytes -= total_size;
                      GC_remove_from_fl(hhdr, n);
                      for (h = hbp; h < limit; h++) {
@@ -718,9 +740,6 @@ int n;
 
     if (0 == hbp) return 0;
        
-    /* Notify virtual dirty bit implementation that we are about to write. */
-       GC_write_hint(hbp);
-    
     /* Add it to map of valid blocks */
        if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */
@@ -730,6 +749,11 @@ int n;
             GC_remove_counts(hbp, (word)size_needed);
             return(0); /* ditto */
         }
+
+    /* Notify virtual dirty bit implementation that we are about to write.  */
+    /* Ensure that pointerfree objects are not protected if it's avoidable. */
+       GC_remove_protection(hbp, divHBLKSZ(size_needed),
+                            (hhdr -> hb_descr == 0) /* pointer-free */);
         
     /* We just successfully allocated a block.  Restart count of       */
     /* consecutive failures.                                           */
@@ -768,11 +792,15 @@ signed_word size;
     size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
     GC_remove_counts(hbp, (word)size);
     hhdr->hb_sz = size;
+#   ifdef USE_MUNMAP
+      hhdr -> hb_last_reclaimed = GC_gc_no;
+#   endif
     
     /* Check for duplicate deallocation in the easy case */
       if (HBLK_IS_FREE(hhdr)) {
         GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
+       ABORT("Duplicate large block deallocation");
       }
 
     GC_ASSERT(IS_MAPPED(hhdr));
@@ -792,11 +820,17 @@ signed_word size;
        if (IS_MAPPED(prevhdr)) {
          GC_remove_from_fl(prevhdr, FL_UNKNOWN);
          prevhdr -> hb_sz += hhdr -> hb_sz;
+#        ifdef USE_MUNMAP
+           prevhdr -> hb_last_reclaimed = GC_gc_no;
+#        endif
          GC_remove_header(hbp);
          hbp = prev;
          hhdr = prevhdr;
        }
       }
+    /* FIXME: It is not clear we really always want to do these merges */
+    /* with -DUSE_MUNMAP, since it updates ages and hence prevents     */
+    /* unmapping.                                                      */
 
     GC_large_free_bytes += size;
     GC_add_to_fl(hbp, hhdr);
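
A minimal model of the backward merge above: absorb the freed block into its free physical predecessor, refreshing the age stamp under USE_MUNMAP. That refresh is exactly what the FIXME worries about, since a freshly stamped block will not be unmapped for another UNMAP_THRESHOLD collections:

    struct blk_hdr {
        unsigned long  hb_sz;              /* block size in bytes      */
        unsigned short hb_last_reclaimed;  /* GC cycle of last reclaim */
    };

    /* Toy merge; the real code also unlinks prev from its free list */
    /* and removes the absorbed block's header.                      */
    static void merge_back(struct blk_hdr *prev, struct blk_hdr *cur,
                           unsigned long gc_no)
    {
        prev->hb_sz += cur->hb_sz;
        prev->hb_last_reclaimed = (unsigned short)gc_no; /* resets age */
    }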