
runtime: New lock/note implementation.
pf3gnuchains/gcc-fork.git: libgo/runtime/mprof.goc
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Malloc profiling.
// Patterned after tcmalloc's algorithms; shorter code.

package runtime
#include "runtime.h"
#include "arch.h"
#include "malloc.h"
#include "defs.h"
#include "go-type.h"

typedef struct __go_open_array Slice;

// NOTE(rsc): Everything here could use cas if contention became an issue.
static Lock proflock;

// Per-call-stack allocation information.
// Lookup by hashing call stack into a linked-list hash table.
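// All four counters below are cumulative; a bucket is considered "in use"
// when alloc_bytes != free_bytes (see MemProfile below).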
typedef struct Bucket Bucket;
struct Bucket
{
        Bucket  *next;  // next in hash list
        Bucket  *allnext;       // next in list of all buckets
        uintptr allocs;
        uintptr frees;
        uintptr alloc_bytes;
        uintptr free_bytes;
        uintptr hash;
        uintptr nstk;
        uintptr stk[1];
};
enum {
        BuckHashSize = 179999,
};
static Bucket **buckhash;
static Bucket *buckets;
static uintptr bucketmem;

// Return the bucket for stk[0:nstk], allocating new bucket if needed.
static Bucket*
stkbucket(uintptr *stk, int32 nstk)
{
        int32 i;
        uintptr h;
        Bucket *b;

        if(buckhash == nil) {
                buckhash = runtime_SysAlloc(BuckHashSize*sizeof buckhash[0]);
                mstats.buckhash_sys += BuckHashSize*sizeof buckhash[0];
        }

        // Hash stack.
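        // (A shift/add/xor mix in the style of Jenkins' one-at-a-time hash,
        // with a final avalanche step after the loop.)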
        h = 0;
        for(i=0; i<nstk; i++) {
                h += stk[i];
                h += h<<10;
                h ^= h>>6;
        }
        h += h<<3;
        h ^= h>>11;

        i = h%BuckHashSize;
        for(b = buckhash[i]; b; b=b->next)
                if(b->hash == h && b->nstk == (uintptr)nstk &&
                   runtime_mcmp((byte*)b->stk, (byte*)stk, nstk*sizeof stk[0]) == 0)
                        return b;

        b = runtime_mallocgc(sizeof *b + nstk*sizeof stk[0], FlagNoProfiling, 0, 1);
        bucketmem += sizeof *b + nstk*sizeof stk[0];
        runtime_memmove(b->stk, stk, nstk*sizeof stk[0]);
        b->hash = h;
        b->nstk = nstk;
        b->next = buckhash[i];
        buckhash[i] = b;
        b->allnext = buckets;
        buckets = b;
        return b;
}

// Map from pointer to Bucket* that allocated it.
// Three levels:
//      Linked-list hash table for top N-20 bits.
//      Array index for next 13 bits.
//      Linked list for next 7 bits.
// This is more efficient than using a general map,
// because of the typical clustering of the pointer keys.
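//
// For example, for an address p the three levels are keyed as follows
// (illustrative decomposition):
//      p>>20                    selects the top-level AddrHash chain,
//      (p>>7) & ((1<<13)-1)     indexes the dense array inside that AddrHash,
//      the remaining low bits   are resolved by the AddrEntry linked list,
//                               which stores the complement of p's low 20 bits.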

typedef struct AddrHash AddrHash;
typedef struct AddrEntry AddrEntry;

struct AddrHash
{
        AddrHash *next; // next in top-level hash table linked list
        uintptr addr;   // addr>>20
        AddrEntry *dense[1<<13];
};

struct AddrEntry
{
        AddrEntry *next;        // next in bottom-level linked list
        uint32 addr;
        Bucket *b;
};

enum {
        AddrHashBits = 12       // 1MB per entry, so good for 4GB of used address space
};
static AddrHash *addrhash[1<<AddrHashBits];
static AddrEntry *addrfree;
static uintptr addrmem;

// Multiplicative hash function:
// HashMultiplier is the bottom 32 bits of int((sqrt(5)-1)/2 * (1<<32)).
// This is a good multiplier as suggested in CLR, Knuth.  The hash
// value is taken to be the top AddrHashBits bits of the bottom 32 bits
// of the multiplied value.
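// Concretely, (sqrt(5)-1)/2 ~ 0.6180339887 and 0.6180339887 * (1<<32) ~
// 2654435769.5, whose integer part is HashMultiplier below; the hash of x
// (here addr>>20) is the top AddrHashBits bits of (x*HashMultiplier) mod 2^32.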
enum {
        HashMultiplier = 2654435769U
};

// Set the bucket associated with addr to b.
static void
setaddrbucket(uintptr addr, Bucket *b)
{
        int32 i;
        uint32 h;
        AddrHash *ah;
        AddrEntry *e;

        h = (uint32)((addr>>20)*HashMultiplier) >> (32-AddrHashBits);
        for(ah=addrhash[h]; ah; ah=ah->next)
                if(ah->addr == (addr>>20))
                        goto found;

        ah = runtime_mallocgc(sizeof *ah, FlagNoProfiling, 0, 1);
        addrmem += sizeof *ah;
        ah->next = addrhash[h];
        ah->addr = addr>>20;
        addrhash[h] = ah;

found:
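        // Refill the AddrEntry freelist in blocks of 64 to amortize allocation.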
        if((e = addrfree) == nil) {
                e = runtime_mallocgc(64*sizeof *e, FlagNoProfiling, 0, 0);
                addrmem += 64*sizeof *e;
                for(i=0; i+1<64; i++)
                        e[i].next = &e[i+1];
                e[63].next = nil;
        }
        addrfree = e->next;
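        // Store the complement of addr's low 20 bits; getaddrbucket matches
        // entries using the same encoding.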
        e->addr = (uint32)~(addr & ((1<<20)-1));
        e->b = b;
        h = (addr>>7)&(nelem(ah->dense)-1);     // entry in dense is top 13 bits of low 20.
        e->next = ah->dense[h];
        ah->dense[h] = e;
}

// Get the bucket associated with addr and clear the association.
static Bucket*
getaddrbucket(uintptr addr)
{
        uint32 h;
        AddrHash *ah;
        AddrEntry *e, **l;
        Bucket *b;

        h = (uint32)((addr>>20)*HashMultiplier) >> (32-AddrHashBits);
        for(ah=addrhash[h]; ah; ah=ah->next)
                if(ah->addr == (addr>>20))
                        goto found;
        return nil;

found:
        h = (addr>>7)&(nelem(ah->dense)-1);     // entry in dense is top 13 bits of low 20.
        for(l=&ah->dense[h]; (e=*l) != nil; l=&e->next) {
                if(e->addr == (uint32)~(addr & ((1<<20)-1))) {
                        *l = e->next;
                        b = e->b;
                        e->next = addrfree;
                        addrfree = e;
                        return b;
                }
        }
        return nil;
}

// Called by malloc to record a profiled block.
void
runtime_MProf_Malloc(void *p, uintptr size)
{
        int32 nstk;
        uintptr stk[32];
        Bucket *b;

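        // Skip if memory profiling is already disabled or in progress on this M
        // (m->nomemprof != 0); the code below itself allocates via runtime_mallocgc.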
        if(!__sync_bool_compare_and_swap(&m->nomemprof, 0, 1))
                return;
#if 0
        nstk = runtime_callers(1, stk, 32);
#else
        nstk = 0;
#endif
        runtime_lock(&proflock);
        b = stkbucket(stk, nstk);
        b->allocs++;
        b->alloc_bytes += size;
        setaddrbucket((uintptr)p, b);
        runtime_unlock(&proflock);
        __sync_bool_compare_and_swap(&m->nomemprof, 1, 0);

        if(__sync_bool_compare_and_swap(&m->gcing_for_prof, 1, 0))
                __go_run_goroutine_gc(100);
}

// Called when freeing a profiled block.
void
runtime_MProf_Free(void *p, uintptr size)
{
        Bucket *b;

        if(!__sync_bool_compare_and_swap(&m->nomemprof, 0, 1))
                return;

        runtime_lock(&proflock);
        b = getaddrbucket((uintptr)p);
        if(b != nil) {
                b->frees++;
                b->free_bytes += size;
        }
        runtime_unlock(&proflock);
        __sync_bool_compare_and_swap(&m->nomemprof, 1, 0);

        if(__sync_bool_compare_and_swap(&m->gcing_for_prof, 1, 0))
                __go_run_goroutine_gc(101);
}


// Go interface to profile data.  (Declared in extern.go)
// Assumes Go sizeof(int) == sizeof(int32)

// Must match MemProfileRecord in extern.go.
typedef struct Record Record;
struct Record {
        int64 alloc_bytes, free_bytes;
        int64 alloc_objects, free_objects;
        uintptr stk[32];
};

// Write b's data to r.
static void
record(Record *r, Bucket *b)
{
        uint32 i;

        r->alloc_bytes = b->alloc_bytes;
        r->free_bytes = b->free_bytes;
        r->alloc_objects = b->allocs;
        r->free_objects = b->frees;
        for(i=0; i<b->nstk && i<nelem(r->stk); i++)
                r->stk[i] = b->stk[i];
        for(; i<nelem(r->stk); i++)
                r->stk[i] = 0;
}

func MemProfile(p Slice, include_inuse_zero bool) (n int32, ok bool) {
        Bucket *b;
        Record *r;

        __sync_bool_compare_and_swap(&m->nomemprof, 0, 1);

        runtime_lock(&proflock);
        n = 0;
        for(b=buckets; b; b=b->allnext)
                if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
                        n++;
        ok = false;
        if(n <= p.__count) {
                ok = true;
                r = (Record*)p.__values;
                for(b=buckets; b; b=b->allnext)
                        if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
                                record(r++, b);
        }
        runtime_unlock(&proflock);

        __sync_bool_compare_and_swap(&m->nomemprof, 1, 0);

        if(__sync_bool_compare_and_swap(&m->gcing_for_prof, 1, 0))
                __go_run_goroutine_gc(102);
}
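
// A typical call from the Go side might look like this (a sketch only; the
// Go declarations of MemProfile and MemProfileRecord live in extern.go):
//
//      var p []runtime.MemProfileRecord
//      n, ok := runtime.MemProfile(nil, true)  // learn how many records exist
//      for !ok {
//              p = make([]runtime.MemProfileRecord, n+50)
//              n, ok = runtime.MemProfile(p, true)
//      }
//      p = p[0:n]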

void
runtime_MProf_Mark(void (*scan)(byte *, int64))
{
        // buckhash is not allocated via mallocgc.
        scan((byte*)&buckets, sizeof buckets);
        scan((byte*)&addrhash, sizeof addrhash);
        scan((byte*)&addrfree, sizeof addrfree);
}