libgo/runtime/malloc.goc (pf3gnuchains/gcc-fork.git)
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.
//
// TODO(rsc): double-check stats.

package runtime
#include <stddef.h>
#include <errno.h>
#include <stdlib.h>
#include "go-alloc.h"
#include "runtime.h"
#include "malloc.h"
#include "go-string.h"
#include "interface.h"
#include "go-type.h"
typedef struct __go_empty_interface Eface;
typedef struct __go_type_descriptor Type;
typedef struct __go_func_type FuncType;

MHeap runtime_mheap;
extern MStats mstats;   // defined in extern.go

extern volatile int32 runtime_MemProfileRate
  __asm__ ("libgo_runtime.runtime.MemProfileRate");

// Same algorithm as in chan.c, but a different
// instance of the static uint32 x.
// Not protected by a lock - let the threads use
// the same random number if they like.
static uint32
fastrand1(void)
{
        static uint32 x = 0x49f6428aUL;

        x += x;
        if(x & 0x80000000L)
                x ^= 0x88888eefUL;
        return x;
}
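
// fastrand1 above is an LFSR-style generator, not a statistically
// strong one: each step doubles the state and, when the new top bit
// is set, folds in the constant 0x88888eef.  It is cheap and
// lock-free, which is all the heap profiler's sampling below needs;
// it must not be used where real randomness matters.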

// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
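// The remaining parameters (as the code below uses them): refflag is
// or'd into the object's gc reference word; dogc != 0 lets this call
// kick off a collection once heap_alloc reaches next_gc; zeroed != 0
// requests zeroed memory from the size-class cache.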
void*
runtime_mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed)
{
        int32 sizeclass, rate;
        MCache *c;
        uintptr npages;
        MSpan *s;
        void *v;
        uint32 *ref;

        if(!__sync_bool_compare_and_swap(&m->mallocing, 0, 1))
                runtime_throw("malloc/free - deadlock");
        if(size == 0)
                size = 1;

        mstats.nmalloc++;
        if(size <= MaxSmallSize) {
                // Allocate from mcache free lists.
                sizeclass = runtime_SizeToClass(size);
                size = runtime_class_to_size[sizeclass];
                c = m->mcache;
                v = runtime_MCache_Alloc(c, sizeclass, size, zeroed);
                if(v == nil)
                        runtime_throw("out of memory");
                mstats.alloc += size;
                mstats.total_alloc += size;
                mstats.by_size[sizeclass].nmalloc++;

                if(!runtime_mlookup(v, nil, nil, nil, &ref)) {
                        // runtime_printf("malloc %D; runtime_mlookup failed\n", (uint64)size);
                        runtime_throw("malloc runtime_mlookup");
                }
                *ref = RefNone | refflag;
        } else {
                // TODO(rsc): Report tracebacks for very large allocations.

                // Allocate directly from heap.
                npages = size >> PageShift;
                if((size & PageMask) != 0)
                        npages++;
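                // For example, with 4 kB pages (PageShift == 12), a
                // request of 40960 bytes is exactly 10 pages, while
                // 40961 bytes leaves a nonzero remainder in the low
                // PageMask bits and rounds up to 11 pages.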
                s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1);
                if(s == nil)
                        runtime_throw("out of memory");
                size = npages<<PageShift;
                mstats.alloc += size;
                mstats.total_alloc += size;
                v = (void*)(s->start << PageShift);

                // Set up for mark sweep.
                s->gcref0 = RefNone | refflag;
                ref = &s->gcref0;
        }

        __sync_bool_compare_and_swap(&m->mallocing, 1, 0);

        if(__sync_bool_compare_and_swap(&m->gcing, 1, 0)) {
                if(!(refflag & RefNoProfiling))
                        __go_run_goroutine_gc(0);
                else {
                        // We are being called from the profiler.  Tell it
                        // to invoke the garbage collector when it is
                        // done.  No need to use a sync function here.
                        m->gcing_for_prof = 1;
                }
        }

        if(!(refflag & RefNoProfiling) && (rate = runtime_MemProfileRate) > 0) {
                if(size >= (uint32) rate)
                        goto profile;
                if((uint32) m->mcache->next_sample > size)
                        m->mcache->next_sample -= size;
                else {
                        // pick next profile time
                        if(rate > 0x3fffffff)   // make 2*rate not overflow
                                rate = 0x3fffffff;
                        m->mcache->next_sample = fastrand1() % (2*rate);
                profile:
                        *ref |= RefProfiled;
                        runtime_MProf_Malloc(v, size);
                }
        }
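
        // The profiling logic above charges each allocation against a
        // byte budget in next_sample; when the budget runs out, the
        // allocation is profiled and a new budget is drawn uniformly
        // from [0, 2*rate), so samples arrive on average once per rate
        // bytes allocated.  Allocations of at least rate bytes are
        // always profiled.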

        if(dogc && mstats.heap_alloc >= mstats.next_gc)
                runtime_gc(0);
        return v;
}

void*
__go_alloc(uintptr size)
{
        return runtime_mallocgc(size, 0, 0, 1);
}

// Free the object whose base pointer is v.
void
__go_free(void *v)
{
        int32 sizeclass, size;
        MSpan *s;
        MCache *c;
        uint32 prof, *ref;

        if(v == nil)
                return;

        if(!__sync_bool_compare_and_swap(&m->mallocing, 0, 1))
                runtime_throw("malloc/free - deadlock");

        if(!runtime_mlookup(v, nil, nil, &s, &ref)) {
                // runtime_printf("free %p: not an allocated block\n", v);
                runtime_throw("free runtime_mlookup");
        }
        prof = *ref & RefProfiled;
        *ref = RefFree;

        // Find size class for v.
        sizeclass = s->sizeclass;
        if(sizeclass == 0) {
                // Large object.
                if(prof)
                        runtime_MProf_Free(v, s->npages<<PageShift);
                mstats.alloc -= s->npages<<PageShift;
                runtime_memclr(v, s->npages<<PageShift);
                runtime_MHeap_Free(&runtime_mheap, s, 1);
        } else {
                // Small object.
                c = m->mcache;
                size = runtime_class_to_size[sizeclass];
                if(size > (int32)sizeof(uintptr))
                        ((uintptr*)v)[1] = 1;   // mark as "needs to be zeroed"
                if(prof)
                        runtime_MProf_Free(v, size);
                mstats.alloc -= size;
                mstats.by_size[sizeclass].nfree++;
                runtime_MCache_Free(c, v, sizeclass, size);
        }
        __sync_bool_compare_and_swap(&m->mallocing, 1, 0);

        if(__sync_bool_compare_and_swap(&m->gcing, 1, 0))
                __go_run_goroutine_gc(1);
}

int32
runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp, uint32 **ref)
{
        uintptr n, nobj, i;
        byte *p;
        MSpan *s;

        mstats.nlookup++;
        s = runtime_MHeap_LookupMaybe(&runtime_mheap, (uintptr)v>>PageShift);
        if(sp)
                *sp = s;
        if(s == nil) {
                if(base)
                        *base = nil;
                if(size)
                        *size = 0;
                if(ref)
                        *ref = 0;
                return 0;
        }

        p = (byte*)((uintptr)s->start<<PageShift);
        if(s->sizeclass == 0) {
                // Large object.
                if(base)
                        *base = p;
                if(size)
                        *size = s->npages<<PageShift;
                if(ref)
                        *ref = &s->gcref0;
                return 1;
        }

        if((byte*)v >= (byte*)s->gcref) {
                // Pointers into the gc ref counts
                // do not count as pointers.
                return 0;
        }

        n = runtime_class_to_size[s->sizeclass];
        i = ((byte*)v - p)/n;
        if(base)
                *base = p + i*n;
        if(size)
                *size = n;
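
        // Example of the interior-pointer math above: with 32-byte
        // objects (n == 32), a pointer 100 bytes into the span gives
        // i == 100/32 == 3, so *base points at the fourth object,
        // 96 bytes past p.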

        // Good for error checking, but expensive.
        if(0) {
                nobj = (s->npages << PageShift) / (n + RefcountOverhead);
                if((byte*)s->gcref < p || (byte*)(s->gcref+nobj) > p+(s->npages<<PageShift)) {
                        // runtime_printf("odd span state=%d span=%p base=%p sizeclass=%d n=%D size=%D npages=%D\n",
                        //      s->state, s, p, s->sizeclass, (uint64)nobj, (uint64)n, (uint64)s->npages);
                        // runtime_printf("s->base sizeclass %d v=%p base=%p gcref=%p blocksize=%D nobj=%D size=%D end=%p end=%p\n",
                        //      s->sizeclass, v, p, s->gcref, (uint64)s->npages<<PageShift,
                        //      (uint64)nobj, (uint64)n, s->gcref + nobj, p+(s->npages<<PageShift));
                        runtime_throw("bad gcref");
                }
        }
        if(ref)
                *ref = &s->gcref[i];

        return 1;
}

MCache*
runtime_allocmcache(void)
{
        MCache *c;

        if(!__sync_bool_compare_and_swap(&m->mallocing, 0, 1))
                runtime_throw("allocmcache - deadlock");

        runtime_lock(&runtime_mheap);
        c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);

        // Clear the free list used by FixAlloc; assume the rest is zeroed.
        c->list[0].list = nil;

        mstats.mcache_inuse = runtime_mheap.cachealloc.inuse;
        mstats.mcache_sys = runtime_mheap.cachealloc.sys;
        runtime_unlock(&runtime_mheap);

        __sync_bool_compare_and_swap(&m->mallocing, 1, 0);
        if(__sync_bool_compare_and_swap(&m->gcing, 1, 0))
                __go_run_goroutine_gc(2);

        return c;
}

extern int32 runtime_sizeof_C_MStats
  __asm__ ("libgo_runtime.runtime.Sizeof_C_MStats");

void
runtime_mallocinit(void)
{
        runtime_sizeof_C_MStats = sizeof(MStats);

        runtime_initfintab();
        runtime_Mprof_Init();

        runtime_SysMemInit();
        runtime_InitSizes();
        runtime_MHeap_Init(&runtime_mheap, runtime_SysAlloc);
        m->mcache = runtime_allocmcache();

        // See if it works.
        runtime_free(runtime_malloc(1));
}

// Runtime stubs.

void*
runtime_mal(uintptr n)
{
        return runtime_mallocgc(n, 0, 1, 1);
}
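
// The func definitions below are in .goc form: a preprocessor (goc2c)
// turns each one into a plain C function exported under a mangled name,
// so they serve as the Go-visible entry points of package runtime.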

func Alloc(n uintptr) (p *byte) {
        p = runtime_malloc(n);
}

func Free(p *byte) {
        runtime_free(p);
}

func Lookup(p *byte) (base *byte, size uintptr) {
        runtime_mlookup(p, &base, &size, nil, nil);
}

func GC() {
        runtime_gc(1);
}

func SetFinalizer(obj Eface, finalizer Eface) {
        byte *base;
        uintptr size;
        const FuncType *ft;

        if(obj.__type_descriptor == nil) {
                // runtime_printf("runtime.SetFinalizer: first argument is nil interface\n");
        throw:
                runtime_throw("runtime.SetFinalizer");
        }
        if(obj.__type_descriptor->__code != GO_PTR) {
                // runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
                goto throw;
        }
        if(!runtime_mlookup(obj.__object, &base, &size, nil, nil) || obj.__object != base) {
                // runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
                goto throw;
        }
        ft = nil;
        if(finalizer.__type_descriptor != nil) {
                if(finalizer.__type_descriptor->__code != GO_FUNC) {
                badfunc:
                        // runtime_printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
                        goto throw;
                }
                ft = (const FuncType*)finalizer.__type_descriptor;
                if(ft->__dotdotdot || ft->__in.__count != 1 || !__go_type_descriptors_equal(*(Type**)ft->__in.__values, obj.__type_descriptor))
                        goto badfunc;

                if(runtime_getfinalizer(obj.__object, 0)) {
                        // runtime_printf("runtime.SetFinalizer: finalizer already set");
                        goto throw;
                }
        }
        runtime_addfinalizer(obj.__object, finalizer.__type_descriptor != nil ? *(void**)finalizer.__object : nil, ft);
}
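
// Illustrative Go-side usage matching the checks above (hypothetical
// example, not part of this file): obj must be a pointer to the start
// of an allocated block, and finalizer must be nil or a non-variadic
// func of exactly one argument of obj's own type, e.g.
//
//      p := new(T)
//      runtime.SetFinalizer(p, func(p *T) { ... })
//
// Passing nil as the finalizer removes any finalizer already set.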