// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.
//
// TODO(rsc): double-check stats.
package runtime
#include "go-alloc.h"
#include "runtime.h"
#include "arch.h"
#include "malloc.h"
#include "go-string.h"
#include "interface.h"
#include "go-type.h"

MHeap runtime_mheap;

extern MStats mstats;	// defined in extern.go
extern volatile int32 runtime_MemProfileRate
  __asm__ ("libgo_runtime.runtime.MemProfileRate");
// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
void*
runtime_mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
{
	M *m;
	G *g;
	int32 sizeclass, rate;
	MCache *c;
	uintptr npages;
	MSpan *s;
	void *v;

	m = runtime_m();
	g = runtime_g();
	if(g->status == Gsyscall)
		dogc = 0;
	if(runtime_gcwaiting && g != m->g0 && m->locks == 0 && g->status != Gsyscall) {
		runtime_gosched();
		m = runtime_m();
	}
	if(m->mallocing)
		runtime_throw("malloc/free - deadlock");
	m->mallocing = 1;
	if(size == 0)
		size = 1;

	c = m->mcache;
	c->local_nmalloc++;
	if(size <= MaxSmallSize) {
		// Allocate from mcache free lists.
		sizeclass = runtime_SizeToClass(size);
		size = runtime_class_to_size[sizeclass];
		v = runtime_MCache_Alloc(c, sizeclass, size, zeroed);
		if(v == nil)
			runtime_throw("out of memory");
		c->local_alloc += size;
		c->local_total_alloc += size;
		c->local_by_size[sizeclass].nmalloc++;
	} else {
		// TODO(rsc): Report tracebacks for very large allocations.

		// Allocate directly from heap.
		npages = size >> PageShift;
		if((size & PageMask) != 0)
			npages++;
		s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1);
		if(s == nil)
			runtime_throw("out of memory");
		size = npages<<PageShift;
		c->local_alloc += size;
		c->local_total_alloc += size;
		v = (void*)(s->start << PageShift);

		// Set up for mark-sweep collection.
		runtime_markspan(v, 0, 0, true);
	}
	if(!(flag & FlagNoGC))
		runtime_markallocated(v, size, (flag&FlagNoPointers) != 0);

	m->mallocing = 0;

	if(!(flag & FlagNoProfiling) && (rate = runtime_MemProfileRate) > 0) {
		if(size >= (uint32) rate)
			goto profile;
		if((uint32) m->mcache->next_sample > size)
			m->mcache->next_sample -= size;
		else {
			// pick next profile time
			// If you change this, also change allocmcache.
			if(rate > 0x3fffffff)	// make 2*rate not overflow
				rate = 0x3fffffff;
			m->mcache->next_sample = runtime_fastrand1() % (2*rate);
		profile:
			runtime_setblockspecial(v, true);
			runtime_MProf_Malloc(v, size);
		}
	}
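	// (A note on the sampling above: next_sample is drawn uniformly
	// from [0, 2*rate), so on average one block is profiled per rate
	// bytes allocated, while any single allocation of at least rate
	// bytes is always profiled via the goto.)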

	if(dogc && mstats.heap_alloc >= mstats.next_gc)
		runtime_gc(0);
	return v;
}

void*
__go_alloc(uintptr size)
{
	return runtime_mallocgc(size, 0, 0, 1);
}

// Free the object whose base pointer is v.
void
__go_free(void *v)
{
	M *m;
	int32 sizeclass;
	MSpan *s;
	MCache *c;
	uint32 prof;
	uintptr size;

	if(v == nil)
		return;

	// If you change this also change mgc0.c:/^sweep,
	// which has a copy of the guts of free.

	m = runtime_m();
	if(m->mallocing)
		runtime_throw("malloc/free - deadlock");
	m->mallocing = 1;

	if(!runtime_mlookup(v, nil, nil, &s)) {
		// runtime_printf("free %p: not an allocated block\n", v);
		runtime_throw("free runtime_mlookup");
	}
	prof = runtime_blockspecial(v);

	// Find size class for v.
	sizeclass = s->sizeclass;
	c = m->mcache;
	if(sizeclass == 0) {
		// Large object.
		size = s->npages<<PageShift;
		*(uintptr*)(s->start<<PageShift) = 1;	// mark as "needs to be zeroed"
		// Must mark v freed before calling unmarkspan and MHeap_Free:
		// they might coalesce v into other spans and change the bitmap further.
		runtime_markfreed(v, size);
		runtime_unmarkspan(v, 1<<PageShift);
		runtime_MHeap_Free(&runtime_mheap, s, 1);
	} else {
		// Small object.
		size = runtime_class_to_size[sizeclass];
		if(size > sizeof(uintptr))
			((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
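		// (The first word of a freed small block holds the free-list
		// link, which is why the flag lives in the second word; for
		// one-word blocks the allocator clears the link on reuse
		// instead.)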
		// Must mark v freed before calling MCache_Free:
		// it might coalesce v and other blocks into a bigger span
		// and change the bitmap further.
		runtime_markfreed(v, size);
		c->local_by_size[sizeclass].nfree++;
		runtime_MCache_Free(c, v, sizeclass, size);
	}
	c->local_nfree++;
	c->local_alloc -= size;
	if(prof)
		runtime_MProf_Free(v, size);
	m->mallocing = 0;
}

int32
runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
{
	uintptr n, i;
	byte *p;
	MSpan *s;

	runtime_m()->mcache->local_nlookup++;
	s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
	if(sp)
		*sp = s;
	if(s == nil) {
		runtime_checkfreed(v, 1);
		if(base)
			*base = nil;
		if(size)
			*size = 0;
		return 0;
	}

	p = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass == 0) {
		// Large object.
		if(base)
			*base = p;
		if(size)
			*size = s->npages<<PageShift;
		return 1;
	}

	if((byte*)v >= (byte*)s->limit) {
		// pointers past the last block do not count as pointers.
		return 0;
	}

	n = runtime_class_to_size[s->sizeclass];
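	// (The division below rounds v down to the start of its block:
	// e.g. with block size n == 48, a pointer 100 bytes into the
	// span yields i == 2 and a base of p + 96.)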
	if(base) {
		i = ((byte*)v - p)/n;
		*base = p + i*n;
	}
	if(size)
		*size = n;

	return 1;
}

MCache*
runtime_allocmcache(void)
{
	int32 rate;
	MCache *c;

	runtime_lock(&runtime_mheap);
	c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
	mstats.mcache_inuse = runtime_mheap.cachealloc.inuse;
	mstats.mcache_sys = runtime_mheap.cachealloc.sys;
	runtime_unlock(&runtime_mheap);

	// Set first allocation sample size.
	rate = runtime_MemProfileRate;
	if(rate > 0x3fffffff)	// make 2*rate not overflow
		rate = 0x3fffffff;
	if(rate != 0)
		c->next_sample = runtime_fastrand1() % (2*rate);

	return c;
}

void
runtime_purgecachedstats(M* m)
{
	MCache *c;

	// Protected by either heap or GC lock.
	c = m->mcache;
	mstats.heap_alloc += c->local_cachealloc;
	c->local_cachealloc = 0;
	mstats.heap_objects += c->local_objects;
	c->local_objects = 0;
	mstats.nmalloc += c->local_nmalloc;
	c->local_nmalloc = 0;
	mstats.nfree += c->local_nfree;
	c->local_nfree = 0;
	mstats.nlookup += c->local_nlookup;
	c->local_nlookup = 0;
	mstats.alloc += c->local_alloc;
	c->local_alloc = 0;
	mstats.total_alloc += c->local_total_alloc;
	c->local_total_alloc = 0;
}

extern uintptr runtime_sizeof_C_MStats
  __asm__ ("libgo_runtime.runtime.Sizeof_C_MStats");

#define MaxArena32 (2U<<30)
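// (2U<<30 is 2 GB: the most address space the 32-bit code paths
// below will dedicate to the heap arena.)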

void
runtime_mallocinit(void)
{
	byte *p;
	uintptr arena_size, bitmap_size;
	extern byte end[];
	byte *want;
	uintptr limit;

	runtime_sizeof_C_MStats = sizeof(MStats);

	p = nil;
	arena_size = 0;
	bitmap_size = 0;

	runtime_InitSizes();

	limit = runtime_memlimit();

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found. The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 16 GB should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x000000f800000000 if possible.
		// Allocating a 16 GB region takes away 36 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 11 bits
		// in the middle of 0x00f8 for us to choose. Choosing 0x00f8 means
		// that the valid memory addresses will begin 0x00f8, 0x00f9, 0x00fa, 0x00fb.
		// None of the bytes f8 f9 fa fb can appear in valid UTF-8, and
		// they are otherwise as far from ff (likely a common byte) as possible.
		// Choosing 0x00 for the leading 6 bits was more arbitrary, but it
		// is not a common ASCII code point either. Using 0x11f8 instead
		// caused out of memory errors on OS X during thread allocations.
		// These choices are both for debuggability and to reduce the
		// odds of the conservative garbage collector not collecting memory
		// because some non-pointer block of memory had a bit pattern
		// that matched a memory address.
		//
		// Actually we reserve 17 GB (because the bitmap ends up being 1 GB)
		// but it hardly matters: fc is not valid UTF-8 either, and we have to
		// allocate 15 GB before we get that far.
		//
		// If this fails we fall back to the 32-bit memory mechanism.
		arena_size = (uintptr)(16LL<<30);
		bitmap_size = arena_size / (sizeof(void*)*8/4);
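		// (Worked example: on 64-bit, sizeof(void*)*8/4 == 16, i.e.
		// 4 bitmap bits per 8-byte word, so the 16 GB arena needs the
		// 1 GB bitmap mentioned above, 17 GB reserved in total.)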
		p = runtime_SysReserve((void*)(0x00f8ULL<<32), bitmap_size + arena_size);
	}
	if(p == nil) {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2 GB of mappings (256 MB),
		// along with a reservation for another 512 MB of memory.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope that it's in the 2 GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve a 512 MB bitmap, enough
		// for 4 GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.
		bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
		arena_size = 512<<20;
		if(limit > 0 && arena_size+bitmap_size > limit) {
			bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
			arena_size = bitmap_size * 8;
		}
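		// (Why limit/9: on 32-bit, sizeof(void*)*8/4 == 8, so the
		// bitmap is 1/8 the size of the arena; arena + bitmap is then
		// 9*bitmap_size, and the mask rounds bitmap_size down to a
		// page boundary.)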

		// SysReserve treats the address we ask for, end, as a hint,
		// not as an absolute requirement. If we ask for the end
		// of the data segment but the operating system requires
		// a little more space before we can start allocating, it will
		// give out a slightly higher pointer. Except QEMU, which
		// is buggy, as usual: it won't adjust the pointer upward.
		// So adjust it upward a little bit ourselves: 1/4 MB to get
		// away from the running binary image and then round up
		// to a MB boundary.
		want = (byte*)(((uintptr)end + (1<<18) + (1<<20) - 1)&~((1<<20)-1));
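		// (E.g. if end is 0x0804a123, adding 1<<18 gives 0x0808a123,
		// and rounding up to the next 1 MB boundary yields a want of
		// 0x08100000.)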
		if(0xffffffff - (uintptr)want <= bitmap_size + arena_size)
			want = nil;
		p = runtime_SysReserve(want, bitmap_size + arena_size);
		if(p == nil)
			runtime_throw("runtime: cannot reserve arena virtual address space");
		if((uintptr)p & (((uintptr)1<<PageShift)-1))
			runtime_printf("runtime: SysReserve returned unaligned address %p; asked for %p", p, (void*)(bitmap_size+arena_size));
	}
	if((uintptr)p & (((uintptr)1<<PageShift)-1))
		runtime_throw("runtime: SysReserve returned unaligned address");

	runtime_mheap.bitmap = p;
	runtime_mheap.arena_start = p + bitmap_size;
	runtime_mheap.arena_used = runtime_mheap.arena_start;
	runtime_mheap.arena_end = runtime_mheap.arena_start + arena_size;

	// Initialize the rest of the allocator.
	runtime_MHeap_Init(&runtime_mheap, runtime_SysAlloc);
	runtime_m()->mcache = runtime_allocmcache();

	// See if it works.
	runtime_free(runtime_malloc(1));
}

void*
runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
{
	byte *p;

	if(n > (uintptr)(h->arena_end - h->arena_used)) {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		byte *new_end;
		uintptr needed;

		needed = (uintptr)h->arena_used + n - (uintptr)h->arena_end;
		// Round wanted arena size to a multiple of 256 MB.
		needed = (needed + (256<<20) - 1) & ~((256<<20)-1);
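		// (E.g. needed == 300 MB rounds up to 512 MB: adding
		// 256 MB - 1 and masking clears the low 28 bits, leaving the
		// next multiple of 256 MB.)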
		new_end = h->arena_end + needed;
		if(new_end <= h->arena_start + MaxArena32) {
			p = runtime_SysReserve(h->arena_end, new_end - h->arena_end);
			if(p == h->arena_end)
				h->arena_end = new_end;
		}
	}
	if(n <= (uintptr)(h->arena_end - h->arena_used)) {
		// Keep taking from our reservation.
		p = h->arena_used;
		runtime_SysMap(p, n);
		h->arena_used += n;
		runtime_MHeap_MapBits(h);
		return p;
	}

	// If using 64-bit, our reservation is all we have.
	if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
		return nil;

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
	p = runtime_SysAlloc(n);
	if(p == nil)
		return nil;

	if(p < h->arena_start || (uintptr)(p+n - h->arena_start) >= MaxArena32) {
		runtime_printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
			p, h->arena_start, h->arena_start+MaxArena32);
		runtime_SysFree(p, n);
		return nil;
	}

	if(p+n > h->arena_used) {
		h->arena_used = p+n;
		if(h->arena_used > h->arena_end)
			h->arena_end = h->arena_used;
		runtime_MHeap_MapBits(h);
	}

	return p;
}

void*
runtime_mal(uintptr n)
{
	return runtime_mallocgc(n, 0, 1, 1);
}

func new(typ *Type) (ret *uint8) {
	uint32 flag = typ->__code&GO_NO_POINTERS ? FlagNoPointers : 0;
	ret = runtime_mallocgc(typ->__size, flag, 1, 1);
}
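// (What the flag buys us: for a type whose descriptor has
// GO_NO_POINTERS set, e.g. a struct containing only integers, the
// block is allocated with FlagNoPointers and the conservative
// collector never scans its contents for pointers.)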

func SetFinalizer(obj Eface, finalizer Eface) {
	byte *base;
	uintptr size;
	const FuncType *ft;

	if(obj.__type_descriptor == nil) {
		// runtime_printf("runtime.SetFinalizer: first argument is nil interface\n");
		goto throw;
	}
	if(obj.__type_descriptor->__code != GO_PTR) {
		// runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
		goto throw;
	}
	if(!runtime_mlookup(obj.__object, &base, &size, nil) || obj.__object != base) {
		// runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
		goto throw;
	}
	ft = nil;
	if(finalizer.__type_descriptor != nil) {
		if(finalizer.__type_descriptor->__code != GO_FUNC)
			goto badfunc;
		ft = (const FuncType*)finalizer.__type_descriptor;
		if(ft->__dotdotdot || ft->__in.__count != 1 || !__go_type_descriptors_equal(*(Type**)ft->__in.__values, obj.__type_descriptor))
			goto badfunc;
	}

	if(!runtime_addfinalizer(obj.__object, finalizer.__type_descriptor != nil ? *(void**)finalizer.__object : nil, ft)) {
		runtime_printf("runtime.SetFinalizer: finalizer already set\n");
		goto throw;
	}
	return;

badfunc:
	// runtime_printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
throw:
	runtime_throw("runtime.SetFinalizer");
}
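
// (Go-level view of the checks above, as a sketch: a call like
//	runtime.SetFinalizer(f, func(f *os.File) { f.Close() })
// passes because f is a pointer to the start of an allocated block
// and the finalizer is a non-variadic func taking exactly that
// pointer type; os.File here is only an illustration.)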