// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.
//
// TODO(rsc): double-check stats.

package runtime
#include <stddef.h>
#include <errno.h>
#include <stdlib.h>
#include "go-alloc.h"
#include "runtime.h"
#include "malloc.h"
#include "go-string.h"
#include "interface.h"
#include "go-type.h"
typedef struct __go_empty_interface Eface;
typedef struct __go_type_descriptor Type;
typedef struct __go_func_type FuncType;

MHeap runtime_mheap;
extern MStats mstats;	// defined in extern.go

extern volatile int32 runtime_MemProfileRate
  __asm__ ("libgo_runtime.runtime.MemProfileRate");

// Same algorithm as in chan.c, but a different
// instance of the static uint32 x.
// Not protected by a lock - let the threads use
// the same random number if they like.
static uint32
fastrand1(void)
{
	static uint32 x = 0x49f6428aUL;

	x += x;
	if(x & 0x80000000L)
		x ^= 0x88888eefUL;
	return x;
}

// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
void*
runtime_mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
{
	int32 sizeclass, rate;
	MCache *c;
	uintptr npages;
	MSpan *s;
	void *v;

	if(!__sync_bool_compare_and_swap(&m->mallocing, 0, 1))
		runtime_throw("malloc/free - deadlock");
	if(size == 0)
		size = 1;

	mstats.nmalloc++;
	if(size <= MaxSmallSize) {
		// Allocate from mcache free lists.
		sizeclass = runtime_SizeToClass(size);
		size = runtime_class_to_size[sizeclass];
		c = m->mcache;
		v = runtime_MCache_Alloc(c, sizeclass, size, zeroed);
		if(v == nil)
			runtime_throw("out of memory");
		mstats.alloc += size;
		mstats.total_alloc += size;
		mstats.by_size[sizeclass].nmalloc++;
	} else {
		// TODO(rsc): Report tracebacks for very large allocations.

		// Allocate directly from heap.
		npages = size >> PageShift;
		if((size & PageMask) != 0)
			npages++;
		s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1);
		if(s == nil)
			runtime_throw("out of memory");
		size = npages<<PageShift;
		mstats.alloc += size;
		mstats.total_alloc += size;
		v = (void*)(s->start << PageShift);

		// setup for mark sweep
		runtime_markspan(v, 0, 0, true);
	}
	if(!(flag & FlagNoGC))
		runtime_markallocated(v, size, (flag&FlagNoPointers) != 0);

	__sync_bool_compare_and_swap(&m->mallocing, 1, 0);

	if(__sync_bool_compare_and_swap(&m->gcing, 1, 0)) {
		if(!(flag & FlagNoProfiling))
			__go_run_goroutine_gc(0);
		else {
			// We are being called from the profiler.  Tell it
			// to invoke the garbage collector when it is
			// done.  No need to use a sync function here.
			m->gcing_for_prof = 1;
		}
	}

	if(!(flag & FlagNoProfiling) && (rate = runtime_MemProfileRate) > 0) {
		if(size >= (uint32) rate)
			goto profile;
		if((uint32) m->mcache->next_sample > size)
			m->mcache->next_sample -= size;
		else {
			// pick next profile time
			if(rate > 0x3fffffff)	// make 2*rate not overflow
				rate = 0x3fffffff;
			m->mcache->next_sample = fastrand1() % (2*rate);
		profile:
			runtime_setblockspecial(v);
			runtime_MProf_Malloc(v, size);
		}
	}

	if(dogc && mstats.heap_alloc >= mstats.next_gc)
		runtime_gc(0);
	return v;
}
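// The large-object branch above rounds the request up to whole pages:
// npages = size >> PageShift, plus one if any low bits remain.  Below is
// a minimal standalone sketch of that computation, not part of the
// runtime; the demo_* names are hypothetical and the 12-bit page
// (PageShift == 12, 4 kB pages, as in malloc.h) is an assumption.
// Disabled with #if 0 so it stays out of the build.
#if 0
#include <stdio.h>
#include <stdint.h>

enum { DemoPageShift = 12 };			// assumed 4096-byte pages
enum { DemoPageMask = (1 << DemoPageShift) - 1 };

// Round a byte count up to whole pages, as runtime_mallocgc does
// for large objects.
static uintptr_t
demo_pages_for(uintptr_t size)
{
	uintptr_t npages = size >> DemoPageShift;
	if((size & DemoPageMask) != 0)
		npages++;
	return npages;
}

int
main(void)
{
	// 1 byte -> 1 page, 4096 -> 1 page, 4097 -> 2 pages.
	printf("%lu %lu %lu\n",
	       (unsigned long)demo_pages_for(1),
	       (unsigned long)demo_pages_for(4096),
	       (unsigned long)demo_pages_for(4097));
	return 0;
}
#endif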
void*
__go_alloc(uintptr size)
{
	return runtime_mallocgc(size, 0, 0, 1);
}

// Free the object whose base pointer is v.
void
__go_free(void *v)
{
	int32 sizeclass;
	MSpan *s;
	MCache *c;
	uint32 prof;
	uintptr size;

	if(v == nil)
		return;

	// If you change this also change mgc0.c:/^sweepspan,
	// which has a copy of the guts of free.

	if(!__sync_bool_compare_and_swap(&m->mallocing, 0, 1))
		runtime_throw("malloc/free - deadlock");

	if(!runtime_mlookup(v, nil, nil, &s)) {
		// runtime_printf("free %p: not an allocated block\n", v);
		runtime_throw("free runtime_mlookup");
	}
	prof = runtime_blockspecial(v);

	// Find size class for v.
	sizeclass = s->sizeclass;
	if(sizeclass == 0) {
		// Large object.
		size = s->npages<<PageShift;
		*(uintptr*)(s->start<<PageShift) = 1;	// mark as "needs to be zeroed"
		// Must mark v freed before calling unmarkspan and MHeap_Free:
		// they might coalesce v into other spans and change the bitmap further.
		runtime_markfreed(v, size);
		runtime_unmarkspan(v, 1<<PageShift);
		runtime_MHeap_Free(&runtime_mheap, s, 1);
	} else {
		// Small object.
		c = m->mcache;
		size = runtime_class_to_size[sizeclass];
		if(size > (int32)sizeof(uintptr))
			((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
		// Must mark v freed before calling MCache_Free:
		// it might coalesce v and other blocks into a bigger span
		// and change the bitmap further.
		runtime_markfreed(v, size);
		mstats.by_size[sizeclass].nfree++;
		runtime_MCache_Free(c, v, sizeclass, size);
	}
	mstats.alloc -= size;
	if(prof)
		runtime_MProf_Free(v, size);

	__sync_bool_compare_and_swap(&m->mallocing, 1, 0);

	if(__sync_bool_compare_and_swap(&m->gcing, 1, 0))
		__go_run_goroutine_gc(1);
}

int32
runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
{
	uintptr n, i;
	byte *p;
	MSpan *s;

	mstats.nlookup++;
	s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
	if(sp)
		*sp = s;
	if(s == nil) {
		runtime_checkfreed(v, 1);
		if(base)
			*base = nil;
		if(size)
			*size = 0;
		return 0;
	}

	p = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass == 0) {
		// Large object.
		if(base)
			*base = p;
		if(size)
			*size = s->npages<<PageShift;
		return 1;
	}

	if((byte*)v >= (byte*)s->limit) {
		// pointers past the last block do not count as pointers.
		return 0;
	}

	n = runtime_class_to_size[s->sizeclass];
	i = ((byte*)v - p)/n;
	if(base)
		*base = p + i*n;
	if(size)
		*size = n;

	return 1;
}
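// runtime_mlookup above recovers an object's base address from an
// interior pointer with plain block arithmetic: i = (v - p)/n and
// base = p + i*n, where p is the span start and n the fixed block size
// of the span's size class.  A minimal standalone sketch of that
// arithmetic follows; the demo_* names are hypothetical, the 48-byte
// block size is an arbitrary example, and the block is disabled with
// #if 0 so it stays out of the build.
#if 0
#include <stdio.h>
#include <stdint.h>

// Given a span starting at p whose blocks are all n bytes,
// round an interior pointer v down to the base of its block.
static uint8_t*
demo_block_base(uint8_t *p, uintptr_t n, uint8_t *v)
{
	uintptr_t i = (uintptr_t)(v - p) / n;
	return p + i*n;
}

int
main(void)
{
	static uint8_t span[4096];	// pretend span of 48-byte blocks
	uint8_t *v = span + 2*48 + 7;	// interior pointer into block 2

	// Prints 96 = 2*48: the base offset of block 2.
	printf("base offset = %lu\n",
	       (unsigned long)(demo_block_base(span, 48, v) - span));
	return 0;
}
#endif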
MCache*
runtime_allocmcache(void)
{
	MCache *c;

	if(!__sync_bool_compare_and_swap(&m->mallocing, 0, 1))
		runtime_throw("allocmcache - deadlock");
	runtime_lock(&runtime_mheap);
	c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
	// Clear the free list used by FixAlloc; assume the rest is zeroed.
	c->list[0].list = nil;
	mstats.mcache_inuse = runtime_mheap.cachealloc.inuse;
	mstats.mcache_sys = runtime_mheap.cachealloc.sys;
	runtime_unlock(&runtime_mheap);
	__sync_bool_compare_and_swap(&m->mallocing, 1, 0);

	if(__sync_bool_compare_and_swap(&m->gcing, 1, 0))
		__go_run_goroutine_gc(2);

	return c;
}

extern int32 runtime_sizeof_C_MStats
  __asm__ ("libgo_runtime.runtime.Sizeof_C_MStats");

#define MaxArena32 (2U<<30)

void
runtime_mallocinit(void)
{
	byte *p;
	uintptr arena_size, bitmap_size;
	extern byte end[];

	runtime_sizeof_C_MStats = sizeof(MStats);

	runtime_InitSizes();

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found.  The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if(sizeof(void*) == 8) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 16 GB should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x000000f800000000 if possible.
		// Allocating a 16 GB region takes away 34 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 13 bits
		// in the middle of 0x00f8 for us to choose.  Choosing 0x00f8 means
		// that the valid memory addresses will begin 0x00f8, 0x00f9, 0x00fa, 0x00fb.
		// None of the bytes f8 f9 fa fb can appear in valid UTF-8, and
		// they are otherwise as far from ff (likely a common byte) as possible.
		// Choosing 0x00 for the leading 6 bits was more arbitrary, but it
		// is not a common ASCII code point either.  Using 0x11f8 instead
		// caused out of memory errors on OS X during thread allocations.
		//
		// These choices are both for debuggability and to reduce the
		// odds of the conservative garbage collector not collecting memory
		// because some non-pointer block of memory had a bit pattern
		// that matched a memory address.
		//
		// Actually we reserve 17 GB (because the bitmap ends up being 1 GB)
		// but it hardly matters: fc is not valid UTF-8 either, and we have to
		// allocate 15 GB before we get that far.
		arena_size = (uintptr)(16LL<<30);
		bitmap_size = arena_size / (sizeof(void*)*8/4);
		p = runtime_SysReserve((void*)(0x00f8ULL<<32), bitmap_size + arena_size);
		if(p == nil)
			runtime_throw("runtime: cannot reserve arena virtual address space");
	} else {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2 GB of mappings (256 MB),
		// along with a reservation for another 512 MB of memory.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope that it is in the 2 GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve a 512 MB bitmap, enough
		// for 4 GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.
		bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
		arena_size = 512<<20;

		// SysReserve treats the address we ask for, end, as a hint,
		// not as an absolute requirement.  If we ask for the end
		// of the data segment but the operating system requires
		// a little more space before we can start allocating, it will
		// give out a slightly higher pointer.  That's fine.
		// Run with what we get back.
		p = runtime_SysReserve(end, bitmap_size + arena_size);
		if(p == nil)
			runtime_throw("runtime: cannot reserve arena virtual address space");
	}
	if((uintptr)p & (((uintptr)1<<PageShift)-1))
		runtime_throw("runtime: SysReserve returned unaligned address");

	runtime_mheap.bitmap = p;
	runtime_mheap.arena_start = p + bitmap_size;
	runtime_mheap.arena_used = runtime_mheap.arena_start;
	runtime_mheap.arena_end = runtime_mheap.arena_start + arena_size;

	// Initialize the rest of the allocator.
	runtime_MHeap_Init(&runtime_mheap, runtime_SysAlloc);
	m->mcache = runtime_allocmcache();

	// See if it works.
	runtime_free(runtime_malloc(1));
}
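// The bitmap sizing in runtime_mallocinit is just "4 bits of metadata
// per word of arena": bitmap_size = arena_size / (sizeof(void*)*8/4).
// A minimal standalone sketch of that arithmetic for both word sizes
// follows; the demo_* name is hypothetical and the block is disabled
// with #if 0 so it stays out of the build.
#if 0
#include <stdio.h>
#include <stdint.h>

// Bytes of bitmap needed for an arena, at 4 bits per word of
// ptrsize bytes.
static uint64_t
demo_bitmap_size(uint64_t arena_size, uint64_t ptrsize)
{
	// Each word holds ptrsize*8 bits of data but needs only 4 bits
	// of bitmap, so the bitmap is arena_size / (ptrsize*8/4).
	return arena_size / (ptrsize*8/4);
}

int
main(void)
{
	// 64-bit: 16 GB arena / 16 = 1 GB bitmap (hence the 17 GB reservation).
	// 32-bit: 2 GB arena / 8 = 256 MB bitmap, matching the comment above.
	printf("64-bit: %llu MB\n",
	       (unsigned long long)(demo_bitmap_size(16ULL<<30, 8) >> 20));
	printf("32-bit: %llu MB\n",
	       (unsigned long long)(demo_bitmap_size(2ULL<<30, 4) >> 20));
	return 0;
}
#endif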
void*
runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
{
	byte *p;

	if(n <= (uintptr)(h->arena_end - h->arena_used)) {
		// Keep taking from our reservation.
		p = h->arena_used;
		runtime_SysMap(p, n);
		h->arena_used += n;
		runtime_MHeap_MapBits(h);
		return p;
	}

	// On 64-bit, our reservation is all we have.
	if(sizeof(void*) == 8)
		return nil;

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
	p = runtime_SysAlloc(n);
	if(p == nil)
		return nil;

	if(p < h->arena_start || (uintptr)(p+n - h->arena_start) >= MaxArena32) {
		runtime_printf("runtime: memory allocated by OS not in usable range\n");
		runtime_SysFree(p, n);
		return nil;
	}

	if(p+n > h->arena_used) {
		h->arena_used = p+n;
		if(h->arena_used > h->arena_end)
			h->arena_end = h->arena_used;
		runtime_MHeap_MapBits(h);
	}

	return p;
}

// Runtime stubs.

void*
runtime_mal(uintptr n)
{
	return runtime_mallocgc(n, 0, 1, 1);
}

func new(n uint32) (ret *uint8) {
	ret = runtime_mal(n);
}

func Alloc(n uintptr) (p *byte) {
	p = runtime_malloc(n);
}

func Free(p *byte) {
	runtime_free(p);
}

func Lookup(p *byte) (base *byte, size uintptr) {
	runtime_mlookup(p, &base, &size, nil);
}

func GC() {
	runtime_gc(1);
}

func SetFinalizer(obj Eface, finalizer Eface) {
	byte *base;
	uintptr size;
	const FuncType *ft;

	if(obj.__type_descriptor == nil) {
		// runtime_printf("runtime.SetFinalizer: first argument is nil interface\n");
	throw:
		runtime_throw("runtime.SetFinalizer");
	}
	if(obj.__type_descriptor->__code != GO_PTR) {
		// runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
		goto throw;
	}
	if(!runtime_mlookup(obj.__object, &base, &size, nil) || obj.__object != base) {
		// runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
		goto throw;
	}
	ft = nil;
	if(finalizer.__type_descriptor != nil) {
		if(finalizer.__type_descriptor->__code != GO_FUNC) {
		badfunc:
			// runtime_printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
			goto throw;
		}
		ft = (const FuncType*)finalizer.__type_descriptor;
		if(ft->__dotdotdot || ft->__in.__count != 1 || !__go_type_descriptors_equal(*(Type**)ft->__in.__values, obj.__type_descriptor))
			goto badfunc;

		if(runtime_getfinalizer(obj.__object, 0)) {
			// runtime_printf("runtime.SetFinalizer: finalizer already set");
			goto throw;
		}
	}
	runtime_addfinalizer(obj.__object,
			     finalizer.__type_descriptor != nil ? *(void**)finalizer.__object : nil,
			     ft);
}
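// SetFinalizer accepts the finalizer only if it is a non-variadic
// function of exactly one parameter whose type descriptor equals the
// object's pointer type (the ft->__dotdotdot, ft->__in.__count, and
// ft->__in.__values tests above).  A minimal standalone sketch of that
// shape check over simplified, hypothetical descriptors follows; the
// demo_* types merely stand in for __go_type_descriptor/__go_func_type
// and the block is disabled with #if 0 so it stays out of the build.
#if 0
#include <stdio.h>
#include <stdbool.h>

struct demo_type { const char *name; };

struct demo_functype {
	bool dotdotdot;			// variadic?
	int in_count;			// number of parameters
	const struct demo_type *in0;	// type of the first parameter
};

// Mirror of the badfunc test: reject variadic functions, wrong arity,
// and a parameter type that differs from the object's type.
static bool
demo_finalizer_ok(const struct demo_functype *ft, const struct demo_type *objtype)
{
	return !ft->dotdotdot && ft->in_count == 1 && ft->in0 == objtype;
}

int
main(void)
{
	struct demo_type tptr = { "*T" };
	struct demo_functype good = { false, 1, &tptr };
	struct demo_functype variadic = { true, 1, &tptr };

	// Prints "1 0": func(*T) is accepted, func(...*T) is not.
	printf("%d %d\n",
	       demo_finalizer_ok(&good, &tptr),
	       demo_finalizer_ok(&variadic, &tptr));
	return 0;
}
#endif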