// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#ifdef USING_SPLIT_STACK

/* FIXME: These are not declared anywhere.  */
extern void __splitstack_getcontext(void *context[10]);
extern void __splitstack_setcontext(void *context[10]);
extern void *__splitstack_makecontext(size_t, void *context[10], size_t *);
extern void *__splitstack_resetcontext(void *context[10], size_t *);
extern void *__splitstack_find(void *, void *, size_t *, void **, void **,
			       void **);
#if defined(USING_SPLIT_STACK) && defined(LINKER_SUPPORTS_SPLIT_STACK)
# ifdef PTHREAD_STACK_MIN
#  define StackMin PTHREAD_STACK_MIN
# else
#  define StackMin 8192
# endif
#else
# define StackMin (2 * 1024 * 1024)
#endif
static void schedule(G*);
static M *startm(void);

typedef struct Sched Sched;

G runtime_g0;	// idle goroutine for m0
// We cannot always refer to the TLS variables directly.  The
// compiler will call tls_get_addr to get the address of the variable,
// and it may hold it in a register across a call to schedule.  When
// we get back from the call we may be running in a different thread,
// in which case the register now points to the TLS variable for a
// different thread.  We use non-inlinable functions to avoid this
// when necessary.

G* runtime_g(void) __attribute__ ((noinline, no_split_stack));

M* runtime_m(void) __attribute__ ((noinline, no_split_stack));

int32 runtime_gcwaiting;
// The Go scheduler's job is to match ready-to-run goroutines (`g's)
// with waiting-for-work schedulers (`m's).  If there are ready g's
// and no waiting m's, ready() will start a new m running in a new
// OS thread, so that all ready g's can run simultaneously, up to a limit.
// For now, m's never go away.
//
// By default, Go keeps only one kernel thread (m) running user code
// at a time; other threads may be blocked in the operating system.
// Setting the environment variable $GOMAXPROCS or calling
// runtime.GOMAXPROCS() will change the number of user threads
// allowed to execute simultaneously.  $GOMAXPROCS is thus an
// approximation of the maximum number of cores to use.
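//
// For example (illustrative only): with $GOMAXPROCS=4, at most four
// m's execute Go code at once.  Any number of additional m's may
// exist, but those blocked in system calls do not count against
// mcpumax until they return through exitsyscall.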
//
// Even a program that can run without deadlock in a single process
// might use more m's if given the chance.  For example, the prime
// sieve will use as many m's as there are primes (up to runtime_sched.mmax),
// allowing different stages of the pipeline to execute in parallel.
// We could revisit this choice, only kicking off new m's for blocking
// system calls, but that would limit the amount of parallel computation
// that Go would try to do.
//
// In general, one could imagine all sorts of refinements to the
// scheduler, but the goal now is just to get something working on
// Linux and get the infrastructure right.
struct Sched {
	Lock;

	G *gfree;	// available g's (status == Gdead)
	int32 goidgen;

	G *ghead;	// g's waiting to run
	G *gtail;
	int32 gwait;	// number of g's waiting to run
	int32 gcount;	// number of g's that are alive
	int32 grunning;	// number of g's running on cpu or in syscall

	M *mhead;	// m's waiting for work
	int32 mwait;	// number of m's waiting for work
	int32 mcount;	// number of m's that have been created

	volatile uint32 atomic;	// atomic scheduling word (see below)

	int32 profilehz;	// cpu profiling rate

	Note stopped;	// one g can set waitstop and wait here for m's to stop
};
// The atomic word in sched is an atomic uint32 that
// holds these fields.
//
//	[15 bits]	mcpu		number of m's executing on cpu
//	[15 bits]	mcpumax		max number of m's allowed on cpu
//	[1 bit]		waitstop	some g is waiting on stopped
//	[1 bit]		gwaiting	gwait != 0
//
// These fields are the information needed by entersyscall
// and exitsyscall to decide whether to coordinate with the
// scheduler.  Packing them into a single machine word lets
// them use a fast path with a single atomic read/write and
// no lock/unlock.  This greatly reduces contention in
// syscall- or cgo-heavy multithreaded programs.
//
// Except for entersyscall and exitsyscall, the manipulations
// to these fields only happen while holding the schedlock,
// so the routines holding schedlock only need to worry about
// what entersyscall and exitsyscall do, not the other routines
// (which also use the schedlock).
//
// In particular, entersyscall and exitsyscall only read mcpumax,
// waitstop, and gwaiting.  They never write them.  Thus, writes to those
// fields can be done (holding schedlock) without fear of write conflicts.
// There may still be logic conflicts: for example, setting waitstop must
// be conditioned on mcpu >= mcpumax or else the wait may be a
// spurious sleep.  The Promela model in proc.p verifies these accesses.
enum {
	mcpuWidth = 15,
	mcpuMask = (1<<mcpuWidth) - 1,
	mcpuShift = 0,
	mcpumaxShift = mcpuShift + mcpuWidth,
	waitstopShift = mcpumaxShift + mcpuWidth,
	gwaitingShift = waitstopShift + 1,

	// The max value of GOMAXPROCS is constrained
	// by the max value we can store in the bit fields
	// of the atomic word.  Reserve a few high values
	// so that we can detect accidental decrement
	// below zero.
	maxgomaxprocs = mcpuMask - 10,
};
#define atomic_mcpu(v)		(((v)>>mcpuShift)&mcpuMask)
#define atomic_mcpumax(v)	(((v)>>mcpumaxShift)&mcpuMask)
#define atomic_waitstop(v)	(((v)>>waitstopShift)&1)
#define atomic_gwaiting(v)	(((v)>>gwaitingShift)&1)
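// Worked example (illustrative only): with mcpuWidth == 15, a word
// encoding mcpu == 2, mcpumax == 4, waitstop == 0, gwaiting == 1 is
//	v = (2<<mcpuShift) | (4<<mcpumaxShift) | (1<<gwaitingShift);
// so atomic_mcpu(v) == 2, atomic_mcpumax(v) == 4,
// atomic_waitstop(v) == 0, and atomic_gwaiting(v) == 1.  A single
// runtime_xadd of 1<<mcpuShift (or -1<<mcpuShift) then adjusts mcpu
// without disturbing the other fields, which is what the
// entersyscall/exitsyscall fast paths rely on.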
int32 runtime_gomaxprocs;
bool runtime_singleproc;

static bool canaddmcpu(void);

// An m that is waiting for notewakeup(&m->havenextg).  This may
// only be accessed while the scheduler lock is held.  This is used to
// minimize the number of times we call notewakeup while the scheduler
// lock is held, since the m will normally move quickly to lock the
// scheduler itself, producing lock contention.
static M* mwakeup;

// Scheduling helpers.  Sched must be locked.
static void gput(G*);	// put/get on ghead/gtail
static G* gget(void);
static void mput(M*);	// put/get on mhead
static M* mget(G*);
static void gfput(G*);	// put/get on gfree
static G* gfget(void);
static void matchmg(void);	// match m's to g's
static void readylocked(G*);	// ready, but sched is locked
static void mnextg(M*, G*);
static void mcommoninit(M*);
static void
setmcpumax(uint32 n)
{
	uint32 v, w;

	for(;;) {
		v = runtime_sched.atomic;
		w = v;
		w &= ~(mcpuMask<<mcpumaxShift);
		w |= n<<mcpumaxShift;
		if(runtime_cas(&runtime_sched.atomic, v, w))
			break;
	}
}
// First function run by a new goroutine.  This replaces gogocall.
static void
kickoff(void)
{
	void (*fn)(void*);

	fn = (void (*)(void*))(g->entry);
	fn(g->param);
}
// Switch context to a different goroutine.  This is like longjmp.
static void runtime_gogo(G*) __attribute__ ((noinline));
static void
runtime_gogo(G* newg)
{
#ifdef USING_SPLIT_STACK
	__splitstack_setcontext(&newg->stack_context[0]);
#endif
	g = newg;
	newg->fromgogo = true;
	setcontext(&newg->context);
}
// Save context and call fn passing g as a parameter.  This is like
// setjmp.  Because getcontext always returns 0, unlike setjmp, we use
// g->fromgogo as a flag.  It will be true if we got here via
// setcontext.  g == nil the first time this is called in a new m.
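//
// Informally: runtime_mcall(fn) on goroutine gp saves gp's context,
// switches to the m->g0 scheduler stack, and runs fn(gp) there.  When
// the scheduler later does runtime_gogo(gp), control reappears at the
// getcontext call below with gp->fromgogo set, so runtime_mcall just
// returns to its original caller.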
static void runtime_mcall(void (*)(G*)) __attribute__ ((noinline));
static void
runtime_mcall(void (*pfn)(G*))
{
#ifndef USING_SPLIT_STACK
	int i;
#endif

	// Ensure that all registers are on the stack for the garbage
	// collector.
	__builtin_unwind_init();

	if(g == m->g0)
		runtime_throw("runtime: mcall called on m->g0 stack");

#ifdef USING_SPLIT_STACK
	__splitstack_getcontext(&g->stack_context[0]);
#else
	g->gcnext_sp = (byte *) &i;
#endif
	getcontext(&g->context);

	if (g == nil || !g->fromgogo) {
#ifdef USING_SPLIT_STACK
		__splitstack_setcontext(&m->g0->stack_context[0]);
#endif
		m->g0->entry = (byte*)pfn;
		setcontext(&m->g0->context);
		runtime_throw("runtime: mcall function returned");
	}
}
// The bootstrap sequence is:
//
//	make & queue new G
//	call runtime_mstart
//
//	call main_init_function
runtime_schedinit(void)

	runtime_mallocinit();

	// Allocate internal symbol table representation now,
	// so that we don't need to call malloc when we crash.
	// runtime_findfunc(0);

	runtime_gomaxprocs = 1;
	p = runtime_getenv("GOMAXPROCS");
	if(p != nil && (n = runtime_atoi(p)) != 0) {
		if(n > maxgomaxprocs)
			n = maxgomaxprocs;
		runtime_gomaxprocs = n;
	}
	setmcpumax(runtime_gomaxprocs);
	runtime_singleproc = runtime_gomaxprocs == 1;

	canaddmcpu();	// mcpu++ to account for bootstrap m
	m->helpgc = 1;	// flag to tell schedule() to mcpu--
	runtime_sched.grunning++;

	// Cannot enable GC until all roots are registered.
	// mstats.enablegc = 1;
// Lock the scheduler.
static void
schedlock(void)
{
	runtime_lock(&runtime_sched);
}

// Unlock the scheduler.
static void
schedunlock(void)
{
	M *m;

	m = mwakeup;
	mwakeup = nil;
	runtime_unlock(&runtime_sched);
	if(m != nil)
		runtime_notewakeup(&m->havenextg);
}
	g->status = Gmoribund;
	runtime_gosched();

runtime_goroutineheader(G *g)

	status = g->waitreason;
	runtime_printf("goroutine %d [%s]:\n", g->goid, status);

runtime_tracebackothers(G *me)

	for(g = runtime_allg; g != nil; g = g->alllink) {
		if(g == me || g->status == Gdead)
			continue;
		runtime_printf("\n");
		runtime_goroutineheader(g);
		// runtime_traceback(g->sched.pc, g->sched.sp, 0, g);
	}
// Mark this g as m's idle goroutine.
// This functionality might be used in environments where programs
// are limited to a single thread, to simulate a select-driven
// network server.  It is not exposed via the standard runtime API.
void
runtime_idlegoroutine(void)
{
	if(g->idlem != nil)
		runtime_throw("g is already an idle goroutine");
	g->idlem = m;
}
	// Add to runtime_allm so garbage collector doesn't free m
	// when it is just in a register or thread-local storage.
	m->alllink = runtime_allm;
	// runtime_Cgocalls() iterates over allm w/o schedlock,
	// so we need to publish it safely.
	runtime_atomicstorep((void**)&runtime_allm, m);

	m->id = runtime_sched.mcount++;
	m->fastrand = 0x49f6428aUL + m->id;
	m->mcache = runtime_allocmcache();
// Try to increment mcpu.  Report whether succeeded.
static bool
canaddmcpu(void)
{
	uint32 v;

	for(;;) {
		v = runtime_sched.atomic;
		if(atomic_mcpu(v) >= atomic_mcpumax(v))
			return 0;
		if(runtime_cas(&runtime_sched.atomic, v, v+(1<<mcpuShift)))
			return 1;
	}
}
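// Within this file, canaddmcpu is how mcpu rises everywhere except
// the exitsyscall fast path below, which does the mcpu++ itself with
// runtime_xadd; see the bootstrap accounting in runtime_schedinit and
// the matchmg loop.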
// Put on `g' queue.  Sched must be locked.
static void
gput(G *g)
{
	M *m;

	// If g is wired, hand it off directly.
	if((m = g->lockedm) != nil && canaddmcpu()) {
		mnextg(m, g);
		return;
	}

	// If g is the idle goroutine for an m, hand it off.
	if(g->idlem != nil) {
		if(g->idlem->idleg != nil) {
			runtime_printf("m%d idle out of sync: g%d g%d\n",
				g->idlem->id, g->idlem->idleg->goid, g->goid);
			runtime_throw("runtime: double idle");
		}
		g->idlem->idleg = g;
		return;
	}

	g->schedlink = nil;
	if(runtime_sched.ghead == nil)
		runtime_sched.ghead = g;
	else
		runtime_sched.gtail->schedlink = g;
	runtime_sched.gtail = g;

	// increment gwait.
	// if it transitions to nonzero, set atomic gwaiting bit.
	if(runtime_sched.gwait++ == 0)
		runtime_xadd(&runtime_sched.atomic, 1<<gwaitingShift);
}
// Report whether gget would return something.
static bool
haveg(void)
{
	return runtime_sched.ghead != nil || m->idleg != nil;
}
// Get from `g' queue.  Sched must be locked.
static G*
gget(void)
{
	G *g;

	g = runtime_sched.ghead;
	if(g) {
		runtime_sched.ghead = g->schedlink;
		if(runtime_sched.ghead == nil)
			runtime_sched.gtail = nil;
		// decrement gwait.
		// if it transitions to zero, clear atomic gwaiting bit.
		if(--runtime_sched.gwait == 0)
			runtime_xadd(&runtime_sched.atomic, -1<<gwaitingShift);
	} else if(m->idleg != nil) {
		g = m->idleg;
		m->idleg = nil;
	}
	return g;
}
// Put on `m' list.  Sched must be locked.
static void
mput(M *m)
{
	m->schedlink = runtime_sched.mhead;
	runtime_sched.mhead = m;
	runtime_sched.mwait++;
}
// Get an `m' to run `g'.  Sched must be locked.
static M*
mget(G *g)
{
	M *m;

	// if g has its own m, use it.
	if(g && (m = g->lockedm) != nil)
		return m;

	// otherwise use general m pool.
	if((m = runtime_sched.mhead) != nil){
		runtime_sched.mhead = m->schedlink;
		runtime_sched.mwait--;
	}
	return m;
}
// Mark g ready to run.

// Mark g ready to run.  Sched is already locked.
// G might be running already and about to stop.
// The sched lock protects g->status from changing underfoot.
	if(g->m){
		// Running on another machine.
		// Ready it when it stops.
		g->readyonstop = 1;
		return;
	}

	if(g->status == Grunnable || g->status == Grunning) {
		runtime_printf("goroutine %d has status %d\n", g->goid, g->status);
		runtime_throw("bad g->status in ready");
	}
	g->status = Grunnable;
// Same as readylocked but a different symbol so that
// debuggers can set a breakpoint here and catch all
// new goroutines.
static void
newprocreadylocked(G *g)
// Pass g to m for running.
// Caller has already incremented mcpu.
static void
mnextg(M *m, G *g)
{
	runtime_sched.grunning++;
	m->nextg = g;
	if(m->waitnextg) {
		m->waitnextg = 0;
		if(mwakeup != nil)
			runtime_notewakeup(&mwakeup->havenextg);
		mwakeup = m;
	}
}
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.

	// atomic_mcpu is a 15-bit field; a decrement below zero wraps
	// around and shows up as an impossibly large value.
	if(atomic_mcpu(runtime_sched.atomic) >= maxgomaxprocs)
		runtime_throw("negative mcpu");
	// If there is a g waiting as m->nextg, the mcpu++
	// happened before it was passed to mnextg.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime_sched.gwait != 0) {
			matchmg();
			// m->lockedg might have been on the queue.
			if(m->nextg != nil) {
				gp = m->nextg;
				m->nextg = nil;
				schedunlock();
				return gp;
			}
		}
	}
	// Look for work on global queue.
	while(haveg() && canaddmcpu()) {
		if((gp = gget()) == nil)
			runtime_throw("gget inconsistency");
		if(gp->lockedm) {
			mnextg(gp->lockedm, gp);
			continue;
		}
		runtime_sched.grunning++;
		schedunlock();
		return gp;
	}
	// The while loop ended either because the g queue is empty
	// or because we have maxed out our m procs running go
	// code (mcpu >= mcpumax).  We need to check that
	// concurrent actions by entersyscall/exitsyscall cannot
	// invalidate the decision to end the loop.
	//
	// We hold the sched lock, so no one else is manipulating the
	// g queue or changing mcpumax.  Entersyscall can decrement
	// mcpu, but if it does so when there is something on the g queue,
	// the gwait bit will be set, so entersyscall will take the slow path
	// and use the sched lock.  So it cannot invalidate our decision.
	// Wait on global m queue.
	mput(m);

	v = runtime_atomicload(&runtime_sched.atomic);
	if(runtime_sched.grunning == 0)
		runtime_throw("all goroutines are asleep - deadlock!");
	m->nextg = nil;
	m->waitnextg = 1;
	runtime_noteclear(&m->havenextg);

	// Stoptheworld is waiting for all but its cpu to go to stop.
	// Entersyscall might have decremented mcpu too, but if so
	// it will see the waitstop and take the slow path.
	// Exitsyscall never increments mcpu beyond mcpumax.
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// set waitstop = 0 (known to be 1)
		runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
		runtime_notewakeup(&runtime_sched.stopped);
	}
	runtime_notesleep(&m->havenextg);
	runtime_lock(&runtime_sched);

	if((gp = m->nextg) == nil)
		runtime_throw("bad m->nextg in nextgoroutine");
runtime_helpgc(bool *extra)

	// Figure out how many CPUs to use.
	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
	max = runtime_gomaxprocs;
	if(max > runtime_ncpu)
		max = runtime_ncpu > 0 ? runtime_ncpu : 1;

	// We're going to use one CPU no matter what.
	// Figure out the max number of additional CPUs.

	runtime_lock(&runtime_sched);
	while(n < max && (mp = mget(nil)) != nil) {
		n++;
		mp->helpgc = 1;
		mp->waitnextg = 0;
		runtime_notewakeup(&mp->havenextg);
	}
	runtime_unlock(&runtime_sched);
runtime_stoptheworld(void)

	runtime_gcwaiting = 1;

	setmcpumax(1);

	// while mcpu > 1
	for(;;) {
		v = runtime_sched.atomic;
		if(atomic_mcpu(v) <= 1)
			break;

		// It would be unsafe for multiple threads to be using
		// the stopped note at once, but there is only
		// ever one thread doing garbage collection.
		runtime_noteclear(&runtime_sched.stopped);
		if(atomic_waitstop(v))
			runtime_throw("invalid waitstop");

		// atomic { waitstop = 1 }, predicated on mcpu <= 1 check above
		// still being true.
		if(!runtime_cas(&runtime_sched.atomic, v, v+(1<<waitstopShift)))
			continue;

		runtime_notesleep(&runtime_sched.stopped);
	}
	runtime_singleproc = runtime_gomaxprocs == 1;
runtime_starttheworld(bool extra)

	runtime_gcwaiting = 0;
	setmcpumax(runtime_gomaxprocs);
	matchmg();
	if(extra && canaddmcpu()) {
		// Start a new m that will (we hope) be idle
		// and so available to help when the next
		// garbage collection happens.
		// canaddmcpu above did mcpu++
		// (necessary, because m will be doing various
		// initialization work so is definitely running),
		// but m is not running a specific goroutine,
		// so set the helpgc flag as a signal to m's
		// first schedule(nil) to mcpu-- and grunning--.
		mp = startm();
		mp->helpgc = 1;
		runtime_sched.grunning++;
	}
// Called to start an M.
runtime_mstart(void* mp)

	// Record top of stack for use by mcall.
	// Once we call schedule we're never coming back,
	// so other calls can reuse this stack space.
#ifdef USING_SPLIT_STACK
	__splitstack_getcontext(&g->stack_context[0]);
#else
	g->gcinitial_sp = &mp;
	g->gcstack_size = StackMin;
	g->gcnext_sp = &mp;
#endif

	getcontext(&g->context);

	if(g->entry != nil) {
		// Got here from mcall.
		void (*pfn)(G*) = (void (*)(G*))g->entry;
		G* gp = (G*)g->param;
		pfn(gp);
	}
typedef struct CgoThreadStart CgoThreadStart;
struct CgoThreadStart
// Kick off new m's as needed (up to mcpumax).
// There are already `other' other cpus that will
// start looking for goroutines shortly.
	if(m->mallocing || m->gcing)
		return;

	while(haveg() && canaddmcpu()) {
		if((gp = gget()) == nil)
			runtime_throw("gget inconsistency");

		// Find the m that will run gp.
		if((mp = mget(gp)) == nil)
			mp = startm();
		mnextg(mp, gp);
	}
	m = runtime_malloc(sizeof(M));
	mcommoninit(m);
	m->g0 = runtime_malg(-1, nil, nil);

	if(pthread_attr_init(&attr) != 0)
		runtime_throw("pthread_attr_init");
	if(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
		runtime_throw("pthread_attr_setdetachstate");

#ifndef PTHREAD_STACK_MIN
#define PTHREAD_STACK_MIN 8192
#endif
	if(pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN) != 0)
		runtime_throw("pthread_attr_setstacksize");

	if(pthread_create(&tid, &attr, runtime_mstart, m) != 0)
		runtime_throw("pthread_create");
// One round of scheduler: find a goroutine and run it.
// The argument is the goroutine that was running before
// schedule was called, or nil if this is the first call.

		// Just finished running gp.
		runtime_sched.grunning--;

		// atomic { mcpu-- }
		v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
		if(atomic_mcpu(v) > maxgomaxprocs)
			runtime_throw("negative mcpu in scheduler");

			// Shouldn't have been running!
			runtime_throw("bad gp->status in sched");

			gp->status = Grunnable;

			if(--runtime_sched.gcount == 0)
				runtime_exit(0);

	} else if(m->helpgc) {
		// Bootstrap m or new m started by starttheworld.
		// atomic { mcpu-- }
		v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
		if(atomic_mcpu(v) > maxgomaxprocs)
			runtime_throw("negative mcpu in scheduler");
		// Compensate for increment in starttheworld().
		runtime_sched.grunning--;
		m->helpgc = 0;
	} else if(m->nextg != nil) {
		// New m started by matchmg.
	} else
		runtime_throw("invalid m state in scheduler");
	// Find (or wait for) g to run.  Unlocks runtime_sched.
	gp = nextgandunlock();
	gp->status = Grunning;

	// Check whether the profiler needs to be turned on or off.
	hz = runtime_sched.profilehz;
	if(m->profilehz != hz)
		runtime_resetcpuprofiler(hz);
// Enter scheduler.  If g->status is Grunning,
// re-queues g and runs everyone else who is waiting
// before running g again.  If g->status is Gmoribund,
// frees g.
void
runtime_gosched(void)
{
	if(m->locks != 0)
		runtime_throw("gosched holding locks");
	if(g == m->g0)
		runtime_throw("gosched of g0");
	runtime_mcall(schedule);
}
// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the runtime_gosave must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
// It's okay to call matchmg and notewakeup even after
// decrementing mcpu, because we haven't released the
// sched lock yet, so the garbage collector cannot be running.

void runtime_entersyscall(void) __attribute__ ((no_split_stack));

void
runtime_entersyscall(void)
{
	uint32 v;
	// Leave SP around for gc and traceback.
#ifdef USING_SPLIT_STACK
	g->gcstack = __splitstack_find(NULL, NULL, &g->gcstack_size,
				       &g->gcnext_segment, &g->gcnext_sp,
				       &g->gcinitial_sp);
#else
	g->gcnext_sp = (byte *) &v;
#endif

	// Save the registers in the g structure so that any pointers
	// held in registers will be seen by the garbage collector.
	// We could use getcontext here, but setjmp is more efficient
	// because it doesn't need to save the signal mask.
	setjmp(g->gcregs);

	g->status = Gsyscall;
	// The slow path inside the schedlock/schedunlock will get
	// through without stopping if it does:
	//	mcpu--
	//	gwait not true
	//	waitstop && mcpu <= mcpumax not true
	// If we can do the same with a single atomic add,
	// then we can skip the locks.
	v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
	if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
		return;
	schedlock();

	v = runtime_atomicload(&runtime_sched.atomic);
	if(atomic_gwaiting(v)) {
		matchmg();
		v = runtime_atomicload(&runtime_sched.atomic);
	}
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
		runtime_notewakeup(&runtime_sched.stopped);
	}

	schedunlock();
}
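// Worked example of the entersyscall fast path (illustrative only):
// suppose mcpu == 2, mcpumax == 2, gwaiting == 0, waitstop == 0 when
// a goroutine enters a system call.  The single runtime_xadd above
// leaves mcpu == 1, and since gwaiting and waitstop are both clear
// the function returns without touching schedlock.  Only when some g
// is queued (gwaiting) or stoptheworld is waiting (waitstop with
// mcpu <= mcpumax) must it fall into the locked slow path.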
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
void
runtime_exitsyscall(void)
{
	G *gp;
	uint32 v;

	// If we can do the mcpu++ bookkeeping and
	// find that we still have mcpu <= mcpumax, then we can
	// start executing Go code immediately, without having to
	// schedlock/schedunlock.
	gp = g;
	v = runtime_xadd(&runtime_sched.atomic, (1<<mcpuShift));
	if(m->profilehz == runtime_sched.profilehz && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// There's a cpu for us, so we can run.
		gp->status = Grunning;
		// Garbage collector isn't running (since we are),
		// so okay to clear gcstack.
#ifdef USING_SPLIT_STACK
		gp->gcstack = nil;
#endif
		gp->gcnext_sp = nil;
		runtime_memclr(gp->gcregs, sizeof gp->gcregs);
		return;
	}

	// Tell scheduler to put g back on the run queue:
	// mostly equivalent to g->status = Grunning,
	// but keeps the garbage collector from thinking
	// that g is running right now, which it's not.
	gp->readyonstop = 1;

	// All the cpus are taken.
	// The scheduler will ready g and put this m to sleep.
	// When the scheduler takes g away from m,
	// it will undo the runtime_sched.mcpu++ above.
	runtime_gosched();

	// Gosched returned, so we're allowed to run now.
	// Delete the gcstack information that we left for
	// the garbage collector during the system call.
	// Must wait until now because until gosched returns
	// we don't know for sure that the garbage collector
	// is not running.
#ifdef USING_SPLIT_STACK
	gp->gcstack = nil;
#endif
	gp->gcnext_sp = nil;
	runtime_memclr(gp->gcregs, sizeof gp->gcregs);
}
G*
runtime_malg(int32 stacksize, byte** ret_stack, size_t* ret_stacksize)
{
	G *newg;

	newg = runtime_malloc(sizeof(G));
	if(stacksize >= 0) {
#if USING_SPLIT_STACK
		*ret_stack = __splitstack_makecontext(stacksize,
						      &newg->stack_context[0],
						      ret_stacksize);
#else
		*ret_stack = runtime_mallocgc(stacksize, FlagNoProfiling|FlagNoGC, 0, 0);
		*ret_stacksize = stacksize;
		newg->gcinitial_sp = *ret_stack;
		newg->gcstack_size = stacksize;
#endif
	}
	return newg;
}
__go_go(void (*fn)(void*), void* arg)
{
	byte *sp;
	size_t spsize;
	G * volatile newg;	// volatile to avoid longjmp warning

	if((newg = gfget()) != nil){
#ifdef USING_SPLIT_STACK
		sp = __splitstack_resetcontext(&newg->stack_context[0],
					       &spsize);
#else
		sp = newg->gcinitial_sp;
		spsize = newg->gcstack_size;
		newg->gcnext_sp = sp;
#endif
	} else {
		newg = runtime_malg(StackMin, &sp, &spsize);
		if(runtime_lastg == nil)
			runtime_allg = newg;
		else
			runtime_lastg->alllink = newg;
		runtime_lastg = newg;
	}
	newg->status = Gwaiting;
	newg->waitreason = "new goroutine";

	newg->entry = (byte*)fn;
	newg->param = arg;
	newg->gopc = (uintptr)__builtin_return_address(0);

	runtime_sched.gcount++;
	runtime_sched.goidgen++;
	newg->goid = runtime_sched.goidgen;

	if(sp == nil)
		runtime_throw("nil g->stack0");

	getcontext(&newg->context);
	newg->context.uc_stack.ss_sp = sp;
	newg->context.uc_stack.ss_size = spsize;
	makecontext(&newg->context, kickoff, 0);
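	// The three calls above are the standard ucontext idiom for
	// creating a fresh thread of control: getcontext initializes
	// newg->context, uc_stack points it at the new goroutine's
	// stack, and makecontext arranges for a later setcontext (in
	// runtime_gogo) to begin executing kickoff on that stack.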
	newprocreadylocked(newg);

	//printf(" goid=%d\n", newg->goid);

	return newg;
}
// Put on gfree list.  Sched must be locked.
static void
gfput(G *g)
{
	g->schedlink = runtime_sched.gfree;
	runtime_sched.gfree = g;
}

// Get from gfree list.  Sched must be locked.
static G*
gfget(void)
{
	G *g;

	g = runtime_sched.gfree;
	if(g)
		runtime_sched.gfree = g->schedlink;
	return g;
}
// Run all deferred functions for the current goroutine.
	while((d = g->defer) != nil) {
		void (*pfn)(void*);

		pfn = d->__pfn;
		d->__pfn = nil;
		if (pfn != nil)
			(*pfn)(d->__arg);
		g->defer = d->__next;
		runtime_free(d);
	}
void runtime_Goexit (void) asm ("libgo_runtime.runtime.Goexit");

void
runtime_Goexit(void)

void runtime_Gosched (void) asm ("libgo_runtime.runtime.Gosched");

void
runtime_Gosched(void)

void runtime_LockOSThread (void)
	__asm__ ("libgo_runtime.runtime.LockOSThread");

void
runtime_LockOSThread(void)
// delete when scheduler is stronger
int32
runtime_gomaxprocsfunc(int32 n)

	ret = runtime_gomaxprocs;
	if(n > maxgomaxprocs)
		n = maxgomaxprocs;
	runtime_gomaxprocs = n;
	if(runtime_gomaxprocs > 1)
		runtime_singleproc = false;
	if(runtime_gcwaiting != 0) {
		if(atomic_mcpumax(runtime_sched.atomic) != 1)
			runtime_throw("invalid mcpumax during gc");
	}

	// If there are now fewer allowed procs
	// than procs running, stop.
	v = runtime_atomicload(&runtime_sched.atomic);
	if((int32)atomic_mcpu(v) > n) {
		schedunlock();
		runtime_gosched();
		return ret;
	}

	// handle more procs
	matchmg();
void runtime_UnlockOSThread (void)
	__asm__ ("libgo_runtime.runtime.UnlockOSThread");

void
runtime_UnlockOSThread(void)

bool
runtime_lockedOSThread(void)
{
	return g->lockedm != nil && m->lockedg != nil;
}
// for testing of wire, unwire

int32 runtime_Goroutines (void)
	__asm__ ("libgo_runtime.runtime.Goroutines");

int32
runtime_Goroutines()
{
	return runtime_sched.gcount;
}

int32
runtime_mcount(void)
{
	return runtime_sched.mcount;
}
	void (*fn)(uintptr*, int32);

void
runtime_sigprof(uint8 *pc __attribute__ ((unused)),
		uint8 *sp __attribute__ ((unused)),
		uint8 *lr __attribute__ ((unused)),
		G *gp __attribute__ ((unused)))
{
	if(prof.fn == nil || prof.hz == 0)
		return;

	runtime_lock(&prof);
	if(prof.fn == nil) {
		runtime_unlock(&prof);
		return;
	}
	// n = runtime_gentraceback(pc, sp, lr, gp, 0, prof.pcbuf, nelem(prof.pcbuf));
	// prof.fn(prof.pcbuf, n);
	runtime_unlock(&prof);
}
runtime_setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
{
	// Force sane arguments.
	if(hz < 0)
		hz = 0;
	if(hz == 0)
		fn = nil;
	if(fn == nil)
		hz = 0;

	// Stop profiler on this cpu so that it is safe to lock prof.
	// If a profiling signal came in while we had prof locked,
	// it would deadlock.
	runtime_resetcpuprofiler(0);

	runtime_lock(&prof);
	prof.fn = fn;
	prof.hz = hz;
	runtime_unlock(&prof);
	runtime_lock(&runtime_sched);
	runtime_sched.profilehz = hz;
	runtime_unlock(&runtime_sched);

	if(hz != 0)
		runtime_resetcpuprofiler(hz);
}