extern void *__splitstack_find(void *, void *, size_t *, void **, void **,
void **);
+extern void __splitstack_block_signals (int *, int *);
+
+extern void __splitstack_block_signals_context (void *context[10], int *,
+ int *);
+
#endif
#if defined(USING_SPLIT_STACK) && defined(LINKER_SUPPORTS_SPLIT_STACK)
#endif
static void schedule(G*);
-static M *startm(void);
typedef struct Sched Sched;
static __thread G *g;
static __thread M *m;
+#ifndef SETCONTEXT_CLOBBERS_TLS
+
+static inline void
+initcontext(void)
+{
+}
+
+static inline void
+fixcontext(ucontext_t *c __attribute__ ((unused)))
+{
+}
+
+# else
+
+# if defined(__x86_64__) && defined(__sun__)
+
+// x86_64 Solaris 10 and 11 have a bug: setcontext switches the %fs
+// register to that of the thread which called getcontext. The effect
+// is that the address of all __thread variables changes. This bug
+// also affects pthread_self() and pthread_getspecific. We work
+// around it by clobbering the context field directly to keep %fs the
+// same.
+
+static __thread greg_t fs;
+
+static inline void
+initcontext(void)
+{
+ ucontext_t c;
+
+ getcontext(&c);
+ fs = c.uc_mcontext.gregs[REG_FSBASE];
+}
+
+static inline void
+fixcontext(ucontext_t* c)
+{
+ c->uc_mcontext.gregs[REG_FSBASE] = fs;
+}
+
+# else
+
+# error unknown case for SETCONTEXT_CLOBBERS_TLS
+
+# endif
+
+#endif
+
// We can not always refer to the TLS variables directly. The
// compiler will call tls_get_addr to get the address of the variable,
// and it may hold it in a register across a call to schedule. When
volatile uint32 atomic; // atomic scheduling word (see below)
int32 profilehz; // cpu profiling rate
-
+
bool init; // running initialization
bool lockmain; // init called runtime.LockOSThread
#endif
g = newg;
newg->fromgogo = true;
+ fixcontext(&newg->context);
setcontext(&newg->context);
+ runtime_throw("gogo setcontext returned");
}
// Save context and call fn passing g as a parameter. This is like
static void
runtime_mcall(void (*pfn)(G*))
{
+ M *mp;
+ G *gp;
#ifndef USING_SPLIT_STACK
int i;
#endif
// collector.
__builtin_unwind_init();
- if(g == m->g0)
+ mp = m;
+ gp = g;
+ if(gp == mp->g0)
runtime_throw("runtime: mcall called on m->g0 stack");
- if(g != nil) {
+ if(gp != nil) {
#ifdef USING_SPLIT_STACK
__splitstack_getcontext(&g->stack_context[0]);
#else
- g->gcnext_sp = &i;
+ gp->gcnext_sp = &i;
#endif
- g->fromgogo = false;
- getcontext(&g->context);
+ gp->fromgogo = false;
+ getcontext(&gp->context);
+
+	// When we return from getcontext, we may be running
+	// in a new thread.  m and g are thread-local
+	// (__thread) variables, so their values — and their
+	// addresses — depend on which thread we are on.  Any
+	// addresses cached in our local stack frame may
+	// therefore be stale.  Call the accessor functions to
+	// reload the values for the current thread.
+ mp = runtime_m();
+ gp = runtime_g();
}
- if (g == nil || !g->fromgogo) {
+ if (gp == nil || !gp->fromgogo) {
#ifdef USING_SPLIT_STACK
- __splitstack_setcontext(&m->g0->stack_context[0]);
+ __splitstack_setcontext(&mp->g0->stack_context[0]);
#endif
- m->g0->entry = (byte*)pfn;
- m->g0->param = g;
- g = m->g0;
- setcontext(&m->g0->context);
+ mp->g0->entry = (byte*)pfn;
+ mp->g0->param = gp;
+
+	// It's OK to set g directly here because this case
+	// cannot occur if we reached this point via a
+	// setcontext to the getcontext call just above.
+ g = mp->g0;
+
+ fixcontext(&mp->g0->context);
+ setcontext(&mp->g0->context);
runtime_throw("runtime: mcall function returned");
}
}
m->curg = g;
g->m = m;
+ initcontext();
+
m->nomemprof++;
runtime_mallocinit();
mcommoninit(m);
runtime_atomicstorep((void**)&runtime_allm, m);
m->id = runtime_sched.mcount++;
- m->fastrand = 0x49f6428aUL + m->id;
+ m->fastrand = 0x49f6428aUL + m->id + runtime_cputicks();
if(m->mcache == nil)
m->mcache = runtime_allocmcache();
// but m is not running a specific goroutine,
// so set the helpgc flag as a signal to m's
// first schedule(nil) to mcpu-- and grunning--.
- m = startm();
+ m = runtime_newm();
m->helpgc = 1;
runtime_sched.grunning++;
}
m = (M*)mp;
g = m->g0;
+ initcontext();
+
g->entry = nil;
g->param = nil;
__splitstack_getcontext(&g->stack_context[0]);
#else
g->gcinitial_sp = ∓
- g->gcstack_size = StackMin;
+ // Setting gcstack_size to 0 is a marker meaning that gcinitial_sp
+ // is the top of the stack, not the bottom.
+ g->gcstack_size = 0;
g->gcnext_sp = ∓
#endif
getcontext(&g->context);
*(int*)0x21 = 0x21;
}
runtime_minit();
+
+#ifdef USING_SPLIT_STACK
+ {
+ int dont_block_signals = 0;
+ __splitstack_block_signals(&dont_block_signals, nil);
+ }
+#endif
+
schedule(nil);
return nil;
}
};
// Kick off new m's as needed (up to mcpumax).
-// There are already `other' other cpus that will
-// start looking for goroutines shortly.
// Sched is locked.
static void
matchmg(void)
// Find the m that will run gp.
if((mp = mget(gp)) == nil)
- mp = startm();
+ mp = runtime_newm();
mnextg(mp, gp);
}
}
-static M*
-startm(void)
+// Create a new m. It will start off with a call to runtime_mstart.
+M*
+runtime_newm(void)
{
M *m;
pthread_attr_t attr;
runtime_memclr(gp->gcregs, sizeof gp->gcregs);
}
+// Allocate a new g, with a stack big enough for stacksize bytes.
G*
runtime_malg(int32 stacksize, byte** ret_stack, size_t* ret_stacksize)
{
newg = runtime_malloc(sizeof(G));
if(stacksize >= 0) {
#if USING_SPLIT_STACK
+ int dont_block_signals = 0;
+
*ret_stack = __splitstack_makecontext(stacksize,
&newg->stack_context[0],
ret_stacksize);
+ __splitstack_block_signals_context(&newg->stack_context[0],
+ &dont_block_signals, nil);
#else
*ret_stack = runtime_mallocgc(stacksize, FlagNoProfiling|FlagNoGC, 0, 0);
*ret_stacksize = stacksize;
if((newg = gfget()) != nil){
#ifdef USING_SPLIT_STACK
+ int dont_block_signals = 0;
+
sp = __splitstack_resetcontext(&newg->stack_context[0],
&spsize);
+ __splitstack_block_signals_context(&newg->stack_context[0],
+ &dont_block_signals, nil);
#else
sp = newg->gcinitial_sp;
spsize = newg->gcstack_size;
+ if(spsize == 0)
+ runtime_throw("bad spsize in __go_go");
newg->gcnext_sp = sp;
#endif
} else {
runtime_gosched();
}
+// Implementation of runtime.GOMAXPROCS.
// delete when scheduler is stronger
int32
runtime_gomaxprocsfunc(int32 n)
uintptr pcbuf[100];
} prof;
+// Called if we receive a SIGPROF signal.
void
runtime_sigprof(uint8 *pc __attribute__ ((unused)),
uint8 *sp __attribute__ ((unused)),
runtime_unlock(&prof);
}
+// Arrange to call fn with a traceback hz times a second.
void
runtime_setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
{