1 /* go-go.c -- the go function.
3 Copyright 2009 The Go Authors. All rights reserved.
4 Use of this source code is governed by a BSD-style
5 license that can be found in the LICENSE file. */
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>

#include "config.h"
#include "go-assert.h"
#include "go-panic.h"
#include "go-alloc.h"
#include "runtime.h"
#include "malloc.h"
22 #ifdef USING_SPLIT_STACK
23 /* FIXME: This is not declared anywhere. */
24 extern void *__splitstack_find (void *, void *, size_t *, void **, void **,
28 /* We stop the threads by sending them the signal GO_SIG_STOP and we
29 start them by sending them the signal GO_SIG_START. */
31 #define GO_SIG_START (SIGRTMIN + 1)
32 #define GO_SIG_STOP (SIGRTMIN + 2)
38 /* A doubly linked list of the threads we have started. */
43 struct __go_thread_id *prev;
44 struct __go_thread_id *next;
45 /* True if the thread ID has not yet been filled in. */
49 /* Thread's M structure. */
51 /* If the thread ID has not been filled in, the function we are
54 /* If the thread ID has not been filled in, the argument to the
59 static struct __go_thread_id *__go_all_thread_ids;
61 /* A lock to control access to ALL_THREAD_IDS. */
63 static pthread_mutex_t __go_thread_ids_lock = PTHREAD_MUTEX_INITIALIZER;
65 /* A semaphore used to wait until all the threads have stopped. */
67 static sem_t __go_thread_ready_sem;
69 /* A signal set used to wait until garbage collection is complete. */
71 static sigset_t __go_thread_wait_sigset;
73 /* Remove the current thread from the list of threads. */
76 remove_current_thread (void)
78 struct __go_thread_id *list_entry;
82 list_entry = m->list_entry;
85 i = pthread_mutex_lock (&__go_thread_ids_lock);
88 if (list_entry->prev != NULL)
89 list_entry->prev->next = list_entry->next;
91 __go_all_thread_ids = list_entry->next;
92 if (list_entry->next != NULL)
93 list_entry->next->prev = list_entry->prev;
95 runtime_MCache_ReleaseAll (mcache);
97 i = pthread_mutex_unlock (&__go_thread_ids_lock);
100 runtime_lock (&runtime_mheap);
101 mstats.heap_alloc += mcache->local_alloc;
102 mstats.heap_objects += mcache->local_objects;
103 __builtin_memset (mcache, 0, sizeof (struct MCache));
104 runtime_FixAlloc_Free (&runtime_mheap.cachealloc, mcache);
105 runtime_unlock (&runtime_mheap);
110 /* Start the thread. */
113 start_go_thread (void *thread_arg)
115 struct M *newm = (struct M *) thread_arg;
116 void (*pfn) (void *);
118 struct __go_thread_id *list_entry;
122 __wrap_rtems_task_variable_add ((void **) &m);
123 __wrap_rtems_task_variable_add ((void **) &__go_panic_defer);
128 list_entry = newm->list_entry;
130 pfn = list_entry->pfn;
131 arg = list_entry->arg;
133 #ifndef USING_SPLIT_STACK
134 /* If we don't support split stack, record the current stack as the
135 top of the stack. There shouldn't be anything relevant to the
136 garbage collector above this point. */
137 m->gc_sp = (void *) &arg;
140 /* Finish up the entry on the thread list. */
142 i = pthread_mutex_lock (&__go_thread_ids_lock);
143 __go_assert (i == 0);
145 list_entry->id = pthread_self ();
146 list_entry->pfn = NULL;
147 list_entry->arg = NULL;
148 list_entry->tentative = 0;
150 i = pthread_mutex_unlock (&__go_thread_ids_lock);
151 __go_assert (i == 0);
155 remove_current_thread ();
160 /* The runtime.Goexit function. */
162 void Goexit (void) asm ("libgo_runtime.runtime.Goexit");
167 remove_current_thread ();
172 /* Implement the go statement. */
175 __go_go (void (*pfn) (void*), void *arg)
180 struct __go_thread_id *list_entry;
183 i = pthread_attr_init (&attr);
184 __go_assert (i == 0);
185 i = pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
186 __go_assert (i == 0);
188 #ifdef LINKER_SUPPORTS_SPLIT_STACK
189 /* The linker knows how to handle calls between code which uses
190 -fsplit-stack and code which does not. That means that we can
191 run with a smaller stack and rely on the -fsplit-stack support to
192 save us. The GNU/Linux glibc library won't let us have a very
193 small stack, but we make it as small as we can. */
194 #ifndef PTHREAD_STACK_MIN
195 #define PTHREAD_STACK_MIN 8192
197 i = pthread_attr_setstacksize (&attr, PTHREAD_STACK_MIN);
198 __go_assert (i == 0);
201 newm = __go_alloc (sizeof (M));
203 list_entry = malloc (sizeof (struct __go_thread_id));
204 list_entry->prev = NULL;
205 list_entry->next = NULL;
206 list_entry->tentative = 1;
207 list_entry->m = newm;
208 list_entry->pfn = pfn;
209 list_entry->arg = arg;
211 newm->list_entry = list_entry;
213 newm->mcache = runtime_allocmcache ();
215 /* Add the thread to the list of all threads, marked as tentative
216 since it is not yet ready to go. */
217 i = pthread_mutex_lock (&__go_thread_ids_lock);
218 __go_assert (i == 0);
220 if (__go_all_thread_ids != NULL)
221 __go_all_thread_ids->prev = list_entry;
222 list_entry->next = __go_all_thread_ids;
223 __go_all_thread_ids = list_entry;
225 i = pthread_mutex_unlock (&__go_thread_ids_lock);
226 __go_assert (i == 0);
228 /* Start the thread. */
229 i = pthread_create (&tid, &attr, start_go_thread, newm);
230 __go_assert (i == 0);
232 i = pthread_attr_destroy (&attr);
233 __go_assert (i == 0);
236 /* This is the signal handler for GO_SIG_START. The garbage collector
237 will send this signal to a thread when it wants the thread to
238 start. We don't have to actually do anything here, but we need a
239 signal handler since ignoring the signal will mean that the
240 sigsuspend will never see it. */
243 gc_start_handler (int sig __attribute__ ((unused)))
247 /* Tell the garbage collector that we are ready, and wait for the
248 garbage collector to tell us that it is done. This may be called
249 by a signal handler, so it is restricted to using functions which
250 are async cancel safe. */
257 /* Tell the garbage collector about our stack. */
258 #ifdef USING_SPLIT_STACK
259 m->gc_sp = __splitstack_find (NULL, NULL, &m->gc_len,
260 &m->gc_next_segment, &m->gc_next_sp,
264 uintptr_t top = (uintptr_t) m->gc_sp;
265 uintptr_t bottom = (uintptr_t) ⊤
268 m->gc_next_sp = m->gc_sp;
269 m->gc_len = bottom - top;
273 m->gc_next_sp = (void *) bottom;
274 m->gc_len = top - bottom;
279 /* FIXME: Perhaps we should just move __go_panic_defer into M. */
280 m->gc_panic_defer = __go_panic_defer;
282 /* Tell the garbage collector that we are ready by posting to the
284 i = sem_post (&__go_thread_ready_sem);
285 __go_assert (i == 0);
287 /* Wait for the garbage collector to tell us to continue. */
288 sigsuspend (&__go_thread_wait_sigset);
291 /* This is the signal handler for GO_SIG_STOP. The garbage collector
292 will send this signal to a thread when it wants the thread to
296 gc_stop_handler (int sig __attribute__ ((unused)))
300 if (__sync_bool_compare_and_swap (&pm->mallocing, 1, 1))
302 /* m->mallocing was already non-zero. We can't interrupt the
303 thread while it is running an malloc. Instead, tell it to
304 call back to us when done. */
305 __sync_bool_compare_and_swap (&pm->gcing, 0, 1);
309 if (__sync_bool_compare_and_swap (&pm->nomemprof, 1, 1))
311 /* Similarly, we can't interrupt the thread while it is building
312 profiling information. Otherwise we can get into a deadlock
313 when sweepspan calls MProf_Free. */
314 __sync_bool_compare_and_swap (&pm->gcing_for_prof, 0, 1);
321 /* This is called by malloc when it gets a signal during the malloc
325 __go_run_goroutine_gc (int r)
327 /* Force callee-saved registers to be saved on the stack. This is
328 not needed if we are invoked from the signal handler, but it is
329 needed if we are called directly, since otherwise we might miss
330 something that a function somewhere up the call stack is holding
332 __builtin_unwind_init ();
336 /* This avoids tail recursion, to make sure that the saved registers
341 /* Stop all the other threads for garbage collection. */
344 runtime_stoptheworld (void)
349 struct __go_thread_id *p;
351 i = pthread_mutex_lock (&__go_thread_ids_lock);
352 __go_assert (i == 0);
354 me = pthread_self ();
356 p = __go_all_thread_ids;
359 if (p->tentative || pthread_equal (me, p->id))
363 i = pthread_kill (p->id, GO_SIG_STOP);
371 struct __go_thread_id *next;
373 /* This thread died somehow. Remove it from the
377 p->prev->next = next;
379 __go_all_thread_ids = next;
381 next->prev = p->prev;
390 /* Wait for each thread to receive the signal and post to the
391 semaphore. If a thread receives the signal but contrives to die
392 before it posts to the semaphore, then we will hang forever
397 i = sem_wait (&__go_thread_ready_sem);
398 if (i < 0 && errno == EINTR)
400 __go_assert (i == 0);
404 /* The gc_panic_defer field should now be set for all M's except the
405 one in this thread. Set this one now. */
406 m->gc_panic_defer = __go_panic_defer;
408 /* Leave with __go_thread_ids_lock held. */
411 /* Scan all the stacks for garbage collection. This should be called
412 with __go_thread_ids_lock held. */
415 __go_scanstacks (void (*scan) (byte *, int64))
418 struct __go_thread_id *p;
420 /* Make sure all the registers for this thread are on the stack. */
421 __builtin_unwind_init ();
423 me = pthread_self ();
424 for (p = __go_all_thread_ids; p != NULL; p = p->next)
428 /* The goroutine function and argument can be allocated on
429 the heap, so we have to scan them for a thread that has
431 scan ((void *) &p->pfn, sizeof (void *));
432 scan ((void *) &p->arg, sizeof (void *));
433 scan ((void *) &p->m, sizeof (void *));
437 #ifdef USING_SPLIT_STACK
445 if (pthread_equal (me, p->id))
450 sp = __splitstack_find (NULL, NULL, &len, &next_segment,
451 &next_sp, &initial_sp);
457 next_segment = p->m->gc_next_segment;
458 next_sp = p->m->gc_next_sp;
459 initial_sp = p->m->gc_initial_sp;
465 sp = __splitstack_find (next_segment, next_sp, &len,
466 &next_segment, &next_sp, &initial_sp);
469 #else /* !defined(USING_SPLIT_STACK) */
471 if (pthread_equal (me, p->id))
473 uintptr_t top = (uintptr_t) m->gc_sp;
474 uintptr_t bottom = (uintptr_t) ⊤
476 scan (m->gc_sp, bottom - top);
478 scan ((void *) bottom, top - bottom);
482 scan (p->m->gc_next_sp, p->m->gc_len);
485 #endif /* !defined(USING_SPLIT_STACK) */
487 /* Also scan the M structure while we're at it. */
489 scan ((void *) &p->m, sizeof (void *));
493 /* Release all the memory caches. This is called with
494 __go_thread_ids_lock held. */
497 __go_stealcache (void)
499 struct __go_thread_id *p;
501 for (p = __go_all_thread_ids; p != NULL; p = p->next)
502 runtime_MCache_ReleaseAll (p->m->mcache);
505 /* Gather memory cache statistics. This is called with
506 __go_thread_ids_lock held. */
509 __go_cachestats (void)
511 struct __go_thread_id *p;
513 for (p = __go_all_thread_ids; p != NULL; p = p->next)
518 mstats.heap_alloc += c->local_alloc;
520 mstats.heap_objects += c->local_objects;
521 c->local_objects = 0;
525 /* Start the other threads after garbage collection. */
528 runtime_starttheworld (void)
532 struct __go_thread_id *p;
534 /* Here __go_thread_ids_lock should be held. */
536 me = pthread_self ();
537 p = __go_all_thread_ids;
540 if (p->tentative || pthread_equal (me, p->id))
544 i = pthread_kill (p->id, GO_SIG_START);
552 i = pthread_mutex_unlock (&__go_thread_ids_lock);
553 __go_assert (i == 0);
556 /* Initialize the interaction between goroutines and the garbage
560 __go_gc_goroutine_init (void *sp __attribute__ ((unused)))
562 struct __go_thread_id *list_entry;
565 struct sigaction act;
567 /* Add the initial thread to the list of all threads. */
569 list_entry = malloc (sizeof (struct __go_thread_id));
570 list_entry->prev = NULL;
571 list_entry->next = NULL;
572 list_entry->tentative = 0;
573 list_entry->id = pthread_self ();
575 list_entry->pfn = NULL;
576 list_entry->arg = NULL;
577 __go_all_thread_ids = list_entry;
579 /* Initialize the semaphore which signals when threads are ready for
582 i = sem_init (&__go_thread_ready_sem, 0, 0);
583 __go_assert (i == 0);
585 /* Fetch the current signal mask. */
587 i = sigemptyset (&sset);
588 __go_assert (i == 0);
589 i = sigprocmask (SIG_BLOCK, NULL, &sset);
590 __go_assert (i == 0);
592 /* Make sure that GO_SIG_START is not blocked and GO_SIG_STOP is
593 blocked, and save that set for use with later calls to sigsuspend
594 while waiting for GC to complete. */
596 i = sigdelset (&sset, GO_SIG_START);
597 __go_assert (i == 0);
598 i = sigaddset (&sset, GO_SIG_STOP);
599 __go_assert (i == 0);
600 __go_thread_wait_sigset = sset;
602 /* Block SIG_SET_START and unblock SIG_SET_STOP, and use that for
603 the process signal mask. */
605 i = sigaddset (&sset, GO_SIG_START);
606 __go_assert (i == 0);
607 i = sigdelset (&sset, GO_SIG_STOP);
608 __go_assert (i == 0);
609 i = sigprocmask (SIG_SETMASK, &sset, NULL);
610 __go_assert (i == 0);
612 /* Install the signal handlers. */
613 memset (&act, 0, sizeof act);
614 i = sigemptyset (&act.sa_mask);
615 __go_assert (i == 0);
617 act.sa_handler = gc_start_handler;
618 act.sa_flags = SA_RESTART;
619 i = sigaction (GO_SIG_START, &act, NULL);
620 __go_assert (i == 0);
622 /* We could consider using an alternate signal stack for this. The
623 function does not use much stack space, so it may be OK. */
624 act.sa_handler = gc_stop_handler;
625 i = sigaction (GO_SIG_STOP, &act, NULL);
626 __go_assert (i == 0);
628 #ifndef USING_SPLIT_STACK
629 /* If we don't support split stack, record the current stack as the