/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for Solaris threads.  Provides functionality we wish Sun
 * had provided.  Relies on some information we probably shouldn't rely on.
 */
/* Boehm, September 14, 1994 4:44 pm PDT */
# if defined(GC_SOLARIS_THREADS) || defined(GC_SOLARIS_PTHREADS) \
     || defined(GC_THREADS)

# include "private/gc_priv.h"

# if defined(GC_SOLARIS_THREADS) || defined(GC_SOLARIS_PTHREADS)
# include "private/solaris_threads.h"
# include <thread.h>
# include <synch.h>
# include <signal.h>
# include <fcntl.h>
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/resource.h>
# include <sys/stat.h>
# include <sys/syscall.h>
# include <sys/procfs.h>
# include <sys/lwp.h>
# include <sys/reg.h>
# define _CLASSIC_XOPEN_TYPES
# include <unistd.h>
# include <errno.h>

#ifdef HANDLE_FORK
  --> Not yet supported.  Try porting the code from linux_threads.c.
#endif
/*
 * This is the default size of the LWP arrays.  If there are more LWPs
 * than this when a stop-the-world GC happens, set_max_lwps will be
 * called to grow the arrays.
 * This must be higher than the number of LWPs at startup time.
 * The threads library creates a thread early on, so the minimum is 3.
 */
# define DEFAULT_MAX_LWPS	4

cond_t GC_prom_join_cv;		/* Broadcast when any thread terminates	*/
cond_t GC_create_cv;		/* Signalled when a new undetached	*/
				/* thread starts.			*/

#ifdef MMAP_STACKS
static int GC_zfd;		/* /dev/zero file descriptor for stack mmaps */
#endif /* MMAP_STACKS */
/* We use the allocation lock to protect thread-related data structures. */

/* We stop the world using /proc primitives.  This makes some		*/
/* minimal assumptions about the threads implementation.		*/
/* We don't play by the rules, since the rules make this		*/
/* impossible (as of Solaris 2.3).  Also note that as of		*/
/* Solaris 2.3 the various thread and lwp suspension			*/
/* primitives failed to stop threads by the time the request		*/
/* is completed.							*/

static sigset_t old_mask;
/* Sleep for n milliseconds, n < 1000.	*/
void GC_msec_sleep(int n)
{
    struct timespec ts;

    ts.tv_sec = 0;
    ts.tv_nsec = 1000000*n;
    if (syscall(SYS_nanosleep, &ts, 0) < 0) {
	ABORT("nanosleep failed");
    }
}
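/* Illustrative sketch (not part of the collector): GC_msec_sleep is meant	*/
/* for bounded polling, as in the PR_STOPPED wait loop in stop_all_lwps	*/
/* below.  The condition_holds() predicate and retry bound are hypothetical.	*/
#if 0
    {
	int tries = 0;

	while (!condition_holds() && tries < 50) {
	    GC_msec_sleep(20);	/* back off 20 ms between polls */
	    tries++;
	}
    }
#endif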
/* Turn off preemption; gross but effective.		*/
/* Caller has allocation lock.				*/
/* Actually this is not needed under Solaris 2.3 and	*/
/* 2.4, but hopefully that'll change.			*/
void preempt_off()
{
    sigset_t set;

    (void)sigfillset(&set);
    sigdelset(&set, SIGABRT);
    syscall(SYS_sigprocmask, SIG_SETMASK, &set, &old_mask);
}

void preempt_on()
{
    syscall(SYS_sigprocmask, SIG_SETMASK, &old_mask, NULL);
}
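/* Hedged usage sketch: preempt_off/preempt_on are intended to bracket a	*/
/* stop/start pair, so that no signal (and hence no preemption point)		*/
/* intervenes.  Compare GC_stop_world/GC_start_world below.			*/
#if 0
    preempt_off();		/* mask signals; old mask saved in old_mask */
    stop_all_lwps();
    /* scan stacks and saved registers here */
    restart_all_lwps();
    preempt_on();		/* restore the saved signal mask */
#endif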
int GC_main_proc_fd = -1;

struct lwp_cache_entry {
    lwpid_t lc_id;	/* lwp id, or 0 if the entry is free.	*/
    int lc_descr;	/* /proc file descriptor.		*/
} GC_lwp_cache_default[DEFAULT_MAX_LWPS];

static int max_lwps = DEFAULT_MAX_LWPS;
static struct lwp_cache_entry *GC_lwp_cache = GC_lwp_cache_default;

static prgregset_t GC_lwp_registers_default[DEFAULT_MAX_LWPS];
static prgregset_t *GC_lwp_registers = GC_lwp_registers_default;
/* Return a file descriptor for the /proc entry corresponding	*/
/* to the given lwp.  The file descriptor may be stale if the	*/
/* lwp exited and a new one was forked.				*/
static int open_lwp(lwpid_t id)
{
    int result;
    static int next_victim = 0;
    register int i;

    for (i = 0; i < max_lwps; i++) {
	if (GC_lwp_cache[i].lc_id == id) return(GC_lwp_cache[i].lc_descr);
    }
    result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id);
    /*
     * If PIOCOPENLWP fails with EMFILE, close fds in the cache until it
     * succeeds.
     */
    if (result < 0 && errno == EMFILE) {
	for (i = 0; i < max_lwps; i++) {
	    if (GC_lwp_cache[i].lc_id != 0) {
		(void)syscall(SYS_close, GC_lwp_cache[i].lc_descr);
		GC_lwp_cache[i].lc_id = 0;  /* entry no longer holds an open fd */
		result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id);
		if (result >= 0 || errno != EMFILE) break;
	    }
	}
    }
    if (result < 0) {
	if (errno == EMFILE) {
	    ABORT("Too many open files");
	}
	return(-1) /* exited? */;
    }
    if (GC_lwp_cache[next_victim].lc_id != 0)
	(void)syscall(SYS_close, GC_lwp_cache[next_victim].lc_descr);
    GC_lwp_cache[next_victim].lc_id = id;
    GC_lwp_cache[next_victim].lc_descr = result;
    if (++next_victim >= max_lwps)
	next_victim = 0;
    return(result);
}
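/* Hedged usage sketch: a -1 result means the lwp has probably exited;	*/
/* a valid descriptor is owned by the cache and must not be closed by	*/
/* the caller.  The lwp id "target" is hypothetical.			*/
#if 0
    {
	int fd = open_lwp(target);

	if (fd < 0) {
	    /* Treat the lwp as gone, as stop_all_lwps does below. */
	} else {
	    /* Issue PIOC* ioctls on fd; the cache owns fd, so don't close it. */
	}
    }
#endif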
static void uncache_lwp(lwpid_t id)
{
    register int i;

    for (i = 0; i < max_lwps; i++) {
	if (GC_lwp_cache[i].lc_id == id) {
	    (void)syscall(SYS_close, GC_lwp_cache[i].lc_descr);
	    GC_lwp_cache[i].lc_id = 0;
	    break;
	}
    }
}
/* Sequence of current lwp ids, zero-terminated.	*/
static lwpid_t GC_current_ids_default[DEFAULT_MAX_LWPS + 1];
static lwpid_t *GC_current_ids = GC_current_ids_default;

/* Temporary used below (can be big if there are many LWPs).	*/
static lwpid_t last_ids_default[DEFAULT_MAX_LWPS + 1];
static lwpid_t *last_ids = last_ids_default;

#define ROUNDUP(n) WORDS_TO_BYTES(ROUNDED_UP_WORDS(n))
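/* Worked example (illustrative, assuming 4-byte words): ROUNDED_UP_WORDS	*/
/* rounds a byte count up to whole words, so ROUNDUP(10) == 12 and		*/
/* ROUNDUP(16) == 16.  set_max_lwps below relies on this to keep each piece	*/
/* it carves from a single scratch block word-aligned.				*/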
static void set_max_lwps(GC_word n)
{
    char *mem;
    char *oldmem;
    int required_bytes = ROUNDUP(n * sizeof(struct lwp_cache_entry))
	+ ROUNDUP(n * sizeof(prgregset_t))
	+ ROUNDUP((n + 1) * sizeof(lwpid_t))
	+ ROUNDUP((n + 1) * sizeof(lwpid_t));

    GC_expand_hp_inner(divHBLKSZ((word)required_bytes));
    oldmem = mem = GC_scratch_alloc(required_bytes);
    if (0 == mem) ABORT("No space for lwp data structures");

    /*
     * We can either flush the old lwp cache or copy it over.  Do the latter.
     */
    memcpy(mem, GC_lwp_cache, max_lwps * sizeof(struct lwp_cache_entry));
    GC_lwp_cache = (struct lwp_cache_entry*)mem;
    mem += ROUNDUP(n * sizeof(struct lwp_cache_entry));

    BZERO(GC_lwp_registers, max_lwps * sizeof(GC_lwp_registers[0]));
    GC_lwp_registers = (prgregset_t *)mem;
    mem += ROUNDUP(n * sizeof(prgregset_t));

    GC_current_ids = (lwpid_t *)mem;
    mem += ROUNDUP((n + 1) * sizeof(lwpid_t));

    last_ids = (lwpid_t *)mem;
    mem += ROUNDUP((n + 1) * sizeof(lwpid_t));

    if (mem > oldmem + required_bytes)
	ABORT("set_max_lwps buffer overflow");

    max_lwps = n;
}
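/* Layout sketch (illustrative): set_max_lwps carves one scratch block	*/
/* into four ROUNDUP-aligned pieces, in this order:			*/
/*									*/
/*   | lwp cache (n) | lwp registers (n) | current ids (n+1) |		*/
/*   | last ids (n+1) |							*/
/*									*/
/* The bounds check above verifies the arithmetic.			*/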
/* Stop all lwps in process.  Assumes preemption is off.	*/
/* Caller has allocation lock (and any other locks he may	*/
/* need).							*/
static void stop_all_lwps()
{
    int lwp_fd;
    char buf[30];
    prstatus_t status;
    register int i;
    GC_bool changed;
    lwpid_t me = _lwp_self();

    if (GC_main_proc_fd == -1) {
	sprintf(buf, "/proc/%d", getpid());
	GC_main_proc_fd = syscall(SYS_open, buf, O_RDONLY);
	if (GC_main_proc_fd < 0) {
	    if (errno == EMFILE)
		ABORT("/proc open failed: too many open files");
	    GC_printf1("/proc open failed: errno %d", errno);
	    ABORT("Can't open /proc");
	}
    }
    BZERO(GC_lwp_registers, sizeof (prgregset_t) * max_lwps);
    for (i = 0; i < max_lwps; i++)
	last_ids[i] = 0;
    for (;;) {
	if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCSTATUS, &status) < 0)
	    ABORT("Main PIOCSTATUS failed");
	if (status.pr_nlwp < 1)
	    ABORT("Invalid number of lwps returned by PIOCSTATUS");
	if (status.pr_nlwp >= max_lwps) {
	    set_max_lwps(status.pr_nlwp*2 + 10);
	    /*
	     * The data in the old GC_current_ids and
	     * GC_lwp_registers has been trashed.  Cleaning out last_ids
	     * will make sure every LWP gets re-examined.
	     */
	    for (i = 0; i < max_lwps; i++)
		last_ids[i] = 0;
	    continue;
	}
	if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCLWPIDS, GC_current_ids) < 0)
	    ABORT("PIOCLWPIDS failed");
	changed = FALSE;
	for (i = 0; i < max_lwps && GC_current_ids[i] != 0; i++) {
	    if (GC_current_ids[i] != last_ids[i]) {
		changed = TRUE;
		if (GC_current_ids[i] != me) {
		    /* PIOCSTOP doesn't work without a writable	*/
		    /* descriptor.  And that makes the process	*/
		    /* undebuggable.				*/
		    if (_lwp_suspend(GC_current_ids[i]) < 0) {
			/* Could happen if the lwp exited. */
			uncache_lwp(GC_current_ids[i]);
			GC_current_ids[i] = me; /* ignore */
		    }
		}
	    }
	}
	/*
	 * In the unlikely event something did a fork between the
	 * PIOCSTATUS and the PIOCLWPIDS, start over.
	 */
	if (i >= max_lwps)
	    continue;

	/* All lwps in GC_current_ids != me have been suspended.  Note	*/
	/* that _lwp_suspend is idempotent.				*/
	for (i = 0; GC_current_ids[i] != 0; i++) {
	    if (GC_current_ids[i] != last_ids[i]) {
		if (GC_current_ids[i] != me) {
		    lwp_fd = open_lwp(GC_current_ids[i]);
		    if (lwp_fd == -1) {
			/* Lwp apparently exited; skip it. */
			GC_current_ids[i] = me;
			continue;
		    }
		    /* LWP should be stopped.  Empirically it sometimes	*/
		    /* isn't, and more frequently the PR_STOPPED flag	*/
		    /* is not set.  Wait for PR_STOPPED.		*/
		    if (syscall(SYS_ioctl, lwp_fd,
				PIOCSTATUS, &status) < 0) {
			/* Possible if the descriptor was stale, or	*/
			/* we encountered the 2.3 _lwp_suspend bug.	*/
			uncache_lwp(GC_current_ids[i]);
			GC_current_ids[i] = me; /* handle next time. */
		    } else {
			while (!(status.pr_flags & PR_STOPPED)) {
			    GC_msec_sleep(1);
			    if (syscall(SYS_ioctl, lwp_fd,
					PIOCSTATUS, &status) < 0) {
				ABORT("Repeated PIOCSTATUS failed");
			    }
			    if (status.pr_flags & PR_STOPPED) break;

			    GC_msec_sleep(20);
			    if (syscall(SYS_ioctl, lwp_fd,
					PIOCSTATUS, &status) < 0) {
				ABORT("Repeated PIOCSTATUS failed");
			    }
			}
			if (status.pr_who != GC_current_ids[i]) {
			    /* Can happen if the thread was on death row. */
			    uncache_lwp(GC_current_ids[i]);
			    GC_current_ids[i] = me; /* handle next time. */
			    continue;
			}
			/* Save registers where collector can	*/
			/* find them.				*/
			BCOPY(status.pr_reg, GC_lwp_registers[i],
			      sizeof (prgregset_t));
		    }
		}
	    }
	}
	if (!changed) break;
	for (i = 0; i < max_lwps; i++) last_ids[i] = GC_current_ids[i];
    }
}
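/* Protocol summary (illustrative): stopping is a fixed-point iteration.	*/
/* Each pass lists the lwps (PIOCLWPIDS), suspends any lwp not seen on the	*/
/* previous pass (_lwp_suspend), and polls each one with PIOCSTATUS until	*/
/* PR_STOPPED is set.  Passes repeat until the lwp list stops changing,	*/
/* which must eventually happen, since suspended lwps cannot spawn new ones.	*/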
/* Restart all lwps in process.  Assumes preemption is off.	*/
static void restart_all_lwps()
{
    register int i;
    lwpid_t me = _lwp_self();
#   define PARANOID

    for (i = 0; GC_current_ids[i] != 0; i++) {
#	ifdef PARANOID
	if (GC_current_ids[i] != me) {
	    int lwp_fd = open_lwp(GC_current_ids[i]);
	    prstatus_t status;

	    if (lwp_fd < 0) ABORT("open_lwp failed");
	    if (syscall(SYS_ioctl, lwp_fd,
			PIOCSTATUS, &status) < 0) {
		ABORT("PIOCSTATUS failed in restart_all_lwps");
	    }
	    if (memcmp(status.pr_reg, GC_lwp_registers[i],
		       sizeof (prgregset_t)) != 0) {
		int j;

		for (j = 0; j < NPRGREG; j++) {
		    GC_printf3("%i: %x -> %x\n", j,
			       GC_lwp_registers[i][j],
			       status.pr_reg[j]);
		}
		ABORT("Register contents changed");
	    }
	    if (!(status.pr_flags & PR_STOPPED)) {
		ABORT("lwp no longer stopped");
	    }
#ifdef SPARC
	    {
		gwindows_t windows;

		if (syscall(SYS_ioctl, lwp_fd,
			    PIOCGWIN, &windows) < 0) {
		    ABORT("PIOCGWIN failed in restart_all_lwps");
		}
		if (windows.wbcnt > 0) ABORT("unsaved register windows");
	    }
#endif
	}
#	endif /* PARANOID */
	if (GC_current_ids[i] == me) continue;
	if (_lwp_continue(GC_current_ids[i]) < 0) {
	    ABORT("Failed to restart lwp");
	}
    }
    if (i >= max_lwps) ABORT("Too many lwps");
}
GC_bool GC_multithreaded = 0;

void GC_stop_world()
{
    preempt_off();
    if (GC_multithreaded)
	stop_all_lwps();
}

void GC_start_world()
{
    if (GC_multithreaded)
	restart_all_lwps();
    preempt_on();
}
void GC_thr_init(void);

GC_bool GC_thr_initialized = FALSE;

size_t GC_min_stack_sz;

/*
 * A stack_head is stored at the top of free stacks.
 */
struct stack_head {
    struct stack_head *next;
    ptr_t base;
};

# define N_FREE_LISTS 25
struct stack_head *GC_stack_free_lists[N_FREE_LISTS] = { 0 };
		/* GC_stack_free_lists[i] is free list for stacks of	*/
		/* size GC_min_stack_sz*2**i.				*/
		/* Free lists are linked through stack_head stored	*/
		/* at top of stack.					*/
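/* Illustrative sketch (not compiled): how a stack size maps to its	*/
/* free-list index in GC_stack_alloc/GC_stack_free below.  E.g., with a	*/
/* 64K GC_min_stack_sz, a 200K request rounds up to 256K, index 2.	*/
#if 0
static int stack_size_to_index(size_t sz)
{
    size_t search_sz = GC_min_stack_sz;
    int index = 0;

    while (search_sz < sz) {
	search_sz *= 2;
	index++;
    }
    return(index);
}
#endif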
/* Return a stack of size at least *stack_size.  *stack_size is	*/
/* replaced by the actual stack size.					*/
/* Caller holds allocation lock.					*/
ptr_t GC_stack_alloc(size_t * stack_size)
{
    register size_t requested_sz = *stack_size;
    register size_t search_sz = GC_min_stack_sz;
    register int index = 0;	/* = log2(search_sz/GC_min_stack_sz) */
    register ptr_t base;
    register struct stack_head *result;

    while (search_sz < requested_sz) {
	search_sz *= 2;
	index++;
    }
    if ((result = GC_stack_free_lists[index]) == 0
	&& index+1 < N_FREE_LISTS
	&& (result = GC_stack_free_lists[index+1]) != 0) {
	/* Try next size up. */
	search_sz *= 2; index++;
    }
    if (result != 0) {
	base = GC_stack_free_lists[index]->base;
	GC_stack_free_lists[index] = GC_stack_free_lists[index]->next;
    } else {
#ifdef MMAP_STACKS
	base = (ptr_t)mmap(0, search_sz + GC_page_size,
			   PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_NORESERVE,
			   GC_zfd, 0);
	if (base == (ptr_t)-1) {
	    *stack_size = 0;
	    return NULL;
	}
	/* Protect the lowest page to detect overflow. */
	mprotect(base, GC_page_size, PROT_NONE);
	/* Should this use divHBLKSZ(search_sz + GC_page_size)? -- cf */
	GC_is_fresh((struct hblk *)base, divHBLKSZ(search_sz));
	base += GC_page_size;
#else
	base = (ptr_t)GC_scratch_alloc(search_sz + 2*GC_page_size);
	if (base == NULL) {
	    *stack_size = 0;
	    return NULL;
	}
	base = (ptr_t)(((word)base + GC_page_size) & ~(GC_page_size - 1));
	/* Protect hottest page to detect overflow. */
#	ifdef SOLARIS23_MPROTECT_BUG_FIXED
	    mprotect(base, GC_page_size, PROT_NONE);
#	endif
	GC_is_fresh((struct hblk *)base, divHBLKSZ(search_sz));
	base += GC_page_size;
#endif
    }
    *stack_size = search_sz;
    return(base);
}
/* Caller holds allocation lock.	*/
void GC_stack_free(ptr_t stack, size_t size)
{
    register int index = 0;
    register size_t search_sz = GC_min_stack_sz;
    register struct stack_head *head;

#ifdef MMAP_STACKS
    /* Remap /dev/zero over the stack: zeroes old pointers and dirty bits. */
    mmap(stack, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_NORESERVE|MAP_FIXED,
	 GC_zfd, 0);
#endif
    while (search_sz < size) {
	search_sz *= 2;
	index++;
    }
    if (search_sz != size) ABORT("Bad stack size");

    head = (struct stack_head *)(stack + search_sz - sizeof(struct stack_head));
    head->next = GC_stack_free_lists[index];
    head->base = stack;
    GC_stack_free_lists[index] = head;
}
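/* Hedged usage sketch: the size reported back by GC_stack_alloc must be	*/
/* passed to GC_stack_free unchanged; GC_stack_free recomputes the free-list	*/
/* index from it and ABORTs on a size it never handed out.			*/
#if 0
    {
	size_t sz = 1024*1024;			/* want at least 1 MB */
	ptr_t stack = GC_stack_alloc(&sz);	/* sz becomes the actual size */

	/* ... run a thread on [stack, stack + sz) ... */
	GC_stack_free(stack, sz);
    }
#endif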
void GC_my_stack_limits();

/* Notify virtual dirty bit implementation that known empty parts of	*/
/* stacks do not contain useful data.					*/
/* Caller holds allocation lock.					*/
void GC_old_stacks_are_fresh()
{
/* No point in doing this for MMAP stacks - and pointers are zero'd out */
/* by the mmap in GC_stack_free.					 */
#ifndef MMAP_STACKS
    register int i;
    register struct stack_head *s;
    register ptr_t p;
    register size_t sz;
    register struct hblk * h;

    for (i = 0, sz = GC_min_stack_sz; i < N_FREE_LISTS;
	 i++, sz *= 2) {
	for (s = GC_stack_free_lists[i]; s != 0; s = s->next) {
	    p = s->base;
	    h = (struct hblk *)(((word)p + HBLKSIZE-1) & ~(HBLKSIZE-1));
	    if ((ptr_t)h == p) {
		GC_is_fresh((struct hblk *)p, divHBLKSZ(sz));
	    } else {
		GC_is_fresh(h, divHBLKSZ(sz) - 1);
		BZERO(p, (ptr_t)h - p);
	    }
	}
    }
#endif /* MMAP_STACKS */
    GC_my_stack_limits();
}
/* The set of all known threads.  We intercept thread creation and	*/
/* joins.  We never actually create detached threads.  We allocate	*/
/* all new thread stacks ourselves.  Together these let us maintain	*/
/* this data structure.						*/

# define THREAD_TABLE_SZ 128	/* Must be power of 2	*/
volatile GC_thread GC_threads[THREAD_TABLE_SZ];

void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
}
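/* Illustrative note: GC_new_thread/GC_delete_thread/GC_lookup_thread below	*/
/* all pick a bucket with ((word)id) % THREAD_TABLE_SZ; since the table size	*/
/* is a power of 2, this is just a mask of the low-order id bits, and		*/
/* colliding entries chain through the next field.				*/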
/* Add a thread to GC_threads.  We assume it wasn't already there.	*/
/* Caller holds allocation lock.					*/
GC_thread GC_new_thread(thread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
	result = &first_thread;
	first_thread_used = TRUE;
	/* Don't acquire allocation lock, since we may already hold it. */
    } else {
	result = (struct GC_Thread_Rep *)
		 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    /* result -> finished = 0; */
    (void) cond_init(&(result->join_cv), USYNC_THREAD, 0);
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there.	*/
/* (The code intentionally traps if it wasn't.)			*/
/* Caller holds allocation lock.				*/
void GC_delete_thread(thread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p -> id != id) {
	prev = p;
	p = p -> next;
    }
    if (prev == 0) {
	GC_threads[hv] = p -> next;
    } else {
	prev -> next = p -> next;
    }
}
/* Return the GC_thread corresponding to a given thread_t.	*/
/* Returns 0 if it's not there.					*/
/* Caller holds allocation lock.				*/
GC_thread GC_lookup_thread(thread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && p -> id != id) p = p -> next;
    return(p);
}
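/* Hedged usage sketch: all three table operations assume the caller holds	*/
/* the allocation lock, so a lookup/delete pair cannot race with the daemon:	*/
#if 0
    {
	GC_thread t;

	LOCK();
	t = GC_lookup_thread(thr_self());
	if (t != 0 && (t -> flags & FINISHED)) {
	    GC_delete_thread(t -> id);
	}
	UNLOCK();
    }
#endif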
/* Solaris 2/Intel uses an initial stack size limit slightly bigger than the
   SPARC default of 8 MB.  Account for this to warn only if the user has
   raised the limit beyond the default.

   This is identical to DFLSSIZ defined in <sys/vm_machparam.h>.  This file
   is installed in /usr/platform/`uname -m`/include, which is not in the
   default include directory list, so copy the definition here.  */
#ifdef I386
#   define MAX_ORIG_STACK_SIZE (8 * 1024 * 1024 + ((USRSTACK) & 0x3FFFFF))
#else
#   define MAX_ORIG_STACK_SIZE (8 * 1024 * 1024)
#endif
word GC_get_orig_stack_size() {
    struct rlimit rl;
    static int warned = 0;
    word result;

    if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
    result = (word)rl.rlim_cur & ~(HBLKSIZE-1);
    if (result > MAX_ORIG_STACK_SIZE) {
	if (!warned) {
	    WARN("Large stack limit(%ld): only scanning 8 MB\n", result);
	    warned = 1;
	}
	result = MAX_ORIG_STACK_SIZE;
    }
    return result;
}
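/* Worked example (illustrative): with the default 8 MB limit the result is	*/
/* just rlim_cur rounded down to a heap-block multiple.  If the user raised	*/
/* the limit to, say, 32 MB, we warn once and clamp to MAX_ORIG_STACK_SIZE	*/
/* rather than scan 24 MB of mostly meaningless data.				*/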
/* Notify dirty bit implementation of unused parts of my stack.	*/
/* Caller holds allocation lock.					*/
void GC_my_stack_limits()
{
    int dummy;
    register ptr_t hottest = (ptr_t)((word)(&dummy) & ~(HBLKSIZE-1));
    register GC_thread me = GC_lookup_thread(thr_self());
    register size_t stack_size = me -> stack_size;
    register ptr_t stack;

    if (stack_size == 0) {
	/* original thread */
	/* Empirically, what should be the stack page with lowest	*/
	/* address is actually inaccessible.				*/
	stack_size = GC_get_orig_stack_size() - GC_page_size;
	stack = GC_stackbottom - stack_size + GC_page_size;
    } else {
	stack = me -> stack;
    }
    if (stack > hottest || stack + stack_size < hottest) {
	ABORT("sp out of bounds");
    }
    GC_is_fresh((struct hblk *)stack, divHBLKSZ(hottest - stack));
}
/* We hold allocation lock.  Should do exactly the right thing if the	*/
/* world is stopped.  Should not fail if it isn't.			*/
void GC_push_all_stacks()
{
    register int i;
    register GC_thread p;
    register ptr_t sp = GC_approx_sp();
    register ptr_t bottom, top;

#   define PUSH(bottom,top) \
      if (GC_dirty_maintained) { \
	GC_push_selected((bottom), (top), GC_page_was_ever_dirty, \
			 GC_push_all_stack); \
      } else { \
	GC_push_all_stack((bottom), (top)); \
      }
    GC_push_all_stack((ptr_t)GC_lwp_registers,
		      (ptr_t)GC_lwp_registers
		      + max_lwps * sizeof(GC_lwp_registers[0]));
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
	for (p = GC_threads[i]; p != 0; p = p -> next) {
	    if (p -> stack_size != 0) {
		bottom = p -> stack;
		top = p -> stack + p -> stack_size;
	    } else {
		/* The original stack. */
		bottom = GC_stackbottom - GC_get_orig_stack_size() + GC_page_size;
		top = GC_stackbottom;
	    }
	    if ((word)sp > (word)bottom && (word)sp < (word)top) bottom = sp;
	    PUSH(bottom, top);
	}
    }
}
int GC_is_thread_stack(ptr_t addr)
{
    register int i;
    register GC_thread p;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
	for (p = GC_threads[i]; p != 0; p = p -> next) {
	    if (p -> stack_size != 0) {
		if (p -> stack <= addr &&
		    addr < p -> stack + p -> stack_size)
		    return 1;
	    }
	}
    }
    return 0;
}
/* The only thread that ever really performs a thr_join.	*/
void * GC_thr_daemon(void * dummy)
{
    void *status;
    thread_t departed;
    register GC_thread t;
    register int i;
    register int result;

    for (;;) {
      start:
	result = thr_join((thread_t)0, &departed, &status);
	LOCK();
	if (result != 0) {
	    /* No more threads; wait for create. */
	    for (i = 0; i < THREAD_TABLE_SZ; i++) {
		for (t = GC_threads[i]; t != 0; t = t -> next) {
		    if (!(t -> flags & (DETACHED | FINISHED))) {
			UNLOCK();
			goto start;	/* Thread started just before we */
					/* acquired the lock.		 */
		    }
		}
	    }
	    cond_wait(&GC_create_cv, &GC_allocate_ml);
	    UNLOCK();
	} else {
	    t = GC_lookup_thread(departed);
	    GC_multithreaded--;
	    if (!(t -> flags & CLIENT_OWNS_STACK)) {
		GC_stack_free(t -> stack, t -> stack_size);
	    }
	    if (t -> flags & DETACHED) {
		GC_delete_thread(departed);
	    } else {
		t -> status = status;
		t -> flags |= FINISHED;
		cond_signal(&(t -> join_cv));
		cond_broadcast(&GC_prom_join_cv);
	    }
	    UNLOCK();
	}
    }
}
/* We hold the allocation lock, or caller ensures that 2 instances	*/
/* cannot be invoked concurrently.					*/
void GC_thr_init(void)
{
    GC_thread t;
    thread_t tid;
    int ret;

    if (GC_thr_initialized)
	return;
    GC_thr_initialized = TRUE;
    GC_min_stack_sz = ((thr_min_stack() + 32*1024 + HBLKSIZE-1)
		       & ~(HBLKSIZE - 1));
#ifdef MMAP_STACKS
    GC_zfd = open("/dev/zero", O_RDONLY);
    if (GC_zfd == -1)
	ABORT("Can't open /dev/zero");
#endif /* MMAP_STACKS */
    cond_init(&GC_prom_join_cv, USYNC_THREAD, 0);
    cond_init(&GC_create_cv, USYNC_THREAD, 0);
    /* Add the initial thread, so we can stop it. */
    t = GC_new_thread(thr_self());
    t -> stack_size = 0;
    t -> flags = DETACHED | CLIENT_OWNS_STACK;
    ret = thr_create(0 /* stack */, 0 /* stack_size */, GC_thr_daemon,
		     0 /* arg */, THR_DETACHED | THR_DAEMON,
		     &tid /* thread_id */);
    if (ret != 0) {
	GC_err_printf1("Thr_create returned %ld\n", ret);
	ABORT("Can't fork daemon");
    }
    thr_setprio(tid, 126);
}
/* We acquire the allocation lock to prevent races with	*/
/* stopping/starting the world.					*/
/* This is no more correct than the underlying Solaris 2.X	*/
/* implementation.  Under 2.3 THIS IS BROKEN.			*/
int GC_thr_suspend(thread_t target_thread)
{
    GC_thread t;
    int result;

    LOCK();
    result = thr_suspend(target_thread);
    if (result == 0) {
	t = GC_lookup_thread(target_thread);
	if (t == 0) ABORT("thread unknown to GC");
	t -> flags |= SUSPNDED;
    }
    UNLOCK();
    return(result);
}

int GC_thr_continue(thread_t target_thread)
{
    GC_thread t;
    int result;

    LOCK();
    result = thr_continue(target_thread);
    if (result == 0) {
	t = GC_lookup_thread(target_thread);
	if (t == 0) ABORT("thread unknown to GC");
	t -> flags &= ~SUSPNDED;
    }
    UNLOCK();
    return(result);
}
int GC_thr_join(thread_t wait_for, thread_t *departed, void **status)
{
    register GC_thread t;
    int result = 0;

    LOCK();
    if (wait_for == 0) {
	register int i;
	register GC_bool thread_exists;

	for (;;) {
	    thread_exists = FALSE;
	    for (i = 0; i < THREAD_TABLE_SZ; i++) {
		for (t = GC_threads[i]; t != 0; t = t -> next) {
		    if (!(t -> flags & DETACHED)) {
			if (t -> flags & FINISHED) {
			    goto found;
			}
			thread_exists = TRUE;
		    }
		}
	    }
	    if (!thread_exists) {
		result = ESRCH;
		goto out;
	    }
	    cond_wait(&GC_prom_join_cv, &GC_allocate_ml);
	}
    } else {
	t = GC_lookup_thread(wait_for);
	if (t == 0 || t -> flags & DETACHED) {
	    result = ESRCH;
	    goto out;
	}
	if (wait_for == thr_self()) {
	    result = EDEADLK;
	    goto out;
	}
	while (!(t -> flags & FINISHED)) {
	    cond_wait(&(t -> join_cv), &GC_allocate_ml);
	}
    }
  found:
    if (status) *status = t -> status;
    if (departed) *departed = t -> id;
    cond_destroy(&(t -> join_cv));
    GC_delete_thread(t -> id);
  out:
    UNLOCK();
    return(result);
}
int
GC_thr_create(void *stack_base, size_t stack_size,
	      void *(*start_routine)(void *), void *arg, long flags,
	      thread_t *new_thread)
{
    int result;
    GC_thread t;
    thread_t my_new_thread;
    word my_flags = 0;
    void * stack = stack_base;

    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    GC_multithreaded++;
    if (stack == 0) {
	if (stack_size == 0) stack_size = 1024*1024;
	stack = (void *)GC_stack_alloc(&stack_size);
	if (stack == 0) {
	    GC_multithreaded--;
	    UNLOCK();
	    return(ENOMEM);
	}
    } else {
	my_flags |= CLIENT_OWNS_STACK;
    }
    if (flags & THR_DETACHED) my_flags |= DETACHED;
    if (flags & THR_SUSPENDED) my_flags |= SUSPNDED;
    result = thr_create(stack, stack_size, start_routine,
			arg, flags & ~THR_DETACHED, &my_new_thread);
    if (result == 0) {
	t = GC_new_thread(my_new_thread);
	t -> flags = my_flags;
	if (!(my_flags & DETACHED)) cond_init(&(t -> join_cv), USYNC_THREAD, 0);
	t -> stack = stack;
	t -> stack_size = stack_size;
	if (new_thread != 0) *new_thread = my_new_thread;
	cond_signal(&GC_create_cv);
    } else {
	GC_multithreaded--;
	if (!(my_flags & CLIENT_OWNS_STACK)) {
	    GC_stack_free(stack, stack_size);
	}
    }
    UNLOCK();
    return(result);
}
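/* Hedged usage sketch: clients call GC_thr_create exactly like		*/
/* thr_create(3T).  A zero stack argument makes the wrapper allocate a	*/
/* GC-known stack, and THR_DETACHED is stripped from the real call so	*/
/* GC_thr_daemon above can always join.  my_start is hypothetical.	*/
#if 0
    {
	thread_t tid;

	if (GC_thr_create(0, 0, my_start, 0, THR_NEW_LWP, &tid) != 0) {
	    ABORT("GC_thr_create failed");
	}
    }
#endif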
# else /* !GC_SOLARIS_THREADS */

int GC_no_sunOS_threads;	/* Dummy declaration: keeps this file from	*/
				/* being an empty translation unit.		*/