1 /**********************************************************************
7 Copyright (C) 2004-2007 Koichi Sasada
9 **********************************************************************/
14 model 1: Userlevel Thread
15 Same as traditional ruby thread.
17 model 2: Native Thread with Giant VM lock
18 Using pthread (or Windows thread) and Ruby threads run concurrent.
20 model 3: Native Thread with fine grain lock
21 Using pthread and Ruby threads run concurrent or parallel.
23 ------------------------------------------------------------------------
26 Only the thread that holds the mutex (GVL: Global VM Lock) can run. On
27 thread scheduling, the running thread releases the GVL. If the running
28 thread attempts a blocking operation, it must release the GVL so that
29 another thread can continue running. After the blocking operation, the
30 thread must check for interrupts (RUBY_VM_CHECK_INTS).
32 Every VM can run parallel.
34 Ruby threads are scheduled by OS thread scheduler.
36 ------------------------------------------------------------------------
39 Every threads run concurrent or parallel and to access shared object
40 exclusive access control is needed. For example, to access String
41 object or Array object, fine grain lock must be locked every time.
47 #include "eval_intern.h"
50 #ifndef USE_NATIVE_THREAD_PRIORITY
51 #define USE_NATIVE_THREAD_PRIORITY 0
52 #define RUBY_THREAD_PRIORITY_MAX 3
53 #define RUBY_THREAD_PRIORITY_MIN -3
57 #define THREAD_DEBUG 0
63 static void sleep_timeval(rb_thread_t *th, struct timeval time);
64 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec);
65 static void sleep_forever(rb_thread_t *th, int nodeadlock);
66 static double timeofday(void);
67 struct timeval rb_time_interval(VALUE);
68 static int rb_thread_dead(rb_thread_t *th);
70 static void rb_check_deadlock(rb_vm_t *vm);
72 void rb_signal_exec(rb_thread_t *th, int sig);
73 void rb_disable_interrupt(void);
74 void rb_thread_stop_timer_thread(void);
76 static const VALUE eKillSignal = INT2FIX(0);
77 static const VALUE eTerminateSignal = INT2FIX(1);
78 static volatile int system_working = 1;
/* st_delete_wrap: remove `key` from `table`, discarding any stored value
 * (third argument 0 means "don't return the value").
 * NOTE(review): the return type and braces fall outside this view. */
81 st_delete_wrap(st_table *table, st_data_t key)
83 st_delete(table, &key, 0);
86 /********************************************************************************/
88 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
90 struct rb_blocking_region_buffer {
91 enum rb_thread_status prev_status;
92 struct rb_unblock_callback oldubf;
95 static void set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
96 struct rb_unblock_callback *old);
97 static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);
99 static void inline blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
100 rb_unblock_function_t *func, void *arg);
101 static void inline blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
103 #define GVL_UNLOCK_BEGIN() do { \
104 rb_thread_t *_th_stored = GET_THREAD(); \
105 rb_gc_save_machine_context(_th_stored); \
106 native_mutex_unlock(&_th_stored->vm->global_vm_lock)
108 #define GVL_UNLOCK_END() \
109 native_mutex_lock(&_th_stored->vm->global_vm_lock); \
110 rb_thread_set_current(_th_stored); \
113 #define BLOCKING_REGION_CORE(exec) do { \
114 GVL_UNLOCK_BEGIN(); {\
120 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
121 rb_thread_t *__th = GET_THREAD(); \
122 struct rb_blocking_region_buffer __region; \
123 blocking_region_begin(__th, &__region, ubf, ubfarg); \
125 blocking_region_end(__th, &__region); \
126 RUBY_VM_CHECK_INTS(); \
130 #ifdef HAVE_VA_ARGS_MACRO
131 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
132 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
133 #define POSITION_FORMAT "%s:%d:"
134 #define POSITION_ARGS ,file, line
136 void rb_thread_debug(const char *fmt, ...);
137 #define thread_debug rb_thread_debug
138 #define POSITION_FORMAT
139 #define POSITION_ARGS
142 # if THREAD_DEBUG < 0
143 static int rb_thread_debug_enabled;
146 rb_thread_s_debug(void)
148 return INT2NUM(rb_thread_debug_enabled);
152 rb_thread_s_debug_set(VALUE self, VALUE val)
154 rb_thread_debug_enabled = RTEST(val);
158 # define rb_thread_debug_enabled THREAD_DEBUG
161 #define thread_debug if(0)printf
165 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
167 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
168 VALUE *register_stack_start));
169 static void timer_thread_function(void *);
172 #include "thread_win32.c"
174 #define DEBUG_OUT() \
175 WaitForSingleObject(&debug_mutex, INFINITE); \
176 printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
178 ReleaseMutex(&debug_mutex);
180 #elif defined(HAVE_PTHREAD_H)
181 #include "thread_pthread.c"
183 #define DEBUG_OUT() \
184 pthread_mutex_lock(&debug_mutex); \
185 printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
187 pthread_mutex_unlock(&debug_mutex);
190 #error "unsupported thread type"
194 static int debug_mutex_initialized = 1;
195 static rb_thread_lock_t debug_mutex;
199 #ifdef HAVE_VA_ARGS_MACRO
200 const char *file, int line,
202 const char *fmt, ...)
207 if (!rb_thread_debug_enabled) return;
209 if (debug_mutex_initialized == 1) {
210 debug_mutex_initialized = 0;
211 native_mutex_initialize(&debug_mutex);
215 vsnprintf(buf, BUFSIZ, fmt, args);
/* Install `func`/`arg` as th's unblocking function under th->interrupt_lock,
 * saving the previous callback into `old` (if non-NULL).  If an interrupt is
 * already pending, the lock is dropped before installing — the retry control
 * flow between lines 231 and 235 is not visible here (presumably a re-check
 * loop); TODO confirm against the full source. */
224 set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
225     struct rb_unblock_callback *old)
228     RUBY_VM_CHECK_INTS(); /* check signal or so */
229     native_mutex_lock(&th->interrupt_lock);
230     if (th->interrupt_flag) {
/* an interrupt arrived first: release the lock rather than replace the ubf
 * mid-interrupt */
231 	native_mutex_unlock(&th->interrupt_lock);
/* save the old callback, then publish the new one while still locked */
235     if (old) *old = th->unblock;
236     th->unblock.func = func;
237     th->unblock.arg = arg;
239     native_mutex_unlock(&th->interrupt_lock);
/* reset_unblock_function: restore a previously saved unblock callback under
 * the interrupt lock.  NOTE(review): the assignment of *old back into
 * th->unblock is in a line not visible here. */
243 reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
245     native_mutex_lock(&th->interrupt_lock);
247     native_mutex_unlock(&th->interrupt_lock);
/* rb_thread_interrupt: raise th's interrupt flag and, if an unblocking
 * function is installed, call it to kick th out of its blocking operation.
 * Serialized against set_unblock_function() via th->interrupt_lock. */
251 rb_thread_interrupt(rb_thread_t *th)
253     native_mutex_lock(&th->interrupt_lock);
254     RUBY_VM_SET_INTERRUPT(th);
255     if (th->unblock.func) {
256 	(th->unblock.func)(th->unblock.arg);
261     native_mutex_unlock(&th->interrupt_lock);
/* terminate_i: st_foreach callback over vm->living_threads.  Every thread
 * except the main thread is interrupted and marked THREAD_TO_KILL with
 * eTerminateSignal as its pending error; the main thread is only logged.
 * NOTE(review): the key-to-thval conversion and the ST_CONTINUE return are
 * in lines missing from this view. */
266 terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
270     GetThreadPtr(thval, th);
272     if (th != main_thread) {
273 	thread_debug("terminate_i: %p\n", (void *)th);
274 	rb_thread_interrupt(th);
275 	th->thrown_errinfo = eTerminateSignal;
276 	th->status = THREAD_TO_KILL;
279 	thread_debug("terminate_i: main thread (%p)\n", (void *)th);
/* Mutex implementation record: a native lock + condition variable, the
 * owning thread, waiter bookkeeping flags, and `next_mutex` linking all
 * mutexes currently held by one thread (cf. th->keeping_mutexes). */
284 typedef struct rb_mutex_struct
286     rb_thread_lock_t lock;
287     rb_thread_cond_t cond;
288     struct rb_thread_struct volatile *th;
289     volatile int cond_waiting, cond_notified;
290     struct rb_mutex_struct *next_mutex;
/* forward decl: unlock every mutex in a keeping_mutexes chain */
293 static void rb_mutex_unlock_all(mutex_t *mutex);
/* Terminate every thread except the caller, which must be the main thread
 * (rb_bug otherwise).  Releases the main thread's held mutexes, flags all
 * other threads for termination via terminate_i, then keeps scheduling
 * (ignoring exceptions) until this is the only live thread, and finally
 * stops the timer thread. */
296 rb_thread_terminate_all(void)
298     rb_thread_t *th = GET_THREAD(); /* main thread */
299     rb_vm_t *vm = th->vm;
300     if (vm->main_thread != th) {
301 	rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
302 	       (void *)vm->main_thread, (void *)th);
305     /* unlock all locking mutexes */
306     if (th->keeping_mutexes) {
307 	rb_mutex_unlock_all(th->keeping_mutexes);
310     thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
311     st_foreach(vm->living_threads, terminate_i, (st_data_t)th);
/* spin the scheduler until all other threads have died */
313     while (!rb_thread_alone()) {
315 	if (EXEC_TAG() == 0) {
316 	    rb_thread_schedule();
319 	    /* ignore exception */
323     rb_thread_stop_timer_thread();
/* thread_cleanup_func_before_exec: minimal teardown safe to run before
 * exec(): mark the thread dead and forget its machine stack bounds (the
 * register-stack line is presumably IA64-only, guarded by a missing #ifdef
 * — TODO confirm). */
327 thread_cleanup_func_before_exec(void *th_ptr)
329     rb_thread_t *th = th_ptr;
330     th->status = THREAD_KILLED;
331     th->machine_stack_start = th->machine_stack_end = 0;
333     th->machine_register_stack_start = th->machine_register_stack_end = 0;
/* thread_cleanup_func: full teardown — the above, plus destroying the
 * native thread resources. */
338 thread_cleanup_func(void *th_ptr)
340     rb_thread_t *th = th_ptr;
341     thread_cleanup_func_before_exec(th_ptr);
342     native_thread_destroy(th);
345 extern void ruby_error_print(void);
346 static VALUE rb_thread_raise(int, VALUE *, rb_thread_t *);
347 void rb_thread_recycle_stack_release(VALUE *);
350 ruby_thread_init_stack(rb_thread_t *th)
352 native_thread_init_stack(th);
/* thread_start_func_2: body of every newly created Ruby thread, run on the
 * native thread.  Acquires the GVL, runs the thread's proc or C function,
 * handles the resulting exception/exit state, then tears the thread down:
 * unlocks mutexes, removes itself from living_threads, wakes joiners, and
 * releases the GVL.  Many structural lines (braces, else-arms) are missing
 * from this extract — comments below describe only what is visible. */
356 thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
359     VALUE args = th->first_args;
361     rb_thread_t *join_th;
362     rb_thread_t *main_th;
363     VALUE errinfo = Qnil;
365     ruby_thread_set_native(th);
367     th->machine_stack_start = stack_start;
369     th->machine_register_stack_start = register_stack_start;
371     thread_debug("thread start: %p\n", (void *)th);
/* block until we own the GVL; only then may we touch Ruby objects */
373     native_mutex_lock(&th->vm->global_vm_lock);
375     thread_debug("thread start (get lock): %p\n", (void *)th);
376     rb_thread_set_current(th);
379     if ((state = EXEC_TAG()) == 0) {
380 	SAVE_ROOT_JMPBUF(th, {
/* Thread.new with a block: invoke the proc with the saved args */
381 	    if (th->first_proc) {
382 		GetProcPtr(th->first_proc, proc);
384 		th->local_lfp = proc->block.lfp;
385 		th->local_svar = Qnil;
386 		th->value = vm_invoke_proc(th, proc, proc->block.self,
387 					   RARRAY_LEN(args), RARRAY_PTR(args), 0);
/* rb_thread_create path: call the C function instead */
390 		th->value = (*th->first_func)((void *)th->first_args);
/* non-local exit: classify what the thread died with */
395 	errinfo = th->errinfo;
396 	if (NIL_P(errinfo)) errinfo = rb_errinfo();
397 	if (state == TAG_FATAL) {
398 	    /* fatal error within this thread, need to stop whole script */
400 	else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
401 	    if (th->safe_level >= 4) {
402 		th->errinfo = rb_exc_new3(rb_eSecurityError,
403 					  rb_sprintf("Insecure exit at level %d", th->safe_level));
407 	else if (th->safe_level < 4 &&
408 		 (th->vm->thread_abort_on_exception ||
409 		  th->abort_on_exception || RTEST(ruby_debug))) {
410 	    /* exit on main_thread */
418     th->status = THREAD_KILLED;
419     thread_debug("thread end: %p\n", (void *)th);
421     main_th = th->vm->main_thread;
/* abort_on_exception path: forward the error object to the main thread */
423 	if (TYPE(errinfo) == T_OBJECT) {
424 	    /* treat with normal error object */
425 	    rb_thread_raise(1, &errinfo, main_th);
430     /* locking_mutex must be Qfalse */
431     if (th->locking_mutex != Qfalse) {
432 	rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
433 	       (void *)th, th->locking_mutex);
436     /* unlock all locking mutexes */
437     if (th->keeping_mutexes) {
438 	rb_mutex_unlock_all(th->keeping_mutexes);
439 	th->keeping_mutexes = NULL;
442     /* delete self from living_threads */
443     st_delete_wrap(th->vm->living_threads, th->self);
445     /* wake up joining threads */
446     join_th = th->join_list_head;
448 	if (join_th == main_th) errinfo = Qnil;
449 	rb_thread_interrupt(join_th);
450 	switch (join_th->status) {
451 	  case THREAD_STOPPED: case THREAD_STOPPED_FOREVER:
452 	    join_th->status = THREAD_RUNNABLE;
455 	join_th = join_th->join_list_next;
457     if (th != main_th) rb_check_deadlock(th->vm);
/* recycle the VM stack unless a fiber still owns it */
459     if (!th->root_fiber) {
460 	rb_thread_recycle_stack_release(th->stack);
464     thread_cleanup_func(th);
465     if (th->vm->main_thread == th) {
466 	rb_thread_stop_timer_thread();
468     native_mutex_unlock(&th->vm->global_vm_lock);
/* thread_create_core: shared backend of Thread.new / Thread.start /
 * rb_thread_create.  Populates the allocated thread object `thval` with the
 * block (or marks it as a C-function thread when `fn` is given), inherits
 * priority/thgroup from the creator, registers it in living_threads, and
 * spawns the native thread.  Returns thval (return line not visible). */
474 thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
478     if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
479 	rb_raise(rb_eThreadError,
480 		 "can't start a new thread (frozen ThreadGroup)");
482     GetThreadPtr(thval, th);
484     /* setup thread environment */
/* Qfalse first_proc signals the C-function path in thread_start_func_2 */
486     th->first_proc = fn ? Qfalse : rb_block_proc();
487     th->first_args = args; /* GC: shouldn't put before above line */
489     th->priority = GET_THREAD()->priority;
490     th->thgroup = GET_THREAD()->thgroup;
492     native_mutex_initialize(&th->interrupt_lock);
494     st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
495     native_thread_create(th);
/* thread_s_new: Thread.new — allocate, run #initialize (which must call
 * super/Thread#initialize so first_args gets set), and raise ThreadError
 * if a subclass's initialize forgot to.  NOTE(review): the line that
 * actually starts the thread is outside this view. */
500 thread_s_new(int argc, VALUE *argv, VALUE klass)
503     VALUE thread = rb_thread_alloc(klass);
504     rb_obj_call_init(thread, argc, argv);
505     GetThreadPtr(thread, th);
506     if (!th->first_args) {
507 	rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
508 		 rb_class2name(klass));
515 * Thread.start([args]*) {|args| block } => thread
516 * Thread.fork([args]*) {|args| block } => thread
518 * Basically the same as <code>Thread::new</code>. However, if class
519 * <code>Thread</code> is subclassed, then calling <code>start</code> in that
520 * subclass will not invoke the subclass's <code>initialize</code> method.
524 thread_start(VALUE klass, VALUE args)
526 return thread_create_core(rb_thread_alloc(klass), args, 0);
/* thread_initialize: Thread#initialize — requires a block; if the thread
 * was already initialized, raises ThreadError, naming the original block's
 * file:line when the proc's source location is available. */
530 thread_initialize(VALUE thread, VALUE args)
533     if (!rb_block_given_p()) {
534 	rb_raise(rb_eThreadError, "must be called with a block");
536     GetThreadPtr(thread, th);
537     if (th->first_args) {
538 	VALUE rb_proc_location(VALUE self);
539 	VALUE proc = th->first_proc, line, loc;
/* no location info available: generic message */
541 	if (!proc || !RTEST(loc = rb_proc_location(proc))) {
542 	    rb_raise(rb_eThreadError, "already initialized thread");
544 	file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
545 	if (NIL_P(line = RARRAY_PTR(loc)[1])) {
546 	    rb_raise(rb_eThreadError, "already initialized thread - %s",
549 	rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
550 		 file, NUM2INT(line));
552     return thread_create_core(thread, args, 0);
556 rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
558 return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
562 /* +infty, for this purpose */
563 #define DELAY_INFTY 1E30
566 rb_thread_t *target, *waiting;
/* remove_from_join_list: ensure-handler for thread_join — if the target is
 * still alive, unlink the waiting thread from the target's join list so a
 * timed-out joiner is not woken later. */
572 remove_from_join_list(VALUE arg)
574     struct join_arg *p = (struct join_arg *)arg;
575     rb_thread_t *target_th = p->target, *th = p->waiting;
577     if (target_th->status != THREAD_KILLED) {
578 	rb_thread_t **pth = &target_th->join_list_head;
/* walk the singly linked join list until we find ourselves, then splice out */
582 		*pth = th->join_list_next;
585 	    pth = &(*pth)->join_list_next;
/* thread_join_sleep: sleep until the target dies or the deadline passes.
 * Forever-joins use deadlockable sleep_forever; timed joins sleep in slices
 * of (limit - now).  NOTE(review): the forever/timed branch structure and
 * the success/timeout return values are in lines missing here. */
593 thread_join_sleep(VALUE arg)
595     struct join_arg *p = (struct join_arg *)arg;
596     rb_thread_t *target_th = p->target, *th = p->waiting;
597     double now, limit = p->limit;
599     while (target_th->status != THREAD_KILLED) {
601 	    sleep_forever(th, 1);
606 		thread_debug("thread_join: timeout (thid: %p)\n",
607 			     (void *)target_th->thread_id);
610 	    sleep_wait_for_interrupt(th, limit - now);
612 	thread_debug("thread_join: interrupted (thid: %p)\n",
613 		     (void *)target_th->thread_id);
/* thread_join: core of Thread#join/#value.  Registers the caller on the
 * target's join list, sleeps via thread_join_sleep (with
 * remove_from_join_list as the ensure handler), and on success re-raises
 * the target's pending exception in the caller before returning the
 * target's self.  Returns Qnil on timeout (that return is outside this
 * view — TODO confirm). */
619 thread_join(rb_thread_t *target_th, double delay)
621     rb_thread_t *th = GET_THREAD();
624     arg.target = target_th;
626     arg.limit = timeofday() + delay;
627     arg.forever = delay == DELAY_INFTY;
629     thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);
631     if (target_th->status != THREAD_KILLED) {
/* push ourselves onto the target's join list, then wait */
632 	th->join_list_next = target_th->join_list_head;
633 	target_th->join_list_head = th;
634 	if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
635 		       remove_from_join_list, (VALUE)&arg)) {
640     thread_debug("thread_join: success (thid: %p)\n",
641 		 (void *)target_th->thread_id);
/* propagate the dead thread's exception into the joiner */
643     if (target_th->errinfo != Qnil) {
644 	VALUE err = target_th->errinfo;
649 	else if (TYPE(target_th->errinfo) == T_NODE) {
650 	    rb_exc_raise(vm_make_jump_tag_but_local_jump(
651 		GET_THROWOBJ_STATE(err), GET_THROWOBJ_VAL(err)));
654 	    /* normal exception */
658     return target_th->self;
664 * thr.join(limit) => thr
666 * The calling thread will suspend execution and run <i>thr</i>. Does not
667 * return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
668 * the time limit expires, <code>nil</code> will be returned, otherwise
669 * <i>thr</i> is returned.
671 * Any threads not joined will be killed when the main program exits. If
672 * <i>thr</i> had previously raised an exception and the
673 * <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
674 * (so the exception has not yet been processed) it will be processed at this
677 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
678 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
679 * x.join # Let x thread finish, a will be killed on exit.
685 * The following example illustrates the <i>limit</i> parameter.
687 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
688 * puts "Waiting" until y.join(0.15)
702 thread_join_m(int argc, VALUE *argv, VALUE self)
704 rb_thread_t *target_th;
705 double delay = DELAY_INFTY;
708 GetThreadPtr(self, target_th);
710 rb_scan_args(argc, argv, "01", &limit);
712 delay = rb_num2dbl(limit);
715 return thread_join(target_th, delay);
722 * Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
725 * a = Thread.new { 2 + 2 }
730 thread_value(VALUE self)
733 GetThreadPtr(self, th);
734 thread_join(th, DELAY_INFTY);
/* double2timeval: split a floating-point second count into a struct
 * timeval, correcting a negative usec remainder by borrowing one second
 * (the tv_sec decrement is in a line missing here — TODO confirm). */
742 static struct timeval
743 double2timeval(double d)
747     time.tv_sec = (int)d;
748     time.tv_usec = (int)((d - (int)d) * 1e6);
749     if (time.tv_usec < 0) {
750 	time.tv_usec += (long)1e6;
/* sleep_forever: block the thread until interrupted.  With `deadlockable`
 * set, the thread is parked as THREAD_STOPPED_FOREVER and counted by the
 * deadlock detector (rb_check_deadlock); the loop re-checks interrupts
 * until the status changes, then restores the previous status. */
757 sleep_forever(rb_thread_t *th, int deadlockable)
759     enum rb_thread_status prev_status = th->status;
761     th->status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
765 	    rb_check_deadlock(th->vm);
771 	RUBY_VM_CHECK_INTS();
772     } while (th->status == THREAD_STOPPED_FOREVER);
773     th->status = prev_status;
777 getclockofday(struct timeval *tp)
779 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
782 if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
783 tp->tv_sec = ts.tv_sec;
784 tp->tv_usec = ts.tv_nsec / 1000;
788 gettimeofday(tp, NULL);
/* sleep_timeval: sleep for (at least) `tv`.  Computes an absolute deadline
 * `to`, then loops: native_sleep, service interrupts, and — because an
 * interrupt may wake us early — recompute the remaining time and sleep
 * again until the deadline passes or our status is changed externally.
 * NOTE(review): the getclockofday(&tvn) refresh inside the loop is in a
 * line missing from this view. */
793 sleep_timeval(rb_thread_t *th, struct timeval tv)
795     struct timeval to, tvn;
796     enum rb_thread_status prev_status = th->status;
/* absolute deadline = now + tv, normalizing usec overflow */
799     to.tv_sec += tv.tv_sec;
800     if ((to.tv_usec += tv.tv_usec) >= 1000000) {
802 	to.tv_usec -= 1000000;
805     th->status = THREAD_STOPPED;
807 	native_sleep(th, &tv);
808 	RUBY_VM_CHECK_INTS();
810 	if (to.tv_sec < tvn.tv_sec) break;
811 	if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
812 	thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
813 		     (long)to.tv_sec, (long)to.tv_usec,
814 		     (long)tvn.tv_sec, (long)tvn.tv_usec);
/* woke early: shrink tv to the time still remaining */
815 	tv.tv_sec = to.tv_sec - tvn.tv_sec;
816 	if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
818 	    tv.tv_usec += 1000000;
820     } while (th->status == THREAD_STOPPED);
821     th->status = prev_status;
825 rb_thread_sleep_forever()
827 thread_debug("rb_thread_sleep_forever\n");
828 sleep_forever(GET_THREAD(), 0);
832 rb_thread_sleep_deadly()
834 thread_debug("rb_thread_sleep_deadly\n");
835 sleep_forever(GET_THREAD(), 1);
841 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
844 if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
845 return (double)tp.tv_sec + (double)tp.tv_nsec * 1e-9;
850 gettimeofday(&tv, NULL);
851 return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
856 sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec)
858 sleep_timeval(th, double2timeval(sleepsec));
862 sleep_for_polling(rb_thread_t *th)
866 time.tv_usec = 100 * 1000; /* 0.1 sec */
867 sleep_timeval(th, time);
871 rb_thread_wait_for(struct timeval time)
873 rb_thread_t *th = GET_THREAD();
874 sleep_timeval(th, time);
878 rb_thread_polling(void)
880 RUBY_VM_CHECK_INTS();
881 if (!rb_thread_alone()) {
882 rb_thread_t *th = GET_THREAD();
883 sleep_for_polling(th);
888 * CAUTION: This function causes thread switching.
889 * rb_thread_check_ints() check ruby's interrupts.
890 * some interrupt needs thread switching/invoke handlers,
895 rb_thread_check_ints(void)
897 RUBY_VM_CHECK_INTS();
901 * Hidden API for tcl/tk wrapper.
902 * There is no guarantee to perpetuate it.
905 rb_thread_check_trap_pending(void)
907 return GET_THREAD()->exec_signal != 0;
910 /* This function can be called in blocking region. */
912 rb_thread_interrupted(VALUE thval)
915 GetThreadPtr(thval, th);
916 return RUBY_VM_INTERRUPTED(th);
919 struct timeval rb_time_timeval();
922 rb_thread_sleep(int sec)
924 rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
/* rb_thread_schedule: voluntarily yield the GVL.  If other threads exist,
 * save this thread's machine context for GC, drop the GVL, yield the
 * native thread, reacquire the GVL, and service pending interrupts. */
928 rb_thread_schedule(void)
930     thread_debug("rb_thread_schedule\n");
931     if (!rb_thread_alone()) {
932 	rb_thread_t *th = GET_THREAD();
934 	thread_debug("rb_thread_schedule/switch start\n");
/* GC must be able to scan our stack while we are off the GVL */
936 	rb_gc_save_machine_context(th);
937 	native_mutex_unlock(&th->vm->global_vm_lock);
939 	    native_thread_yield();
941 	native_mutex_lock(&th->vm->global_vm_lock);
943 	rb_thread_set_current(th);
944 	thread_debug("rb_thread_schedule/switch done\n");
946 	RUBY_VM_CHECK_INTS();
950 /* blocking region */
/* blocking_region_begin: enter a blocking region — save the current status
 * and unblock callback into `region`, install `func`/`arg` as the new ubf,
 * mark the thread stopped, save its machine context for GC, and release
 * the GVL.  (Fixed: `&region` had been mangled to the `®` HTML entity in
 * this extract, here and in blocking_region_end.) */
952 blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
953 		      rb_unblock_function_t *func, void *arg)
955     region->prev_status = th->status;
956     set_unblock_function(th, func, arg, &region->oldubf);
957     th->status = THREAD_STOPPED;
958     thread_debug("enter blocking region (%p)\n", (void *)th);
959     rb_gc_save_machine_context(th);
960     native_mutex_unlock(&th->vm->global_vm_lock);
/* blocking_region_end: leave a blocking region — reacquire the GVL, restore
 * the saved unblock callback and (if still STOPPED) the saved status. */
964 blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
966     native_mutex_lock(&th->vm->global_vm_lock);
967     rb_thread_set_current(th);
968     thread_debug("leave blocking region (%p)\n", (void *)th);
969     remove_signal_thread_list(th);
970     reset_unblock_function(th, &region->oldubf);
971     if (th->status == THREAD_STOPPED) {
972 	th->status = region->prev_status;
/* Public wrappers: heap-allocate a region buffer and enter/leave it with
 * the default ubf_select unblocking function. */
976 struct rb_blocking_region_buffer *
977 rb_thread_blocking_region_begin(void)
979     rb_thread_t *th = GET_THREAD();
980     struct rb_blocking_region_buffer *region = ALLOC(struct rb_blocking_region_buffer);
981     blocking_region_begin(th, region, ubf_select, th);
986 rb_thread_blocking_region_end(struct rb_blocking_region_buffer *region)
988     rb_thread_t *th = GET_THREAD();
989     blocking_region_end(th, region);
991     RUBY_VM_CHECK_INTS();
995 * rb_thread_blocking_region - permit concurrent/parallel execution.
997 * This function does:
999 * Other Ruby threads may run in parallel.
1000 * (2) call func with data1.
1002 * Other Ruby threads can not run in parallel any more.
1004 * If another thread interrupts this thread (Thread#kill, signal deliverly,
1005 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1006 * "un-blocking function"). `ubf()' should interrupt `func()' execution.
1008 * There are built-in ubfs and you can specify these ubfs.
1009 * However, we can not guarantee our built-in ubfs interrupt
1010 * your `func()' correctly. Be careful to use rb_thread_blocking_region().
1012 * * RUBY_UBF_IO: ubf for IO operation
1013 * * RUBY_UBF_PROCESS: ubf for process operation
1015 * NOTE: You can not execute most of Ruby C API and touch Ruby objects
1016 * in `func()' and `ubf()' because current thread doesn't acquire
1017 * GVL (cause synchronization problem). Especially, ALLOC*() are
1018 * forbidden because they are related to GC. If you need to do it,
1019 * read source code of C APIs and confirm by yourself.
1021 * NOTE: In short, this API is difficult to use safely. I recommend you
1022 * use other ways if you have. We lack experiences to use this API.
1023 * Please report your problem related on it.
1026 * * rb_thread_interrupted() - check interrupt flag
1029 rb_thread_blocking_region(
1030 rb_blocking_function_t *func, void *data1,
1031 rb_unblock_function_t *ubf, void *data2)
1034 rb_thread_t *th = GET_THREAD();
1036 if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
1048 /* alias of rb_thread_blocking_region() */
1051 rb_thread_call_without_gvl(
1052 rb_blocking_function_t *func, void *data1,
1053 rb_unblock_function_t *ubf, void *data2)
1055 return rb_thread_blocking_region(func, data1, ubf, data2);
1059 * rb_thread_call_with_gvl - re-enter into Ruby world while releasing GVL.
1061 * While releasing GVL using rb_thread_blocking_region() or
1062 * rb_thread_call_without_gvl(), you can not access Ruby values or invoke methods.
1063 * If you need to access it, you must use this function rb_thread_call_with_gvl().
1065 * This function rb_thread_call_with_gvl() does:
1067 * (2) call passed function `func'.
1069 * (4) return a value which is returned at (2).
1071 * NOTE: You should not return Ruby object at (2) because such Object
1074 * NOTE: If an exception is raised in `func', this function "DOES NOT"
1075 * protect (catch) the exception. If you have any resources
1076 * which should free before throwing exception, you need use
1077 * rb_protect() in `func' and return a value which represents
1078 * exception is raised.
1080 * NOTE: This functions should not be called by a thread which
1081 * is not created as Ruby thread (created by Thread.new or so).
1082 * In other words, this function *DOES NOT* associate
1083 * NON-Ruby thread to Ruby thread.
/* rb_thread_call_with_gvl: re-enter the Ruby world from inside a blocking
 * region.  Looks up the Ruby thread for this native thread, temporarily
 * ends its blocking region (reacquiring the GVL), runs `func(data1)`, and
 * re-enters the blocking region with the previously saved unblock
 * callback.  Calling from a non-Ruby thread is a hard error. */
1086 rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1088     rb_thread_t *th = ruby_thread_from_native();
1089     struct rb_blocking_region_buffer *brb;
1090     struct rb_unblock_callback prev_unblock;
1094 	/* Error is occurred, but we can't use rb_bug()
1095 	 * because this thread is not Ruby's thread.
1096 	 * What should we do?
1099 	fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1103     brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
1104     prev_unblock = th->unblock;
1106     blocking_region_end(th, brb);
1107     /* enter to Ruby world: You can access Ruby values, methods and so on. */
1109     /* leave the Ruby world: You can not access Ruby values, etc. */
1110     blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg);
1116 * Thread.pass => nil
1118 * Invokes the thread scheduler to pass execution to another thread.
1120 * a = Thread.new { print "a"; Thread.pass;
1121 * print "b"; Thread.pass;
1123 * b = Thread.new { print "x"; Thread.pass;
1124 * print "y"; Thread.pass;
1129 * <em>produces:</em>
1135 thread_s_pass(VALUE klass)
1137 rb_thread_schedule();
/* rb_thread_execute_interrupts: drain th's pending interrupts.  Each loop
 * iteration snapshots and clears interrupt_flag (bit 0x01 = timer tick,
 * bit 0x04 = deferred-finalizer request), then handles in order: pending
 * signals, exceptions thrown from other threads (kill/terminate become
 * TAG_FATAL jumps), deferred finalizers, and finally timer-driven
 * time-slice scheduling.  No-op while an exception is already in flight
 * (raised_flag). */
1146 rb_thread_execute_interrupts(rb_thread_t *th)
1148     if (th->raised_flag) return;
1150     while (th->interrupt_flag) {
1151 	enum rb_thread_status status = th->status;
1152 	int timer_interrupt = th->interrupt_flag & 0x01;
1153 	int finalizer_interrupt = th->interrupt_flag & 0x04;
1155 	th->status = THREAD_RUNNABLE;
1156 	th->interrupt_flag = 0;
1158 	/* signal handling */
1159 	if (th->exec_signal) {
1160 	    int sig = th->exec_signal;
1161 	    th->exec_signal = 0;
1162 	    rb_signal_exec(th, sig);
1165 	/* exception from another thread */
1166 	if (th->thrown_errinfo) {
1167 	    VALUE err = th->thrown_errinfo;
1168 	    th->thrown_errinfo = 0;
1169 	    thread_debug("rb_thread_execute_interrupts: %ld\n", err);
/* Thread#kill / VM termination are delivered as TAG_FATAL jumps */
1171 	    if (err == eKillSignal || err == eTerminateSignal) {
1172 		th->errinfo = INT2FIX(TAG_FATAL);
1173 		TH_JUMP_TAG(th, TAG_FATAL);
1179 	th->status = status;
1181 	if (finalizer_interrupt) {
1182 	    rb_gc_finalize_deferred();
1185 	if (timer_interrupt) {
1186 	    EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);
/* time-slice accounting: reschedule when the slice is spent, then refill
 * from the thread priority (slice decrement lines not visible here) */
1188 	    if (th->slice > 0) {
1193 		rb_thread_schedule();
1194 		if (th->slice < 0) {
1199 		    th->slice = th->priority;
1208 rb_gc_mark_threads(void)
1213 /*****************************************************/
/* rb_thread_ready: mark th runnable by interrupting it (wakes it from any
 * blocking operation). */
1216 rb_thread_ready(rb_thread_t *th)
1218     rb_thread_interrupt(th);
/* rb_thread_raise: deliver an exception (built from argc/argv by
 * rb_make_exception) to th.  Dead threads are skipped; if th already has a
 * pending thrown_errinfo or an in-flight exception, yield the scheduler
 * first (the retry flow around line 1232 is not visible — TODO confirm). */
1222 rb_thread_raise(int argc, VALUE *argv, rb_thread_t *th)
1227     if (rb_thread_dead(th)) {
1231     if (th->thrown_errinfo != 0 || th->raised_flag) {
1232 	rb_thread_schedule();
1236     exc = rb_make_exception(argc, argv);
1237     th->thrown_errinfo = exc;
1238     rb_thread_ready(th);
/* rb_thread_signal_raise: raise SignalException(sig) on the main thread
 * (used from signal delivery). */
1243 rb_thread_signal_raise(void *thptr, int sig)
1246     rb_thread_t *th = thptr;
1248     argv[0] = rb_eSignal;
1249     argv[1] = INT2FIX(sig);
1250     rb_thread_raise(2, argv, th->vm->main_thread);
/* rb_thread_signal_exit: raise SystemExit("exit") on the main thread. */
1254 rb_thread_signal_exit(void *thptr)
1257     rb_thread_t *th = thptr;
1259     argv[0] = rb_eSystemExit;
1260     argv[1] = rb_str_new2("exit");
1261     rb_thread_raise(2, argv, th->vm->main_thread);
/* ruby_thread_stack_overflow: raise the preallocated SystemStackError via
 * a direct jump (no allocation possible on an exhausted stack). */
1265 ruby_thread_stack_overflow(rb_thread_t *th)
1267     th->errinfo = sysstack_error;
1268     th->raised_flag = 0;
1269     TH_JUMP_TAG(th, TAG_RAISE);
/* rb_thread_set_raised / rb_thread_reset_raised: set/clear the
 * RAISED_EXCEPTION bit; each reports (via missing return lines) whether
 * the bit was already in the requested state. */
1273 rb_thread_set_raised(rb_thread_t *th)
1275     if (th->raised_flag & RAISED_EXCEPTION) {
1278     th->raised_flag |= RAISED_EXCEPTION;
1283 rb_thread_reset_raised(rb_thread_t *th)
1285     if (!(th->raised_flag & RAISED_EXCEPTION)) {
1288     th->raised_flag &= ~RAISED_EXCEPTION;
1293 rb_thread_fd_close(int fd)
1300 * thr.raise(exception)
1302 * Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
1303 * caller does not have to be <i>thr</i>.
1305 * Thread.abort_on_exception = true
1306 * a = Thread.new { sleep(200) }
1309 * <em>produces:</em>
1311 * prog.rb:3: Gotcha (RuntimeError)
1312 * from prog.rb:2:in `initialize'
1313 * from prog.rb:2:in `new'
1318 thread_raise_m(int argc, VALUE *argv, VALUE self)
1321 GetThreadPtr(self, th);
1322 rb_thread_raise(argc, argv, th);
1329 * thr.exit => thr or nil
1330 * thr.kill => thr or nil
1331 * thr.terminate => thr or nil
1333 * Terminates <i>thr</i> and schedules another thread to be run. If this thread
1334 * is already marked to be killed, <code>exit</code> returns the
1335 * <code>Thread</code>. If this is the main thread, or the last thread, exits
/* rb_thread_kill: Thread#kill/#exit/#terminate.  Safe-level check for
 * killing other threads; already-dying threads return immediately; killing
 * the main thread exits the process.  Otherwise the thread is interrupted
 * with eKillSignal pending and marked THREAD_TO_KILL. */
1340 rb_thread_kill(VALUE thread)
1344     GetThreadPtr(thread, th);
/* $SAFE guard: only th->safe_level >= 4 threads are protected (the raise
 * in the missing lines — TODO confirm) */
1346     if (th != GET_THREAD() && th->safe_level < 4) {
1349     if (th->status == THREAD_TO_KILL || th->status == THREAD_KILLED) {
1352     if (th == th->vm->main_thread) {
1353 	rb_exit(EXIT_SUCCESS);
1356     thread_debug("rb_thread_kill: %p (%p)\n", (void *)th, (void *)th->thread_id);
1358     rb_thread_interrupt(th);
1359     th->thrown_errinfo = eKillSignal;
1360     th->status = THREAD_TO_KILL;
1368 * Thread.kill(thread) => thread
1370 * Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
1373 * a = Thread.new { loop { count += 1 } }
1375 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
1377 * a.alive? #=> false
1381 rb_thread_s_kill(VALUE obj, VALUE th)
1383 return rb_thread_kill(th);
1389 * Thread.exit => thread
1391 * Terminates the currently running thread and schedules another thread to be
1392 * run. If this thread is already marked to be killed, <code>exit</code>
1393 * returns the <code>Thread</code>. If this is the main thread, or the last
1394 * thread, exit the process.
1398 rb_thread_exit(void)
1400 return rb_thread_kill(GET_THREAD()->self);
1408 * Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
1409 * I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
1411 * c = Thread.new { Thread.stop; puts "hey!" }
1414 * <em>produces:</em>
/* rb_thread_wakeup: Thread#wakeup — make a stopped thread eligible to run.
 * Raises ThreadError for a dead thread; does not invoke the scheduler. */
1420 rb_thread_wakeup(VALUE thread)
1423     GetThreadPtr(thread, th);
1425     if (th->status == THREAD_KILLED) {
1426 	rb_raise(rb_eThreadError, "killed thread");
1428     rb_thread_ready(th);
/* don't resurrect a thread already marked for termination */
1429     if (th->status != THREAD_TO_KILL) {
1430 	th->status = THREAD_RUNNABLE;
1440  *    Wakes up <i>thr</i>, making it eligible for scheduling.
1442  *       a = Thread.new { puts "a"; Thread.stop; puts "c" }
1448  *    <em>produces:</em>
/* rb_thread_run: Thread#run — wakeup plus an immediate scheduler pass. */
1456 rb_thread_run(VALUE thread)
1458     rb_thread_wakeup(thread);
1459     rb_thread_schedule();
1466 * Thread.stop => nil
1468 * Stops execution of the current thread, putting it into a ``sleep'' state,
1469 * and schedules execution of another thread.
1471 * a = Thread.new { print "a"; Thread.stop; print "c" }
1477 * <em>produces:</em>
/* rb_thread_stop: Thread.stop — park the current thread in a deadlockable
 * sleep until woken; refuses when it is the only thread (that would be an
 * unwakeable deadlock). */
1483 rb_thread_stop(void)
1485     if (rb_thread_alone()) {
1486 	rb_raise(rb_eThreadError,
1487 		 "stopping only thread\n\tnote: use sleep to stop forever");
1489     rb_thread_sleep_deadly();
/* thread_list_i: st_foreach callback for Thread.list — collect threads in
 * any live state (runnable, stopped, to-kill) into the result array. */
1494 thread_list_i(st_data_t key, st_data_t val, void *data)
1496     VALUE ary = (VALUE)data;
1498     GetThreadPtr((VALUE)key, th);
1500     switch (th->status) {
1501       case THREAD_RUNNABLE:
1502       case THREAD_STOPPED:
1503       case THREAD_STOPPED_FOREVER:
1504       case THREAD_TO_KILL:
1505 	rb_ary_push(ary, th->self);
1512 /********************************************************************/
1516 * Thread.list => array
1518 * Returns an array of <code>Thread</code> objects for all threads that are
1519 * either runnable or stopped.
1521 * Thread.new { sleep(200) }
1522 * Thread.new { 1000000.times {|i| i*i } }
1523 * Thread.new { Thread.stop }
1524 * Thread.list.each {|t| p t}
1526 * <em>produces:</em>
1528 * #<Thread:0x401b3e84 sleep>
1529 * #<Thread:0x401b3f38 run>
1530 * #<Thread:0x401b3fb0 sleep>
1531 * #<Thread:0x401bdf4c run>
1535 rb_thread_list(void)
1537 VALUE ary = rb_ary_new();
1538 st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
1543 rb_thread_current(void)
1545 return GET_THREAD()->self;
1550 * Thread.current => thread
1552 * Returns the currently executing thread.
1554 * Thread.current #=> #<Thread:0x401bdf4c run>
1558 thread_s_current(VALUE klass)
1560 return rb_thread_current();
1564 rb_thread_main(void)
1566 return GET_THREAD()->vm->main_thread->self;
1570 rb_thread_s_main(VALUE klass)
1572 return rb_thread_main();
1578 * Thread.abort_on_exception => true or false
1580 * Returns the status of the global ``abort on exception'' condition. The
1581 * default is <code>false</code>. When set to <code>true</code>, or if the
1582 * global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
1583 * command line option <code>-d</code> was specified) all threads will abort
1584 * (the process will <code>exit(0)</code>) if an exception is raised in any
1585 * thread. See also <code>Thread::abort_on_exception=</code>.
1589 rb_thread_s_abort_exc(void)
1591 return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
1597 * Thread.abort_on_exception= boolean => true or false
1599 * When set to <code>true</code>, all threads will abort if an exception is
1600 * raised. Returns the new state.
1602 * Thread.abort_on_exception = true
1603 * t1 = Thread.new do
1604 * puts "In new thread"
1605 * raise "Exception from thread"
1608 * puts "not reached"
1610 * <em>produces:</em>
1613 * prog.rb:4: Exception from thread (RuntimeError)
1614 * from prog.rb:2:in `initialize'
1615 * from prog.rb:2:in `new'
1620 rb_thread_s_abort_exc_set(VALUE self, VALUE val)
1623 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
1630 * thr.abort_on_exception => true or false
1632 * Returns the status of the thread-local ``abort on exception'' condition for
1633 * <i>thr</i>. The default is <code>false</code>. See also
1634 * <code>Thread::abort_on_exception=</code>.
1638 rb_thread_abort_exc(VALUE thread)
1641 GetThreadPtr(thread, th);
1642 return th->abort_on_exception ? Qtrue : Qfalse;
1648 * thr.abort_on_exception= boolean => true or false
1650 * When set to <code>true</code>, causes all threads (including the main
1651 * program) to abort if an exception is raised in <i>thr</i>. The process will
1652 * effectively <code>exit(0)</code>.
1656 rb_thread_abort_exc_set(VALUE thread, VALUE val)
1661 GetThreadPtr(thread, th);
1662 th->abort_on_exception = RTEST(val);
1669 * thr.group => thgrp or nil
1671 * Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
1672 * the thread is not a member of any group.
1674 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
1678 rb_thread_group(VALUE thread)
1682 GetThreadPtr(thread, th);
1683 group = th->thgroup;
1692 thread_status_name(enum rb_thread_status status)
1695 case THREAD_RUNNABLE:
1697 case THREAD_STOPPED:
1698 case THREAD_STOPPED_FOREVER:
1700 case THREAD_TO_KILL:
1710 rb_thread_dead(rb_thread_t *th)
1712 return th->status == THREAD_KILLED;
1718 * thr.status => string, false or nil
1720 * Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
1721 * sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
1722 * ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
1723 * <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
1724 * terminated with an exception.
1726 * a = Thread.new { raise("die now") }
1727 * b = Thread.new { Thread.stop }
1728 * c = Thread.new { Thread.exit }
1729 * d = Thread.new { sleep }
1730 * d.kill #=> #<Thread:0x401b3678 aborting>
1732 * b.status #=> "sleep"
1733 * c.status #=> false
1734 * d.status #=> "aborting"
1735 * Thread.current.status #=> "run"
1739 rb_thread_status(VALUE thread)
1742 GetThreadPtr(thread, th);
1744 if (rb_thread_dead(th)) {
1745 if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
1751 return rb_str_new2(thread_status_name(th->status));
1757 * thr.alive? => true or false
1759 * Returns <code>true</code> if <i>thr</i> is running or sleeping.
1761 * thr = Thread.new { }
1762 * thr.join #=> #<Thread:0x401b3fb0 dead>
1763 * Thread.current.alive? #=> true
1764 * thr.alive? #=> false
1768 rb_thread_alive_p(VALUE thread)
1771 GetThreadPtr(thread, th);
1773 if (rb_thread_dead(th))
1780 * thr.stop? => true or false
1782 * Returns <code>true</code> if <i>thr</i> is dead or sleeping.
1784 * a = Thread.new { Thread.stop }
1785 * b = Thread.current
1791 rb_thread_stop_p(VALUE thread)
1794 GetThreadPtr(thread, th);
1796 if (rb_thread_dead(th))
1798 if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
1805 * thr.safe_level => integer
1807 * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
1808 * levels can help when implementing sandboxes which run insecure code.
1810 * thr = Thread.new { $SAFE = 3; sleep }
1811 * Thread.current.safe_level #=> 0
1812 * thr.safe_level #=> 3
1816 rb_thread_safe_level(VALUE thread)
1819 GetThreadPtr(thread, th);
1821 return INT2NUM(th->safe_level);
1826 * thr.inspect => string
1828 * Dump the name, id, and status of _thr_ to a string.
1832 rb_thread_inspect(VALUE thread)
1834 const char *cname = rb_obj_classname(thread);
1839 GetThreadPtr(thread, th);
1840 status = thread_status_name(th->status);
1841 str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
1842 OBJ_INFECT(str, thread);
1848 rb_thread_local_aref(VALUE thread, ID id)
1853 GetThreadPtr(thread, th);
1854 if (rb_safe_level() >= 4 && th != GET_THREAD()) {
1855 rb_raise(rb_eSecurityError, "Insecure: thread locals");
1857 if (!th->local_storage) {
1860 if (st_lookup(th->local_storage, id, &val)) {
1868 * thr[sym] => obj or nil
1870 * Attribute Reference---Returns the value of a thread-local variable, using
1871 * either a symbol or a string name. If the specified variable does not exist,
1872 * returns <code>nil</code>.
1874 * a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
1875 * b = Thread.new { Thread.current[:name] = "B"; Thread.stop }
1876 * c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
1877 * Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
1879 * <em>produces:</em>
1881 * #<Thread:0x401b3b3c sleep>: C
1882 * #<Thread:0x401b3bc8 sleep>: B
1883 * #<Thread:0x401b3c68 sleep>: A
1884 * #<Thread:0x401bdf4c run>:
1888 rb_thread_aref(VALUE thread, VALUE id)
1890 return rb_thread_local_aref(thread, rb_to_id(id));
1894 rb_thread_local_aset(VALUE thread, ID id, VALUE val)
1897 GetThreadPtr(thread, th);
1899 if (rb_safe_level() >= 4 && th != GET_THREAD()) {
1900 rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
1902 if (OBJ_FROZEN(thread)) {
1903 rb_error_frozen("thread locals");
1905 if (!th->local_storage) {
1906 th->local_storage = st_init_numtable();
1909 st_delete_wrap(th->local_storage, id);
1912 st_insert(th->local_storage, id, val);
1918 * thr[sym] = obj => obj
1920 * Attribute Assignment---Sets or creates the value of a thread-local variable,
1921 * using either a symbol or a string. See also <code>Thread#[]</code>.
1925 rb_thread_aset(VALUE self, ID id, VALUE val)
1927 return rb_thread_local_aset(self, rb_to_id(id), val);
1932 * thr.key?(sym) => true or false
1934 * Returns <code>true</code> if the given string (or symbol) exists as a
1935 * thread-local variable.
1937 * me = Thread.current
1939 * me.key?(:oliver) #=> true
1940 * me.key?(:stanley) #=> false
1944 rb_thread_key_p(VALUE self, VALUE key)
1947 ID id = rb_to_id(key);
1949 GetThreadPtr(self, th);
1951 if (!th->local_storage) {
1954 if (st_lookup(th->local_storage, id, 0)) {
1961 thread_keys_i(ID key, VALUE value, VALUE ary)
1963 rb_ary_push(ary, ID2SYM(key));
1968 vm_living_thread_num(rb_vm_t *vm)
1970 return vm->living_threads->num_entries;
1977 if (GET_THREAD()->vm->living_threads) {
1978 num = vm_living_thread_num(GET_THREAD()->vm);
1979 thread_debug("rb_thread_alone: %d\n", num);
1988 * Returns an an array of the names of the thread-local variables (as Symbols).
1990 * thr = Thread.new do
1991 * Thread.current[:cat] = 'meow'
1992 * Thread.current["dog"] = 'woof'
1994 * thr.join #=> #<Thread:0x401b3f10 dead>
1995 * thr.keys #=> [:dog, :cat]
1999 rb_thread_keys(VALUE self)
2002 VALUE ary = rb_ary_new();
2003 GetThreadPtr(self, th);
2005 if (th->local_storage) {
2006 st_foreach(th->local_storage, thread_keys_i, ary);
2013 * thr.priority => integer
2015 * Returns the priority of <i>thr</i>. Default is inherited from the
2016 * current thread which creating the new thread, or zero for the
2017 * initial main thread; higher-priority threads will run before
2018 * lower-priority threads.
2020 * Thread.current.priority #=> 0
2024 rb_thread_priority(VALUE thread)
2027 GetThreadPtr(thread, th);
2028 return INT2NUM(th->priority);
2034 * thr.priority= integer => thr
2036 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
2037 * will run before lower-priority threads.
2039 * count1 = count2 = 0
2041 * loop { count1 += 1 }
2046 * loop { count2 += 1 }
2055 rb_thread_priority_set(VALUE thread, VALUE prio)
2059 GetThreadPtr(thread, th);
2063 #if USE_NATIVE_THREAD_PRIORITY
2064 th->priority = NUM2INT(prio);
2065 native_thread_apply_priority(th);
2067 priority = NUM2INT(prio);
2068 if (priority > RUBY_THREAD_PRIORITY_MAX) {
2069 priority = RUBY_THREAD_PRIORITY_MAX;
2071 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
2072 priority = RUBY_THREAD_PRIORITY_MIN;
2074 th->priority = priority;
2075 th->slice = priority;
2077 return INT2NUM(th->priority);
2082 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
2085 * several Unix platforms supports file descriptors bigger than FD_SETSIZE
2086 * in select(2) system call.
2088 * - Linux 2.2.12 (?)
2089 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
2090 * select(2) documents how to allocate fd_set dynamically.
2091 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
2092 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
2093 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
2094 * select(2) documents how to allocate fd_set dynamically.
2095 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
2096 * - HP-UX documents how to allocate fd_set dynamically.
2097 * http://docs.hp.com/en/B2355-60105/select.2.html
2098 * - Solaris 8 has select_large_fdset
2100 * When fd_set is not big enough to hold big file descriptors,
2101 * it should be allocated dynamically.
2102 * Note that this assumes fd_set is structured as bitmap.
2104 * rb_fd_init allocates the memory.
2105 * rb_fd_term free the memory.
2106 * rb_fd_set may re-allocates bitmap.
2108 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
2112 rb_fd_init(volatile rb_fdset_t *fds)
2115 fds->fdset = ALLOC(fd_set);
2116 FD_ZERO(fds->fdset);
2120 rb_fd_term(rb_fdset_t *fds)
2122 if (fds->fdset) xfree(fds->fdset);
2128 rb_fd_zero(rb_fdset_t *fds)
2131 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
2132 FD_ZERO(fds->fdset);
2137 rb_fd_resize(int n, rb_fdset_t *fds)
2139 int m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
2140 int o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
2142 if (m < sizeof(fd_set)) m = sizeof(fd_set);
2143 if (o < sizeof(fd_set)) o = sizeof(fd_set);
2146 fds->fdset = realloc(fds->fdset, m);
2147 memset((char *)fds->fdset + o, 0, m - o);
2149 if (n >= fds->maxfd) fds->maxfd = n + 1;
2153 rb_fd_set(int n, rb_fdset_t *fds)
2155 rb_fd_resize(n, fds);
2156 FD_SET(n, fds->fdset);
2160 rb_fd_clr(int n, rb_fdset_t *fds)
2162 if (n >= fds->maxfd) return;
2163 FD_CLR(n, fds->fdset);
2167 rb_fd_isset(int n, const rb_fdset_t *fds)
2169 if (n >= fds->maxfd) return 0;
2170 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
2174 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
2176 int size = howmany(max, NFDBITS) * sizeof(fd_mask);
2178 if (size < sizeof(fd_set)) size = sizeof(fd_set);
2180 dst->fdset = realloc(dst->fdset, size);
2181 memcpy(dst->fdset, src, size);
2185 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
2187 fd_set *r = NULL, *w = NULL, *e = NULL;
2189 rb_fd_resize(n - 1, readfds);
2190 r = rb_fd_ptr(readfds);
2193 rb_fd_resize(n - 1, writefds);
2194 w = rb_fd_ptr(writefds);
2197 rb_fd_resize(n - 1, exceptfds);
2198 e = rb_fd_ptr(exceptfds);
2200 return select(n, r, w, e, timeout);
2208 #define FD_ZERO(f) rb_fd_zero(f)
2209 #define FD_SET(i, f) rb_fd_set(i, f)
2210 #define FD_CLR(i, f) rb_fd_clr(i, f)
2211 #define FD_ISSET(i, f) rb_fd_isset(i, f)
2215 #if defined(__CYGWIN__) || defined(_WIN32)
/*
 * Three-way compare two timevals: negative if a < b, zero if equal,
 * positive if a > b.  Seconds dominate; microseconds break ties.
 */
static long
cmp_tv(const struct timeval *a, const struct timeval *b)
{
    long d = (a->tv_sec - b->tv_sec);
    return (d != 0) ? d : (a->tv_usec - b->tv_usec);
}
/*
 * rest -= wait, normalizing microseconds by borrowing whole seconds.
 * Returns 0 (and leaves *rest untouched beyond partial borrowing) when
 * wait >= rest, i.e. the remaining time is exhausted; 1 otherwise.
 */
static int
subtract_tv(struct timeval *rest, const struct timeval *wait)
{
    while (rest->tv_usec < wait->tv_usec) {
	if (rest->tv_sec <= wait->tv_sec) {
	    return 0;
	}
	rest->tv_sec -= 1;
	rest->tv_usec += 1000 * 1000;
    }
    rest->tv_sec -= wait->tv_sec;
    rest->tv_usec -= wait->tv_usec;
    return 1;
}
/*
 * do_select(): interrupt-aware wrapper around select(2).
 *
 * NOTE(review): this region of the dump is missing many lines (the
 * BLOCKING_REGION plumbing, retry/goto labels, and several closing braces);
 * the surviving tokens are kept byte-identical below.  What the visible
 * lines establish: the original fd sets are saved so the call can be
 * re-issued after EINTR; on Cygwin/Win32 the wait is chopped into 100ms
 * polls (interruptible via __th->interrupt_flag); on timeout the remaining
 * time is recomputed from the deadline (`limit`) before retrying.
 */
2240 do_select(int n, fd_set *read, fd_set *write, fd_set *except,
2241 	      struct timeval *timeout)
2244     fd_set orig_read, orig_write, orig_except;
2248     struct timeval wait_rest;
2249 # if defined(__CYGWIN__) || defined(_WIN32)
2250     struct timeval start_time;
/* compute the absolute deadline once, up front */
2254 # if defined(__CYGWIN__) || defined(_WIN32)
2255 	gettimeofday(&start_time, NULL);
2256 	limit = (double)start_time.tv_sec + (double)start_time.tv_usec*1e-6;
2258 	limit = timeofday();
2260 	limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
2261 	wait_rest = *timeout;
2262 	timeout = &wait_rest;
/* snapshot the caller's sets: select(2) destroys them on each call */
2266     if (read) orig_read = *read;
2267     if (write) orig_write = *write;
2268     if (except) orig_except = *except;
2273 #if defined(__CYGWIN__) || defined(_WIN32)
2276 	    /* polling duration: 100ms */
2277 	    struct timeval wait_100ms, *wait;
2278 	    wait_100ms.tv_sec = 0;
2279 	    wait_100ms.tv_usec = 100 * 1000; /* 100 ms */
2282 		wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) > 0) ? &wait_100ms : timeout;
2285 		    result = select(n, read, write, except, wait);
2286 		    if (result < 0) lerrno = errno;
2287 		    if (result != 0) break;
/* nothing ready yet: restore the sets and account for elapsed time */
2289 		    if (read) *read = orig_read;
2290 		    if (write) *write = orig_write;
2291 		    if (except) *except = orig_except;
2294 			struct timeval elapsed;
2295 			gettimeofday(&elapsed, NULL);
2296 			subtract_tv(&elapsed, &start_time);
2297 			if (!subtract_tv(timeout, &elapsed)) {
2301 			if (cmp_tv(&wait_100ms, timeout) < 0) wait = timeout;
2303 	    } while (__th->interrupt_flag == 0);
2305 	} while (result == 0 && !finish);
/* non-polling platforms: single blocking select inside BLOCKING_REGION */
2309 	result = select(n, read, write, except, timeout);
2310 	if (result < 0) lerrno = errno;
2311     }, ubf_select, GET_THREAD());
/* EINTR path: restore sets and shrink the timeout toward the deadline */
2322 	    if (read) *read = orig_read;
2323 	    if (write) *write = orig_write;
2324 	    if (except) *except = orig_except;
2327 		double d = limit - timeofday();
2329 		wait_rest.tv_sec = (unsigned int)d;
2330 		wait_rest.tv_usec = (long)((d-(double)wait_rest.tv_sec)*1e6);
2331 		if (wait_rest.tv_sec < 0)  wait_rest.tv_sec = 0;
2332 		if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
/*
 * rb_thread_wait_fd_rw(): block until +fd+ is ready for read (read != 0)
 * or write, retrying do_select until it reports readiness.
 *
 * NOTE(review): the dump drops the fd-set setup, the error branch, and
 * several braces; tokens kept byte-identical.  Visible behavior: raises
 * IOError on a closed stream (fd < 0, presumably — confirm), returns
 * immediately when this is the only thread, and loops on do_select with
 * an infinite timeout otherwise.
 */
2344 rb_thread_wait_fd_rw(int fd, int read)
2347     thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
2350 	rb_raise(rb_eIOError, "closed stream");
2352     if (rb_thread_alone()) return;
2353     while (result <= 0) {
2359 	    result = do_select(fd + 1, rb_fd_ptr(&set), 0, 0, 0);
2362 	    result = do_select(fd + 1, 0, rb_fd_ptr(&set), 0, 0);
2372     thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
/* Block until +fd+ is readable. */
void
rb_thread_wait_fd(int fd)
{
    rb_thread_wait_fd_rw(fd, 1);
}
2382 rb_thread_fd_writable(int fd)
2384 rb_thread_wait_fd_rw(fd, 0);
2389 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
2390 struct timeval *timeout)
2392 if (!read && !write && !except) {
2394 rb_thread_sleep_forever();
2397 rb_thread_wait_for(*timeout);
2401 return do_select(max, read, write, except, timeout);
2410 #ifdef USE_CONSERVATIVE_STACK_END
2412 rb_gc_set_stack_end(VALUE **stack_end_p)
2415 *stack_end_p = &stack_end;
2420 rb_gc_save_machine_context(rb_thread_t *th)
2422 SET_MACHINE_STACK_END(&th->machine_stack_end);
2423 FLUSH_REGISTER_WINDOWS;
2425 th->machine_register_stack_end = rb_ia64_bsp();
2427 setjmp(th->machine_regs);
2434 int rb_get_next_signal(void);
2437 timer_thread_function(void *arg)
2439 rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */
2442 /* for time slice */
2443 RUBY_VM_SET_TIMER_INTERRUPT(vm->running_thread);
2446 if ((sig = rb_get_next_signal()) > 0) {
2447 rb_thread_t *mth = vm->main_thread;
2448 enum rb_thread_status prev_status = mth->status;
2449 thread_debug("main_thread: %s, sig: %d\n",
2450 thread_status_name(prev_status), sig);
2451 mth->exec_signal = sig;
2452 if (mth->status != THREAD_KILLED) mth->status = THREAD_RUNNABLE;
2453 rb_thread_interrupt(mth);
2454 mth->status = prev_status;
2458 /* prove profiler */
2459 if (vm->prove_profile.enable) {
2460 rb_thread_t *th = vm->running_thread;
2462 if (vm->during_gc) {
2463 /* GC prove profiling */
2470 rb_thread_stop_timer_thread(void)
2472 if (timer_thread_id && native_stop_timer_thread()) {
2473 native_thread_join(timer_thread_id);
2474 timer_thread_id = 0;
2479 rb_thread_reset_timer_thread(void)
2481 timer_thread_id = 0;
2485 rb_thread_start_timer_thread(void)
2488 rb_thread_create_timer_thread();
2492 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
2495 VALUE lines = (VALUE)val;
2497 for (i = 0; i < RARRAY_LEN(lines); i++) {
2498 if (RARRAY_PTR(lines)[i] != Qnil) {
2499 RARRAY_PTR(lines)[i] = INT2FIX(0);
2506 clear_coverage(void)
2508 extern VALUE rb_get_coverages(void);
2509 VALUE coverages = rb_get_coverages();
2510 if (RTEST(coverages)) {
2511 st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
2516 rb_thread_atfork_internal(int (*atfork)(st_data_t, st_data_t, st_data_t))
2518 rb_thread_t *th = GET_THREAD();
2519 rb_vm_t *vm = th->vm;
2520 VALUE thval = th->self;
2521 vm->main_thread = th;
2523 st_foreach(vm->living_threads, atfork, (st_data_t)th);
2524 st_clear(vm->living_threads);
2525 st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
2531 terminate_atfork_i(st_data_t key, st_data_t val, st_data_t current_th)
2535 GetThreadPtr(thval, th);
2537 if (th != (rb_thread_t *)current_th) {
2538 thread_cleanup_func(th);
2544 rb_thread_atfork(void)
2546 rb_thread_atfork_internal(terminate_atfork_i);
2547 rb_reset_random_seed();
2551 terminate_atfork_before_exec_i(st_data_t key, st_data_t val, st_data_t current_th)
2555 GetThreadPtr(thval, th);
2557 if (th != (rb_thread_t *)current_th) {
2558 thread_cleanup_func_before_exec(th);
2564 rb_thread_atfork_before_exec(void)
2566 rb_thread_atfork_internal(terminate_atfork_before_exec_i);
2575 * Document-class: ThreadGroup
2577 * <code>ThreadGroup</code> provides a means of keeping track of a number of
2578 * threads as a group. A <code>Thread</code> can belong to only one
2579 * <code>ThreadGroup</code> at a time; adding a thread to a new group will
2580 * remove it from any previous group.
2582 * Newly created threads belong to the same group as the thread from which they
2586 static VALUE thgroup_s_alloc(VALUE);
2588 thgroup_s_alloc(VALUE klass)
2591 struct thgroup *data;
2593 group = Data_Make_Struct(klass, struct thgroup, 0, -1, data);
2595 data->group = group;
2600 struct thgroup_list_params {
2606 thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
2608 VALUE thread = (VALUE)key;
2609 VALUE ary = ((struct thgroup_list_params *)data)->ary;
2610 VALUE group = ((struct thgroup_list_params *)data)->group;
2612 GetThreadPtr(thread, th);
2614 if (th->thgroup == group) {
2615 rb_ary_push(ary, thread);
2622 * thgrp.list => array
2624 * Returns an array of all existing <code>Thread</code> objects that belong to
2627 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
2631 thgroup_list(VALUE group)
2633 VALUE ary = rb_ary_new();
2634 struct thgroup_list_params param;
2637 param.group = group;
2638 st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
2645 * thgrp.enclose => thgrp
2647 * Prevents threads from being added to or removed from the receiving
2648 * <code>ThreadGroup</code>. New threads can still be started in an enclosed
2649 * <code>ThreadGroup</code>.
2651 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
2652 * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
2653 * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
2656 * <em>produces:</em>
2658 * ThreadError: can't move from the enclosed thread group
2662 thgroup_enclose(VALUE group)
2664 struct thgroup *data;
2666 Data_Get_Struct(group, struct thgroup, data);
2675 * thgrp.enclosed? => true or false
2677 * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
2678 * ThreadGroup#enclose.
2682 thgroup_enclosed_p(VALUE group)
2684 struct thgroup *data;
2686 Data_Get_Struct(group, struct thgroup, data);
2695 * thgrp.add(thread) => thgrp
2697 * Adds the given <em>thread</em> to this group, removing it from any other
2698 * group to which it may have previously belonged.
2700 * puts "Initial group is #{ThreadGroup::Default.list}"
2701 * tg = ThreadGroup.new
2702 * t1 = Thread.new { sleep }
2703 * t2 = Thread.new { sleep }
2704 * puts "t1 is #{t1}"
2705 * puts "t2 is #{t2}"
2707 * puts "Initial group now #{ThreadGroup::Default.list}"
2708 * puts "tg group now #{tg.list}"
2710 * <em>produces:</em>
2712 * Initial group is #<Thread:0x401bdf4c>
2713 * t1 is #<Thread:0x401b3c90>
2714 * t2 is #<Thread:0x401b3c18>
2715 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
2716 * tg group now #<Thread:0x401b3c90>
2720 thgroup_add(VALUE group, VALUE thread)
2723 struct thgroup *data;
2726 GetThreadPtr(thread, th);
2728 if (OBJ_FROZEN(group)) {
2729 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
2731 Data_Get_Struct(group, struct thgroup, data);
2732 if (data->enclosed) {
2733 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
2740 if (OBJ_FROZEN(th->thgroup)) {
2741 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
2743 Data_Get_Struct(th->thgroup, struct thgroup, data);
2744 if (data->enclosed) {
2745 rb_raise(rb_eThreadError,
2746 "can't move from the enclosed thread group");
2749 th->thgroup = group;
2755 * Document-class: Mutex
2757 * Mutex implements a simple semaphore that can be used to coordinate access to
2758 * shared data from multiple concurrent threads.
2763 * semaphore = Mutex.new
2766 * semaphore.synchronize {
2767 * # access shared resource
2772 * semaphore.synchronize {
2773 * # access shared resource
2779 #define GetMutexPtr(obj, tobj) \
2780 Data_Get_Struct(obj, mutex_t, tobj)
2782 static const char *mutex_unlock(mutex_t *mutex, rb_thread_t volatile *th);
2785 mutex_free(void *ptr)
2788 mutex_t *mutex = ptr;
2790 /* rb_warn("free locked mutex"); */
2791 const char *err = mutex_unlock(mutex, mutex->th);
2792 if (err) rb_bug("%s", err);
2794 native_mutex_destroy(&mutex->lock);
2795 native_cond_destroy(&mutex->cond);
2801 mutex_alloc(VALUE klass)
2806 obj = Data_Make_Struct(klass, mutex_t, NULL, mutex_free, mutex);
2807 native_mutex_initialize(&mutex->lock);
2808 native_cond_initialize(&mutex->cond);
2814 * Mutex.new => mutex
2816 * Creates a new Mutex
2819 mutex_initialize(VALUE self)
2827 return mutex_alloc(rb_cMutex);
2832 * mutex.locked? => true or false
2834 * Returns +true+ if this lock is currently held by some thread.
2837 rb_mutex_locked_p(VALUE self)
2840 GetMutexPtr(self, mutex);
2841 return mutex->th ? Qtrue : Qfalse;
2845 mutex_locked(rb_thread_t *th, VALUE self)
2848 GetMutexPtr(self, mutex);
2850 if (th->keeping_mutexes) {
2851 mutex->next_mutex = th->keeping_mutexes;
2853 th->keeping_mutexes = mutex;
2858 * mutex.try_lock => true or false
2860 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
2864 rb_mutex_trylock(VALUE self)
2867 VALUE locked = Qfalse;
2868 GetMutexPtr(self, mutex);
2870 native_mutex_lock(&mutex->lock);
2871 if (mutex->th == 0) {
2872 mutex->th = GET_THREAD();
2875 mutex_locked(GET_THREAD(), self);
2877 native_mutex_unlock(&mutex->lock);
/*
 * lock_func(): the blocking part of Mutex#lock, run without the GVL.
 * Waits on the mutex's condvar until ownership is acquired
 * (`mutex->th || (mutex->th = th, 0)` atomically claims a free mutex under
 * the native lock) or an interrupt is observed.
 *
 * NOTE(review): lines inside the wait loop (the interrupted=1 branch,
 * cond_notified bookkeeping on entry, the deadlock/last_thread path and the
 * final return) are missing from the dump; tokens kept byte-identical.
 * Return value is the interrupt state (0/1/2) consumed by rb_mutex_lock.
 */
2883 lock_func(rb_thread_t *th, mutex_t *mutex, int last_thread)
2885     int interrupted = 0;
2886 #if 0 /* for debug */
2887     native_thread_yield();
2890     native_mutex_lock(&mutex->lock);
2891     th->transition_for_lock = 0;
2892     while (mutex->th || (mutex->th = th, 0)) {
2898 	mutex->cond_waiting++;
2899 	native_cond_wait(&mutex->cond, &mutex->lock);
2900 	mutex->cond_notified--;
2902 	if (RUBY_VM_INTERRUPTED(th)) {
2907     th->transition_for_lock = 1;
2908     native_mutex_unlock(&mutex->lock);
2910     if (interrupted == 2) native_thread_yield();
2911 #if 0 /* for debug */
2912     native_thread_yield();
2919 lock_interrupt(void *ptr)
2921 mutex_t *mutex = (mutex_t *)ptr;
2922 native_mutex_lock(&mutex->lock);
2923 if (mutex->cond_waiting > 0) {
2924 native_cond_broadcast(&mutex->cond);
2925 mutex->cond_notified = mutex->cond_waiting;
2926 mutex->cond_waiting = 0;
2928 native_mutex_unlock(&mutex->lock);
2933 * mutex.lock => true or false
2935 * Attempts to grab the lock and waits if it isn't available.
2936 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
/*
 * rb_mutex_lock(): Mutex#lock.  Fast path is rb_mutex_trylock; on
 * contention it parks in lock_func (GVL released) with lock_interrupt
 * installed as the unblock function, handling deadlock detection when all
 * other threads are asleep and re-checking interrupts after acquisition.
 *
 * NOTE(review): many lines (the sleeper++/-- around the blocking region,
 * last_thread computation, loop close, final return of self) are missing
 * from the dump; tokens kept byte-identical.  Relocking by the owner is an
 * explicit ThreadError ("deadlock; recursive locking").
 */
2939 rb_mutex_lock(VALUE self)
2942     if (rb_mutex_trylock(self) == Qfalse) {
2944 	rb_thread_t *th = GET_THREAD();
2945 	GetMutexPtr(self, mutex);
/* owner re-entering would wait on itself forever — fail fast */
2947 	if (mutex->th == GET_THREAD()) {
2948 	    rb_raise(rb_eThreadError, "deadlock; recursive locking");
2951 	while (mutex->th != th) {
2953 	    enum rb_thread_status prev_status = th->status;
2954 	    int last_thread = 0;
2955 	    struct rb_unblock_callback oldubf;
2957 	    set_unblock_function(th, lock_interrupt, mutex, &oldubf);
2958 	    th->status = THREAD_STOPPED_FOREVER;
2960 	    th->locking_mutex = self;
/* everyone else already sleeping -> we are the deadlock candidate */
2961 	    if (vm_living_thread_num(th->vm) == th->vm->sleeper) {
2965 	    th->transition_for_lock = 1;
2966 	    BLOCKING_REGION_CORE({
2967 		interrupted = lock_func(th, mutex, last_thread);
2969 	    th->transition_for_lock = 0;
2970 	    remove_signal_thread_list(th);
2971 	    reset_unblock_function(th, &oldubf);
2973 	    th->locking_mutex = Qfalse;
2974 	    if (mutex->th && interrupted == 2) {
2975 		rb_check_deadlock(th->vm);
2977 	    if (th->status == THREAD_STOPPED_FOREVER) {
2978 		th->status = prev_status;
2982 	    if (mutex->th == th) mutex_locked(th, self);
/* an interrupt may have been queued while we were parked */
2985 	    RUBY_VM_CHECK_INTS();
/*
 * mutex_unlock(): core of Mutex#unlock.  Under the native lock it validates
 * ownership (returning an error string, not raising, so callers choose the
 * failure mode), signals one waiter if any, then unlinks the mutex from
 * th->keeping_mutexes.
 *
 * NOTE(review): several lines (the ownership-release assignment, the
 * no-waiter branch, list-walk loop brackets, final return) are missing from
 * the dump; tokens kept byte-identical.  Returns NULL on success.
 */
2993 mutex_unlock(mutex_t *mutex, rb_thread_t volatile *th)
2995     const char *err = NULL;
2998     native_mutex_lock(&mutex->lock);
3000     if (mutex->th == 0) {
3001 	err = "Attempt to unlock a mutex which is not locked";
3003     else if (mutex->th != th) {
3004 	err = "Attempt to unlock a mutex which is locked by another thread";
/* hand the mutex to exactly one parked waiter, if there is one */
3008 	if (mutex->cond_waiting > 0) {
3009 	    /* waiting thread */
3010 	    native_cond_signal(&mutex->cond);
3011 	    mutex->cond_waiting--;
3012 	    mutex->cond_notified++;
3016     native_mutex_unlock(&mutex->lock);
/* unlink from the owner's keeping_mutexes singly-linked list */
3019 	th_mutex = th->keeping_mutexes;
3020 	if (th_mutex == mutex) {
3021 	    th->keeping_mutexes = mutex->next_mutex;
3026 		tmp_mutex = th_mutex->next_mutex;
3027 		if (tmp_mutex == mutex) {
3028 		    th_mutex->next_mutex = tmp_mutex->next_mutex;
3031 		th_mutex = tmp_mutex;
3034 	mutex->next_mutex = NULL;
3042 * mutex.unlock => self
3044 * Releases the lock.
3045 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
3048 rb_mutex_unlock(VALUE self)
3052 GetMutexPtr(self, mutex);
3054 err = mutex_unlock(mutex, GET_THREAD());
3055 if (err) rb_raise(rb_eThreadError, "%s", err);
/*
 * rb_mutex_unlock_all(): walk a dead thread's keeping_mutexes list and
 * release every mutex it still holds; a failure here is VM corruption
 * (rb_bug), not a user error.
 *
 * NOTE(review): the loop header, the local `mutex` assignment and the
 * function close are missing from the dump; tokens kept byte-identical.
 */
3061 rb_mutex_unlock_all(mutex_t *mutexes)
3068 	/* rb_warn("mutex #<%p> remains to be locked by terminated thread",
3070 	mutexes = mutex->next_mutex;
3071 	err = mutex_unlock(mutex, GET_THREAD());
3072 	if (err) rb_bug("invalid keeping_mutexes: %s", err);
3077 rb_mutex_sleep_forever(VALUE time)
3079 rb_thread_sleep_deadly();
3084 rb_mutex_wait_for(VALUE time)
3086 const struct timeval *t = (struct timeval *)time;
3087 rb_thread_wait_for(*t);
3092 rb_mutex_sleep(VALUE self, VALUE timeout)
3097 if (!NIL_P(timeout)) {
3098 t = rb_time_interval(timeout);
3100 rb_mutex_unlock(self);
3102 if (NIL_P(timeout)) {
3103 rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
3106 rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
3108 end = time(0) - beg;
3109 return INT2FIX(end);
3114 * mutex.sleep(timeout = nil) => number
3116 * Releases the lock and sleeps +timeout+ seconds if it is given and
3117 * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
3118 * the current thread.
3121 mutex_sleep(int argc, VALUE *argv, VALUE self)
3125 rb_scan_args(argc, argv, "01", &timeout);
3126 return rb_mutex_sleep(self, timeout);
3131 * mutex.synchronize { ... } => result of the block
3133 * Obtains a lock, runs the block, and releases the lock when the block
3134 * completes. See the example under +Mutex+.
3138 rb_mutex_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
3140 rb_mutex_lock(mutex);
3141 return rb_ensure(func, arg, rb_mutex_unlock, mutex);
3145 * Document-class: Barrier
3148 barrier_alloc(VALUE klass)
3150 return Data_Wrap_Struct(klass, rb_gc_mark, 0, (void *)mutex_alloc(0));
3154 rb_barrier_new(void)
3156 VALUE barrier = barrier_alloc(rb_cBarrier);
3157 rb_mutex_lock((VALUE)DATA_PTR(barrier));
3162 rb_barrier_wait(VALUE self)
3164 VALUE mutex = (VALUE)DATA_PTR(self);
3167 if (!mutex) return Qfalse;
3168 GetMutexPtr(mutex, m);
3169 if (m->th == GET_THREAD()) return Qfalse;
3170 rb_mutex_lock(mutex);
3171 if (DATA_PTR(self)) return Qtrue;
3172 rb_mutex_unlock(mutex);
3177 rb_barrier_release(VALUE self)
3179 return rb_mutex_unlock((VALUE)DATA_PTR(self));
3183 rb_barrier_destroy(VALUE self)
3185 VALUE mutex = (VALUE)DATA_PTR(self);
3187 return rb_mutex_unlock(mutex);
3190 /* variables for recursive traversals */
3191 static ID recursive_key;
3194 recursive_check(VALUE hash, VALUE obj)
3196 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3200 VALUE list = rb_hash_aref(hash, ID2SYM(rb_frame_this_func()));
3202 if (NIL_P(list) || TYPE(list) != T_HASH)
3204 if (NIL_P(rb_hash_lookup(list, obj)))
3211 recursive_push(VALUE hash, VALUE obj)
3215 sym = ID2SYM(rb_frame_this_func());
3216 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3217 hash = rb_hash_new();
3218 rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
3222 list = rb_hash_aref(hash, sym);
3224 if (NIL_P(list) || TYPE(list) != T_HASH) {
3225 list = rb_hash_new();
3226 rb_hash_aset(hash, sym, list);
3228 rb_hash_aset(list, obj, Qtrue);
3233 recursive_pop(VALUE hash, VALUE obj)
3237 sym = ID2SYM(rb_frame_this_func());
3238 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3241 symname = rb_inspect(sym);
3242 thrname = rb_inspect(rb_thread_current());
3244 rb_raise(rb_eTypeError, "invalid inspect_tbl hash for %s in %s",
3245 StringValuePtr(symname), StringValuePtr(thrname));
3247 list = rb_hash_aref(hash, sym);
3248 if (NIL_P(list) || TYPE(list) != T_HASH) {
3249 VALUE symname = rb_inspect(sym);
3250 VALUE thrname = rb_inspect(rb_thread_current());
3251 rb_raise(rb_eTypeError, "invalid inspect_tbl list for %s in %s",
3252 StringValuePtr(symname), StringValuePtr(thrname));
3254 rb_hash_delete(list, obj);
3258 rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
3260 VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
3261 VALUE objid = rb_obj_id(obj);
3263 if (recursive_check(hash, objid)) {
3264 return (*func) (obj, arg, Qtrue);
3267 VALUE result = Qundef;
3270 hash = recursive_push(hash, objid);
3272 if ((state = EXEC_TAG()) == 0) {
3273 result = (*func) (obj, arg, Qfalse);
3276 recursive_pop(hash, objid);
3285 static rb_event_hook_t *
3286 alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
3288 rb_event_hook_t *hook = ALLOC(rb_event_hook_t);
3290 hook->flag = events;
3296 thread_reset_event_flags(rb_thread_t *th)
3298 rb_event_hook_t *hook = th->event_hooks;
3299 rb_event_flag_t flag = th->event_flags & RUBY_EVENT_VM;
3308 rb_thread_add_event_hook(rb_thread_t *th,
3309 rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
3311 rb_event_hook_t *hook = alloc_event_hook(func, events, data);
3312 hook->next = th->event_hooks;
3313 th->event_hooks = hook;
3314 thread_reset_event_flags(th);
3318 set_threads_event_flags_i(st_data_t key, st_data_t val, st_data_t flag)
3322 GetThreadPtr(thval, th);
3325 th->event_flags |= RUBY_EVENT_VM;
3328 th->event_flags &= (~RUBY_EVENT_VM);
3334 set_threads_event_flags(int flag)
3336 st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);
3340 rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
3342 rb_event_hook_t *hook = alloc_event_hook(func, events, data);
3343 rb_vm_t *vm = GET_VM();
3345 hook->next = vm->event_hooks;
3346 vm->event_hooks = hook;
3348 set_threads_event_flags(1);
/*
 * Unlinks hooks matching `func` from the singly linked list rooted at
 * *root; func == 0 appears to match every hook.
 * NOTE(review): the loop structure, the head-of-list unlink case, node
 * freeing and the return value are elided from this excerpt.
 */
3352 remove_event_hook(rb_event_hook_t **root, rb_event_hook_func_t func)
3354 rb_event_hook_t *prev = NULL, *hook = *root, *next;
3358 if (func == 0 || hook->func == func) {
3360 prev->next = hook->next;
/*
 * Removes hooks for `func` from a single thread's hook list and
 * refreshes that thread's cached event-flag mask.
 * NOTE(review): the `return ret;` line is elided from this excerpt.
 */
3376 rb_thread_remove_event_hook(rb_thread_t *th, rb_event_hook_func_t func)
3378 int ret = remove_event_hook(&th->event_hooks, func);
3379 thread_reset_event_flags(th);
/*
 * Removes VM-global hooks for `func`.  If the list was non-empty before
 * and is empty afterwards, VM-wide event delivery is switched off on
 * all threads to avoid needless event dispatch.
 * NOTE(review): the `return ret;` line is elided from this excerpt.
 */
3384 rb_remove_event_hook(rb_event_hook_func_t func)
3386 rb_vm_t *vm = GET_VM();
3387 rb_event_hook_t *hook = vm->event_hooks;
3388 int ret = remove_event_hook(&vm->event_hooks, func);
/* last global hook removed: stop delivering VM events to threads */
3390 if (hook != NULL && vm->event_hooks == NULL) {
3391 set_threads_event_flags(0);
/*
 * st_foreach callback: strips all event hooks (func == 0 wildcard)
 * from one living thread.
 * NOTE(review): the ST_CONTINUE return is elided from this excerpt.
 */
3398 clear_trace_func_i(st_data_t key, st_data_t val, st_data_t flag)
3401 GetThreadPtr((VALUE)key, th);
3402 rb_thread_remove_event_hook(th, 0);
/*
 * Drops every event hook in the VM: per-thread hooks on all living
 * threads, then the VM-global hook list (0 = match any func).
 */
3407 rb_clear_trace_func(void)
3409 st_foreach(GET_VM()->living_threads, clear_trace_func_i, (st_data_t) 0);
3410 rb_remove_event_hook(0);
3413 static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
3417 * set_trace_func(proc) => proc
3418 * set_trace_func(nil) => nil
3420 * Establishes _proc_ as the handler for tracing, or disables
3421 * tracing if the parameter is +nil+. _proc_ takes up
3422 * to six parameters: an event name, a filename, a line number, an
3423 * object id, a binding, and the name of a class. _proc_ is
3424 * invoked whenever an event occurs. Events are: <code>c-call</code>
3425 * (call a C-language routine), <code>c-return</code> (return from a
3426 * C-language routine), <code>call</code> (call a Ruby method),
3427 * <code>class</code> (start a class or module definition),
3428 * <code>end</code> (finish a class or module definition),
3429 * <code>line</code> (execute code on a new line), <code>raise</code>
3430 * (raise an exception), and <code>return</code> (return from a Ruby
3431 * method). Tracing is disabled within the context of _proc_.
3440 * set_trace_func proc { |event, file, line, id, binding, classname|
3441 * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
3446 * line prog.rb:11 false
3447 * c-call prog.rb:11 new Class
3448 * c-call prog.rb:11 initialize Object
3449 * c-return prog.rb:11 initialize Object
3450 * c-return prog.rb:11 new Class
3451 * line prog.rb:12 false
3452 * call prog.rb:2 test Test
3453 * line prog.rb:3 test Test
3454 * line prog.rb:4 test Test
3455 * return prog.rb:4 test Test
/*
 * Kernel#set_trace_func implementation.  Always removes any existing
 * call_trace_func hook first; then, unless trace is nil (handled in
 * elided lines), validates that trace is a Proc and installs
 * call_trace_func for all events with the Proc as hook data.
 * NOTE(review): the nil-early-return and the `return trace;` are
 * elided from this excerpt.
 */
3459 set_trace_func(VALUE obj, VALUE trace)
3461 rb_remove_event_hook(call_trace_func);
3467 if (!rb_obj_is_proc(trace)) {
3468 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
3471 rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
/*
 * Maps an event flag to its human-readable name ("line", "call",
 * "c-call", ...) for the trace-proc's first argument.
 * NOTE(review): the switch keyword, each case's return string, and the
 * default case are elided from this excerpt.
 */
3476 get_event_name(rb_event_flag_t event)
3479 case RUBY_EVENT_LINE:
3481 case RUBY_EVENT_CLASS:
3483 case RUBY_EVENT_END:
3485 case RUBY_EVENT_CALL:
3487 case RUBY_EVENT_RETURN:
3489 case RUBY_EVENT_C_CALL:
3491 case RUBY_EVENT_C_RETURN:
3493 case RUBY_EVENT_RAISE:
3500 VALUE ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always);
/*
 * Argument bundle passed (as a VALUE cast) from call_trace_func to
 * call_trace_proc through ruby_suppress_tracing.
 * NOTE(review): remaining members (at least proc and self, judging by
 * uses of p->proc / p->self below) are elided from this excerpt.
 */
3502 struct call_trace_func_args {
3503 rb_event_flag_t event;
/*
 * Builds the six arguments of a trace proc -- event name, filename,
 * line, method id, binding, class -- and invokes the user's Proc.
 * Runs under ruby_suppress_tracing so the Proc itself is not traced.
 * NOTE(review): declarations of id/klass/argv and some braces are
 * elided from this excerpt.
 */
3511 call_trace_proc(VALUE args, int tracing)
3513 struct call_trace_func_args *p = (struct call_trace_func_args *)args;
3514 VALUE eventname = rb_str_new2(get_event_name(p->event));
3515 VALUE filename = rb_str_new2(rb_sourcefile());
3517 int line = rb_sourceline();
/* for C call/return events the id/klass come from the thread's
 * current method frame rather than the event itself */
3521 if (p->event == RUBY_EVENT_C_CALL ||
3522 p->event == RUBY_EVENT_C_RETURN) {
3527 rb_thread_method_id_and_class(GET_THREAD(), &id, &klass);
/* allocator calls are internal; presumably skipped (body elided) */
3529 if (id == ID_ALLOCATOR)
/* normalize klass: unwrap include-class wrappers, and report the
 * attached object for singleton classes */
3532 if (TYPE(klass) == T_ICLASS) {
3533 klass = RBASIC(klass)->klass;
3535 else if (FL_TEST(klass, FL_SINGLETON)) {
3536 klass = rb_iv_get(klass, "__attached__");
3540 argv[0] = eventname;
3542 argv[2] = INT2FIX(line);
3543 argv[3] = id ? ID2SYM(id) : Qnil;
3544 argv[4] = p->self ? rb_binding_new() : Qnil;
3545 argv[5] = klass ? klass : Qnil;
3547 return rb_proc_call_with_block(p->proc, 6, argv, Qnil);
/*
 * Event-hook entry point installed by set_trace_func.  Packs the event
 * parameters into a call_trace_func_args struct (field assignments are
 * elided from this excerpt) and dispatches to call_trace_proc with
 * tracing suppressed so the trace Proc does not trace itself.
 */
3551 call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
3553 struct call_trace_func_args args;
3560 ruby_suppress_tracing(call_trace_proc, (VALUE)&args, Qfalse);
/*
 * Runs func(arg, tracing) with the current thread's tracing flag
 * raised, so nested trace events are not dispatched.  If tracing is
 * already active and `always` is false, func is not called (the early
 * return is elided from this excerpt).  The previous raised/tracing
 * state is saved and restored afterwards, even across a non-local jump
 * caught by EXEC_TAG.
 */
3564 ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always)
3566 rb_thread_t *th = GET_THREAD();
3567 int state, raised, tracing;
3568 VALUE result = Qnil;
/* already inside a trace handler: bail out unless forced */
3570 if ((tracing = th->tracing) != 0 && !always) {
3577 raised = rb_thread_reset_raised(th);
3580 if ((state = EXEC_TAG()) == 0) {
3581 result = (*func)(arg, tracing);
/* restore the thread's raised flag and prior tracing state */
3585 rb_thread_set_raised(th);
3589 th->tracing = tracing;
3598 * +Thread+ encapsulates the behavior of a thread of
3599 * execution, including the main thread of the Ruby script.
3601 * In the descriptions of the methods in this class, the parameter _sym_
3602 * refers to a symbol, which is either a quoted string or a
3603 * +Symbol+ (such as <code>:name</code>).
/* Use the const-string variant of rb_intern for the literals below. */
3610 #define rb_intern(str) rb_intern_const(str)
/*
 * NOTE(review): the Init_Thread() function header is elided from this
 * excerpt; the lines below are its body.  It wires up the Thread,
 * ThreadGroup, Mutex and ThreadError classes and boots the thread core.
 */
/* Thread: singleton (class-level) methods */
3614 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
3615 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
3616 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
3617 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
3618 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
3619 rb_define_singleton_method(rb_cThread, "stop", rb_thread_stop, 0);
3620 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
3621 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
3622 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
3623 rb_define_singleton_method(rb_cThread, "list", rb_thread_list, 0);
3624 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
3625 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
/* Thread.DEBUG accessors only exist in debug builds */
3626 #if THREAD_DEBUG < 0
3627 rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
3628 rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
/* Thread: instance methods */
3631 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
3632 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
3633 rb_define_method(rb_cThread, "join", thread_join_m, -1);
3634 rb_define_method(rb_cThread, "value", thread_value, 0);
3635 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
3636 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
3637 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
3638 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
3639 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
3640 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
3641 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
3642 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
3643 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
3644 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
3645 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
3646 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
3647 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
3648 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
3649 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
3650 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
3651 rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
3652 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
3654 rb_define_method(rb_cThread, "inspect", rb_thread_inspect, 0);
/* ThreadGroup class; the main thread joins the Default group */
3656 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
3657 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
3658 rb_define_method(cThGroup, "list", thgroup_list, 0);
3659 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
3660 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
3661 rb_define_method(cThGroup, "add", thgroup_add, 1);
3664 rb_thread_t *th = GET_THREAD();
3665 th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
3666 rb_define_const(cThGroup, "Default", th->thgroup);
/* Mutex class */
3669 rb_cMutex = rb_define_class("Mutex", rb_cObject);
3670 rb_define_alloc_func(rb_cMutex, mutex_alloc);
3671 rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
3672 rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
3673 rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
3674 rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
3675 rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
3676 rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
/* key for the per-thread recursion table used by rb_exec_recursive */
3678 recursive_key = rb_intern("__recursive_key__");
3679 rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);
3682 rb_define_global_function("set_trace_func", set_trace_func, 1);
3684 /* init thread core */
3685 Init_native_thread();
3687 /* main thread setting */
3689 /* acquire global interpreter lock */
3690 rb_thread_lock_t *lp = &GET_THREAD()->vm->global_vm_lock;
3691 native_mutex_initialize(lp);
3692 native_mutex_lock(lp);
3693 native_mutex_initialize(&GET_THREAD()->interrupt_lock);
3697 rb_thread_create_timer_thread();
/* reference otherwise-unused symbols to silence compiler warnings */
3699 (void)native_mutex_trylock;
3700 (void)ruby_thread_set_native;
/*
 * Returns Qtrue when the calling native thread is one registered with
 * the Ruby VM (i.e. has an associated rb_thread_t), Qfalse otherwise.
 */
3704 ruby_native_thread_p(void)
3706 rb_thread_t *th = ruby_thread_from_native();
3708 return th ? Qtrue : Qfalse;
/*
 * st_foreach callback for rb_check_deadlock.  Sets *found when a
 * thread can still make progress: it is not stopped forever, has a
 * pending interrupt, is transitioning for a lock, or is waiting on a
 * mutex that is free / already notified.  Iteration stops as soon as
 * one runnable thread is found.
 * NOTE(review): the `*found = 1;` assignments and some braces are
 * elided from this excerpt.
 */
3712 check_deadlock_i(st_data_t key, st_data_t val, int *found)
3716 GetThreadPtr(thval, th);
3718 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th) || th->transition_for_lock) {
3721 else if (th->locking_mutex) {
3723 GetMutexPtr(th->locking_mutex, mutex);
/* lock the mutex's internal lock to read th/cond_notified safely */
3725 native_mutex_lock(&mutex->lock);
3726 if (mutex->th == th || (!mutex->th && mutex->cond_notified)) {
3729 native_mutex_unlock(&mutex->lock);
3732 return (*found) ? ST_STOP : ST_CONTINUE;
/* Compiled out: st_foreach callback that dumps each living thread's
 * status and mutex-wait state to stdout when diagnosing deadlock
 * detection. */
3735 #if 0 /* for debug */
3737 debug_i(st_data_t key, st_data_t val, int *found)
3741 GetThreadPtr(thval, th);
3743 printf("th:%p %d %d %d", th, th->status, th->interrupt_flag, th->transition_for_lock);
3744 if (th->locking_mutex) {
3746 GetMutexPtr(th->locking_mutex, mutex);
3748 native_mutex_lock(&mutex->lock);
3749 printf(" %p %d\n", mutex->th, mutex->cond_notified);
3750 native_mutex_unlock(&mutex->lock);
/*
 * Deadlock detector: when every living thread is asleep
 * (vm->sleeper == living count) and check_deadlock_i finds none that
 * can progress, raises a fatal "deadlock detected" error on the main
 * thread.  A sleeper count above the living count is a VM bug.
 * NOTE(review): the `found` declaration and the `if (!found)` guard
 * around the raise are elided from this excerpt.
 */
3759 rb_check_deadlock(rb_vm_t *vm)
3763 if (vm_living_thread_num(vm) > vm->sleeper) return;
3764 if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
3766 st_foreach(vm->living_threads, check_deadlock_i, (st_data_t)&found);
3770 argv[0] = rb_eFatal;
3771 argv[1] = rb_str_new2("deadlock detected");
/* Compiled out: dump per-thread state before raising, for debugging */
3772 #if 0 /* for debug */
3773 printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
3774 st_foreach(vm->living_threads, debug_i, (st_data_t)0);
3776 rb_thread_raise(2, argv, vm->main_thread);
/*
 * Event hook installed by rb_set_coverages on RUBY_EVENT_COVERAGE:
 * increments the hit counter for the current source line in the
 * iseq's coverage array.  Lines never measured hold Qnil; counters
 * stop incrementing once they would exceed Fixnum range (POSFIXABLE).
 * NOTE(review): the bounds check on `line`, the first-hit
 * initialization branch, and some braces are elided from this excerpt.
 */
3781 update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
3783 VALUE coverage = GET_THREAD()->cfp->iseq->coverage;
/* klass == 0 on the coverage array marks it as hidden/internal */
3784 if (coverage && RBASIC(coverage)->klass == 0) {
3785 long line = rb_sourceline() - 1;
3787 if (RARRAY_PTR(coverage)[line] == Qnil) {
3790 count = FIX2LONG(RARRAY_PTR(coverage)[line]) + 1;
3791 if (POSFIXABLE(count)) {
3792 RARRAY_PTR(coverage)[line] = LONG2FIX(count);
/* Returns the VM-wide coverage table (set by rb_set_coverages). */
3798 rb_get_coverages(void)
3800 return GET_VM()->coverages;
/*
 * Installs the coverage table on the VM and hooks update_coverage on
 * coverage events so line counters are collected from now on.
 */
3804 rb_set_coverages(VALUE coverages)
3806 GET_VM()->coverages = coverages;
3807 rb_add_event_hook(update_coverage, RUBY_EVENT_COVERAGE, Qnil);
/*
 * Disables coverage measurement: clears the VM coverage table and
 * removes the update_coverage event hook.
 */
3811 rb_reset_coverages(void)
3813 GET_VM()->coverages = Qfalse;
3814 rb_remove_event_hook(update_coverage);