[前][次][番号順一覧][スレッド一覧]

ruby-changes:51893

From: normal <ko1@a...>
Date: Mon, 30 Jul 2018 05:47:40 +0900 (JST)
Subject: [ruby-changes:51893] normal:r64107 (trunk): thread_pthread: remove timer-thread by restructuring GVL

normal	2018-07-30 05:47:33 +0900 (Mon, 30 Jul 2018)

  New Revision: 64107

  https://svn.ruby-lang.org/cgi-bin/viewvc.cgi?view=revision&revision=64107

  Log:
    thread_pthread: remove timer-thread by restructuring GVL
    
    To reduce resource use and reduce CI failure; remove
    timer-thread.  Single-threaded Ruby processes (including forked
    children) will never see extra thread overhead.  This prevents
    glibc and jemalloc from going into multi-threaded mode and
    initializing locks or causing fragmentation via arena explosion.
    
    The GVL now implements its own wait-queue as a ccan/list to
    permit controlling wakeup order.  Timeslice under contention is
    handled by a designated timer thread (similar to choosing a
    "patrol_thread" for current deadlock checking).
    
    There is only one self-pipe, now, as wakeups for timeslice are
    done independently using condition variables.  This reduces FD
    pressure slightly.
    
    Signal handling is now performed directly by a Ruby Thread
    (instead of the timer-thread) by exposing the signal self-pipe
    to callers of rb_thread_fd_select, native_sleep,
    rb_wait_for_single_fd, etc...
    Acquiring, using, and releasing the self-pipe is exposed via 4
    new internal functions:
    
    1) rb_sigwait_fd_get - exclusively acquire timer_thread_pipe.normal[0]
    
    2) rb_sigwait_fd_sleep - sleep and wait for signal (and no other FDs)
    
    3) rb_sigwait_fd_put - release acquired result from rb_sigwait_fd_get
    
    4) rb_sigwait_fd_migrate - migrate signal handling to another thread
                               after calling rb_sigwait_fd_put.
    
    rb_sigwait_fd_migrate is necessary for waitpid callers because
    only one thread can wait on self-pipe at a time, otherwise a
    deadlock will occur if threads fight over the self-pipe.
    
    TRAP_INTERRUPT_MASK is now set for the main thread directly in
    signal handler via rb_thread_wakeup_timer_thread.
    
    Originally, I wanted to use POSIX timers
    (timer_create/timer_settime) for this.  Unfortunately, this
    proved unfeasible as Mutex#sleep resumes on spurious wakeups and
    test/thread/test_cv.rb::test_condvar_timed_wait failed.  Using
    pthread_sigmask to mask out SIGVTALRM fixed that test, but
    test/fiddle/test_function.rb::test_nogvl_poll proved there'd be
    some unavoidable (and frequent) incompatibilities from that
    approach.
    
    Finally, this allows us to drop thread_destruct_lock and
    interrupt current ec directly.
    
    We don't need to rely on vm->thread_destruct_lock or a coherent
    vm->running_thread on any platform.  Separate timer-thread for
    time slice and signal handling is relegated to thread_win32.c,
    now.
    
    [ruby-core:88088] [Misc #14937]

  Modified files:
    trunk/internal.h
    trunk/process.c
    trunk/signal.c
    trunk/test/ruby/test_process.rb
    trunk/thread.c
    trunk/thread_pthread.c
    trunk/thread_pthread.h
    trunk/thread_win32.c
    trunk/vm_core.h
Index: signal.c
===================================================================
--- signal.c	(revision 64106)
+++ signal.c	(revision 64107)
@@ -709,9 +709,6 @@ signal_enque(int sig) https://github.com/ruby/ruby/blob/trunk/signal.c#L709
 
 static rb_atomic_t sigchld_hit;
 
-/* Prevent compiler from reordering access */
-#define ACCESS_ONCE(type,x) (*((volatile type *)&(x)))
-
 static RETSIGTYPE
 sighandler(int sig)
 {
@@ -730,7 +727,7 @@ sighandler(int sig) https://github.com/ruby/ruby/blob/trunk/signal.c#L727
     else {
         signal_enque(sig);
     }
-    rb_thread_wakeup_timer_thread();
+    rb_thread_wakeup_timer_thread(sig);
 #if !defined(BSD_SIGNAL) && !defined(POSIX_SIGNAL)
     ruby_signal(sig, sighandler);
 #endif
@@ -764,7 +761,6 @@ rb_enable_interrupt(void) https://github.com/ruby/ruby/blob/trunk/signal.c#L761
 #ifdef HAVE_PTHREAD_SIGMASK
     sigset_t mask;
     sigemptyset(&mask);
-    sigaddset(&mask, RUBY_SIGCHLD); /* timer-thread handles this */
     pthread_sigmask(SIG_SETMASK, &mask, NULL);
 #endif
 }
@@ -1077,7 +1073,6 @@ rb_trap_exit(void) https://github.com/ruby/ruby/blob/trunk/signal.c#L1073
 
 void ruby_waitpid_all(rb_vm_t *); /* process.c */
 
-/* only runs in the timer-thread */
 void
 ruby_sigchld_handler(rb_vm_t *vm)
 {
Index: process.c
===================================================================
--- process.c	(revision 64106)
+++ process.c	(revision 64107)
@@ -936,13 +936,51 @@ void rb_native_cond_signal(rb_nativethre https://github.com/ruby/ruby/blob/trunk/process.c#L936
 void rb_native_cond_wait(rb_nativethread_cond_t *, rb_nativethread_lock_t *);
 rb_nativethread_cond_t *rb_sleep_cond_get(const rb_execution_context_t *);
 void rb_sleep_cond_put(rb_nativethread_cond_t *);
+int rb_sigwait_fd_get(const rb_thread_t *);
+void rb_sigwait_sleep(const rb_thread_t *, int fd, const struct timespec *);
+void rb_sigwait_fd_put(const rb_thread_t *, int fd);
+
+/*
+ * When a thread is done using sigwait_fd and there are other threads
+ * sleeping on waitpid, we must kick one of the threads out of
+ * rb_native_cond_wait so it can switch to rb_sigwait_sleep
+ */
+static void
+sigwait_fd_migrate_sleeper(rb_vm_t *vm)
+{
+    struct waitpid_state *w = 0;
+
+    list_for_each(&vm->waiting_pids, w, wnode) {
+        if (!w->cond) continue; /* somebody else already got sigwait_fd */
+        rb_native_cond_signal(w->cond);
+        return;
+    }
+    list_for_each(&vm->waiting_grps, w, wnode) {
+        if (!w->cond) continue; /* somebody else already got sigwait_fd */
+        rb_native_cond_signal(w->cond);
+        return;
+    }
+}
+
+void
+rb_sigwait_fd_migrate(rb_vm_t *vm)
+{
+    rb_native_mutex_lock(&vm->waitpid_lock);
+    sigwait_fd_migrate_sleeper(vm);
+    rb_native_mutex_unlock(&vm->waitpid_lock);
+}
 
 static void
 waitpid_notify(struct waitpid_state *w, rb_pid_t ret)
 {
     w->ret = ret;
     list_del_init(&w->wnode);
-    rb_native_cond_signal(w->cond);
+    if (w->cond) {
+        rb_native_cond_signal(w->cond);
+    }
+    else {
+        /* w is owned by this thread */
+    }
 }
 
 #ifdef _WIN32 /* for spawnvp result from mjit.c */
@@ -954,7 +992,7 @@ waitpid_notify(struct waitpid_state *w, https://github.com/ruby/ruby/blob/trunk/process.c#L992
 #endif
 
 extern volatile unsigned int ruby_nocldwait; /* signal.c */
-/* called by timer thread */
+/* called by timer thread or thread which acquired sigwait_fd */
 static void
 waitpid_each(struct list_head *head)
 {
@@ -1008,6 +1046,17 @@ waitpid_state_init(struct waitpid_state https://github.com/ruby/ruby/blob/trunk/process.c#L1046
     w->options = options;
 }
 
+static const struct timespec *
+sigwait_sleep_time(void)
+{
+    if (SIGCHLD_LOSSY) {
+        static const struct timespec busy_wait = { 0, 100000000 };
+
+        return &busy_wait;
+    }
+    return 0;
+}
+
 /*
  * must be called with vm->waitpid_lock held, this is not interruptible
  */
@@ -1026,13 +1075,30 @@ ruby_waitpid_locked(rb_vm_t *vm, rb_pid_ https://github.com/ruby/ruby/blob/trunk/process.c#L1075
         if (w.ret == -1) w.errnum = errno;
     }
     else {
-        w.cond = cond;
+        int sigwait_fd;
+
         w.ec = 0;
         list_add(w.pid > 0 ? &vm->waiting_pids : &vm->waiting_grps, &w.wnode);
         do {
-            rb_native_cond_wait(w.cond, &vm->waitpid_lock);
+            sigwait_fd = rb_sigwait_fd_get(0);
+
+            if (sigwait_fd >= 0) {
+                w.cond = 0;
+                rb_native_mutex_unlock(&vm->waitpid_lock);
+                rb_sigwait_sleep(0, sigwait_fd, sigwait_sleep_time());
+                rb_native_mutex_lock(&vm->waitpid_lock);
+                rb_sigwait_fd_put(0, sigwait_fd);
+            }
+            else {
+                w.cond = cond;
+                rb_native_cond_wait(w.cond, &vm->waitpid_lock);
+            }
         } while (!w.ret);
         list_del(&w.wnode);
+
+        /* we're done, maybe other waitpid callers are not: */
+        if (sigwait_fd >= 0)
+            sigwait_fd_migrate_sleeper(vm);
     }
     if (status) {
         *status = w.status;
@@ -1047,7 +1113,10 @@ waitpid_wake(void *x) https://github.com/ruby/ruby/blob/trunk/process.c#L1113
     struct waitpid_state *w = x;
 
     /* th->interrupt_lock is already held by rb_threadptr_interrupt_common */
-    rb_native_cond_signal(w->cond);
+    if (w->cond)
+        rb_native_cond_signal(w->cond);
+    else
+        rb_thread_wakeup_timer_thread(0); /* kick sigwait_fd */
 }
 
 static void *
@@ -1055,6 +1124,7 @@ waitpid_nogvl(void *x) https://github.com/ruby/ruby/blob/trunk/process.c#L1124
 {
     struct waitpid_state *w = x;
     rb_thread_t *th = rb_ec_thread_ptr(w->ec);
+    int sigwait_fd = -1;
 
     rb_native_mutex_lock(&th->interrupt_lock);
     /*
@@ -1062,13 +1132,30 @@ waitpid_nogvl(void *x) https://github.com/ruby/ruby/blob/trunk/process.c#L1132
      * by the time we enter this.  And we may also be interrupted.
      */
     if (!w->ret && !RUBY_VM_INTERRUPTED_ANY(w->ec)) {
-        if (SIGCHLD_LOSSY) {
-            rb_thread_wakeup_timer_thread();
+        sigwait_fd = rb_sigwait_fd_get(th);
+        if (sigwait_fd >= 0) {
+            rb_nativethread_cond_t *cond = w->cond;
+
+            w->cond = 0;
+            rb_native_mutex_unlock(&th->interrupt_lock);
+            rb_sigwait_sleep(th, sigwait_fd, sigwait_sleep_time());
+            rb_native_mutex_lock(&th->interrupt_lock);
+            w->cond = cond;
+            rb_sigwait_fd_put(th, sigwait_fd);
+        }
+        else {
+            /* another thread calling rb_sigwait_sleep will process
+             * signals for us */
+            if (SIGCHLD_LOSSY) {
+                rb_thread_wakeup_timer_thread(0);
+            }
+            rb_native_cond_wait(w->cond, &th->interrupt_lock);
         }
-        rb_native_cond_wait(w->cond, &th->interrupt_lock);
     }
     rb_native_mutex_unlock(&th->interrupt_lock);
 
+    if (sigwait_fd >= 0)
+        rb_sigwait_fd_migrate(th->vm);
     return 0;
 }
 
Index: thread_pthread.c
===================================================================
--- thread_pthread.c	(revision 64106)
+++ thread_pthread.c	(revision 64107)
@@ -45,27 +45,16 @@ void rb_native_cond_broadcast(rb_nativet https://github.com/ruby/ruby/blob/trunk/thread_pthread.c#L45
 void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex);
 void rb_native_cond_initialize(rb_nativethread_cond_t *cond);
 void rb_native_cond_destroy(rb_nativethread_cond_t *cond);
-static void rb_thread_wakeup_timer_thread_low(void);
 static void clear_thread_cache_altstack(void);
+static void ubf_wakeup_all_threads(void);
+static int ubf_threads_empty(void);
+static int native_cond_timedwait(rb_nativethread_cond_t *, pthread_mutex_t *,
+                                 const struct timespec *);
 
-#define TIMER_THREAD_MASK    (1)
-#define TIMER_THREAD_SLEEPY  (2|TIMER_THREAD_MASK)
-#define TIMER_THREAD_BUSY    (4|TIMER_THREAD_MASK)
-
-#if defined(HAVE_POLL) && defined(HAVE_FCNTL) && defined(F_GETFL) && \
-   defined(F_SETFL) && defined(O_NONBLOCK) && \
-   defined(F_GETFD) && defined(F_SETFD) && defined(FD_CLOEXEC)
-/* The timer thread sleeps while only one Ruby thread is running. */
-# define TIMER_IMPL TIMER_THREAD_SLEEPY
-#else
-# define TIMER_IMPL TIMER_THREAD_BUSY
-#endif
+#define TIMER_THREAD_CREATED_P() (timer_thread_pipe.owner_process == getpid())
 
-static struct {
-    pthread_t id;
-    int created;
-} timer_thread;
-#define TIMER_THREAD_CREATED_P() (timer_thread.created != 0)
+#define THREAD_INVALID ((const rb_thread_t *)-1)
+static const rb_thread_t *sigwait_th;
 
 #ifdef HAVE_SCHED_YIELD
 #define native_thread_yield() (void)sched_yield()
@@ -82,49 +71,85 @@ static pthread_condattr_t *condattr_mono https://github.com/ruby/ruby/blob/trunk/thread_pthread.c#L71
 static const void *const condattr_monotonic = NULL;
 #endif
 
+/* 100ms.  10ms is too small for user level thread scheduling
+ * on recent Linux (tested on 2.6.35)
+ */
+#define TIME_QUANTUM_USEC (100 * 1000)
+
+static struct timespec native_cond_timeout(rb_nativethread_cond_t *,
+                                           struct timespec rel);
+
 static void
-gvl_acquire_common(rb_vm_t *vm)
+gvl_acquire_common(rb_vm_t *vm, rb_thread_t *th)
 {
     if (vm->gvl.acquired) {
+        native_thread_data_t *nd = &th->native_thread_data;
 
-	if (!vm->gvl.waiting++) {
-	    /*
-	     * Wake up timer thread iff timer thread is slept.
-	     * When timer thread is polling mode, we don't want to
-	     * make confusing timer thread interval time.
-	     */
-	    rb_thread_wakeup_timer_thread_low();
-	}
+        VM_ASSERT(th->unblock.func == 0 && "we reuse ubf_list for GVL waitq");
 
-	while (vm->gvl.acquired) {
-            rb_native_cond_wait(&vm->gvl.cond, &vm->gvl.lock);
-	}
+        list_add_tail(&vm->gvl.waitq, &nd->ubf_list);
+        do {
+            if (!vm->gvl.timer) {
+                struct timespec ts = { 0, TIME_QUANTUM_USEC * 1000 };
+                /*
+                 * become designated timer thread to kick vm->gvl.acquired
+                 * periodically
+                 */
+                ts = native_cond_timeout(&nd->sleep_cond, ts);
+                vm->gvl.timer = th;
+                native_cond_timedwait(&nd->sleep_cond, &vm->gvl.lock, &ts);
+                vm->gvl.timer = 0;
+                ubf_wakeup_all_threads();
+
+                /*
+                 * Timeslice.  We can't touch thread_destruct_lock here,
+                 * as the process may fork while this thread is contending
+                 * for GVL:
+                 */
+                if (vm->gvl.acquired) timer_thread_function();
+            }
+            else {
+                rb_native_cond_wait(&nd->sleep_cond, &vm->gvl.lock);
+            }
+        } while (vm->gvl.acquired);
 
-	--vm->gvl.waiting;
+        list_del_init(&nd->ubf_list);
 
-	if (vm->gvl.need_yield) {
-	    vm->gvl.need_yield = 0;
+        if (vm->gvl.need_yield) {
+            vm->gvl.need_yield = 0;
             rb_native_cond_signal(&vm->gvl.switch_cond);
-	}
+        }
     }
+    vm->gvl.acquired = th;
+    /*
+     * Designate the next gvl.timer thread, favor the last thread in
+     * the waitq since it will be in waitq longest
+     */
+    if (!vm->gvl.timer) {
+        native_thread_data_t *last;
 
-    vm->gvl.acquired = 1;
+        last = list_tail(&vm->gvl.waitq, native_thread_data_t, ubf_list);
+        if (last) rb_native_cond_signal(&last->sleep_cond);
+    }
 }
 
 static void
 gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
 {
     rb_native_mutex_lock(&vm->gvl.lock);
-    gvl_acquire_common(vm);
+    gvl_acquire_common(vm, th);
     rb_native_mutex_unlock(&vm->gvl.lock);
 }
 
-static void
+static native_thread_data_t *
 gvl_release_common(rb_vm_t *vm)
 {
+    native_thread_data_t *next;
     vm->gvl.acquired = 0;
-    if (vm->gvl.waiting > 0)
-        rb_native_cond_signal(&vm->gvl.cond);
+    next = list_top(&vm->gvl.waitq, native_thread_data_t, ubf_list);
+    if (next) rb_native_cond_signal(&next->sleep_cond);
+
+    return next;
 }
 
 static void
@@ -138,34 +163,32 @@ gvl_release(rb_vm_t *vm) https://github.com/ruby/ruby/blob/trunk/thread_pthread.c#L163
 static void
 gvl_yield(rb_vm_t *vm, rb_thread_t *th)
 {
-    rb_native_mutex_lock(&vm->gvl.lock);
+    native_thread_data_t *next;
 
-    gvl_release_common(vm);
+    rb_native_mutex_lock(&vm->gvl.lock);
+    next = gvl_release_common(vm);
 
     /* An another thread is processing GVL yield. */
     if (UNLIKELY(vm->gvl.wait_yield)) {
-	while (vm->gvl.wait_yield)
+        while (vm->gvl.wait_yield)
             rb_native_cond_wait(&vm->gvl.switch_wait_cond, &vm->gvl.lock);
-	goto acquire;
     }
-
-    if (vm->gvl.waiting > 0) {
-	/* Wait until another thread task take GVL. */
-	vm->gvl.need_yield = 1;
-	vm->gvl.wait_yield = 1;
-	while (vm->gvl.need_yield)
+    else if (next) {
+        /* Wait until another thread task takes GVL. */
+        vm->gvl.need_yield = 1;
+        vm->gvl.wait_yield = 1;
+        while (vm->gvl.need_yield)
             rb_native_cond_wait(&vm->gvl.switch_cond, &vm->gvl.lock);
-	vm->gvl.wait_yield = 0;
+        vm->gvl.wait_yield = 0;
+        rb_native_cond_broadcast(&vm->gvl.switch_wait_cond);
     }
     else {
-	rb_native_mutex_unlock(&vm->gvl.lock);
-	sched_yield();
+        rb_native_mutex_unlock(&vm->gvl.lock);
+        native_thread_yield();
         rb_native_mutex_lock(&vm->gvl.lock);
+        rb_native_cond_broadcast(&vm->gvl.switch_wait_cond);
     }
-
-    rb_native_cond_broadcast(&vm->gvl.switch_wait_cond);
-  acquire:
-    gvl_acquire_common(vm);
+    gvl_acquire_common(vm, th);
     rb_native_mutex_unlock(&vm->gvl.lock);
 }
 
@@ -173,11 +196,11 @@ static void https://github.com/ruby/ruby/blob/trunk/thread_pthread.c#L196
 gvl_init(rb_vm_t *vm)
 {
     rb_native_mutex_initialize(&vm->gvl.lock);
-    rb_native_cond_initialize(&vm->gvl.cond);
     rb_native_cond_initialize(&vm->gvl.switch_cond);
     rb_native_cond_initialize(&vm->gvl.switch_wait_cond);
+    list_head_init(&vm->gvl.waitq);
     vm->gvl.acquired = 0;
-    vm->gvl.waiting = 0;
+    vm->gvl.timer = 0;
     vm->gvl.need_yield = 0;
     vm->gvl.wait_yield = 0;
 }
@@ -187,7 +210,6 @@ gvl_destroy(rb_vm_t *vm) https://github.com/ruby/ruby/blob/trunk/thread_pthread.c#L210
 {
     rb_native_cond_destroy(&vm->gvl.switch_wait_cond);
     rb_native_cond_destroy(&vm->gvl.switch_cond);
-    rb_native_cond_destroy(&vm->gvl.cond);
     rb_native_mutex_destroy(&vm->gvl.lock);
     clear_thread_cache_altstack();
 }
@@ -1012,17 +1034,6 @@ native_thread_create(rb_thread_t *th) https://github.com/ruby/ruby/blob/trunk/thread_pthread.c#L1034
     return err;
 }
 
-#if (TIMER_IMPL & TIMER_THREAD_MASK)
-static void
-native_thread_join(pthread_t th)
-{
-    int err = pthread_join(th, 0);
-    if (err) {
-	rb_raise(rb_eThreadError, "native_thread_join() failed (%d)", err);
-    }
-}
-#endif /* TIMER_THREAD_MASK */
-
 #if USE_NATIVE_THREAD_PRIORITY
 
 static void
@@ -1068,7 +1079,7 @@ ubf_pthread_cond_signal(void *ptr) https://github.com/ruby/ruby/blob/trunk/thread_pthread.c#L1079
 }
 
 static void
-native_sleep(rb_thread_t *th, struct timespec *timeout_rel)
+native_cond_sleep(rb_thread_t *th, struct timespec *timeout_rel)
 {
     struct timespec timeout;
     rb_nativethread_lock_t *lock = &th->interrupt_lock;
@@ -1161,17 +1172,8 @@ static void https://github.com/ruby/ruby/blob/trunk/thread_pthread.c#L1172
 ubf_select(void *ptr)
 {
     rb_thread_t *th = (rb_thread_t *)ptr;
-    register_ubf_list(th);
 
-    /*
-     * ubf_wakeup_thread() doesn't guarantee to wake up a target thread.
-     * Therefore, we repeatedly call ubf_wakeup_thread() until a target thread
-     * exit from ubf function.
-     * In the other hands, we shouldn't call rb_thread_wakeup_timer_thread()
-     * if running on timer thread because it may make endless wakeups.
-     */
-    if (!pthread_equal(pthread_self(), timer_thread.id))
-	rb_thread_wakeup_timer_thread();
+    register_ubf_list(th);
     ubf_wakeup_thread(th);
 }
 
@@ -1208,39 +1210,16 @@ static int ubf_threads_empty(void) { ret https://github.com/ruby/ruby/blob/trunk/thread_pthread.c#L1210
 #define TT_DEBUG 0
 #define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
 
-/* 100ms.  10ms is too small for user level thread scheduling
- * on recent Linux (tested on 2.6.35)
- */
-#define TIME_QUANTUM_USEC (100 * 1000)
-
-#if TIMER_IMPL == TIMER_THREAD_SLEEPY
 static struct {
-    /*
-     * Read end of each pipe is closed inside timer thread for shutdown
-     * Write ends are closed by a normal Ruby thread during shutdown
-     */
+    /* pipes are closed in forked children when owner_process does not match */
     int normal[2];
-    int low[2];
 
     /* volatile for signal handler use: */
     volatile rb_pid_t owner_process;
 } timer_thread_pipe = {
     {-1, -1},
-    {-1, -1}, /* low priority */
 };
 
-NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
-static void
-async_bug_fd(const char *mesg, int errno_arg, int fd)
-{
-    char buff[64];
-    size_t n = strlcpy(buff, mesg, sizeof(buff));
-    if (n < sizeof(buff)-3) {
-	ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
-    }
-    rb_async_bug_errno(buff, errno_arg);
-}
-
 /* only use signal-safe system calls here */
 static void
 rb_thread_wakeup_timer_thread_fd(int fd)
@@ -1272,49 +1251,33 @@ rb_thread_wakeup_timer_thread_fd(int fd) https://github.com/ruby/ruby/blob/trunk/thread_pthread.c#L1251
 }
 
 void
-rb_thread_wakeup_timer_thread(void)
+rb_thread_wakeup_timer_thread(int sig)
 {
     /* must be safe inside sighandler, so no mutex */
     if (timer_thread_pipe.owner_process == getpid()) {
-	rb_thread_wakeup_timer_thread_fd(timer_thread_pipe.normal[1]);
-    }
-}
+        rb_thread_wakeup_timer_thread_fd(timer_thread_pipe.normal[1]);
 
-static void
-rb_thread_wakeup_timer_thread_low(void)
-{
-    if (timer_thread_pipe.owner_process == getpid()) {
-	rb_thread_wakeup_timer_thread_fd(timer_thread_pipe.low[1]);
-    }
-}
+        /*
+         * system_working check is required because vm and main_thread are
+         * freed during shutdown
+         */
+        if (sig && system_working) {
+            volatile rb_execution_context_t *ec;
+            rb_vm_t *vm = GET_VM();
+            rb_thread_t *mth;
+
+            /*
+             * FIXME: root VM and main_thread should be static and not
+             * on heap for maximum safety (and startup/shutdown speed)
+             */
+            if (!vm) return;
+            mth = vm->main_thread;
+            if (!mth || !system_working) return;
 
-/* VM-dependent API is not available for this function */
-static void
-consume_communication_pipe(int fd)
-{
-#define CCP_READ_BUFF_SIZE 1024
-    /* buffer can be shared because no one refers to them. */
-    static char buff[CCP_READ_BUFF_SIZE];
-    ssize_t result;
+            /* this relies on GC for grace period before cont_free */
+            ec = ACCESS_ONCE(rb_execution_context_t *, mth->ec);
 
-    while (1) {
-	result = read(fd, buff, sizeof(buff));
-	if (result == 0) {
-	    return;
-	}
-	else if (result < 0) {
-	    int e = errno;
-	    switch (e) {
-	      case EINTR:
-		continue; /* retry */
-	      case EAGAIN:
-#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
-	      case EWOULDBLOCK:
-#endif
-		return;
-	      default:
-		async_bug_fd("consume_communication_pipe: read", e, fd);
-	    }
+            if (ec) RUBY_VM_SET_TRAP_INTERRUPT(ec);
 	}
     }
 }
@@ -1347,6 +1310,7 @@ set_nonblock(int fd) https://github.com/ruby/ruby/blob/trunk/thread_pthread.c#L1310
 	rb_sys_fail(0);
 }
 
+/* communication pipe with timer thread and signal handler */
 static int
 setup_communication_pipe_internal(int pipes[2])
 {
@@ -1371,108 +1335,6 @@ setup_communication_pipe_internal(int pi https://github.com/ruby/ruby/blob/trunk/thread_pthread.c#L1335
     return 0;
 }
 
-/* communication pipe with timer thread and signal handler */
-static int (... truncated)

--
ML: ruby-changes@q...
Info: http://www.atdot.net/~ko1/quickml/

[前][次][番号順一覧][スレッド一覧]