[前][次][番号順一覧][スレッド一覧]

ruby-changes:17940

From: ko1 <ko1@a...>
Date: Sun, 28 Nov 2010 05:16:07 +0900 (JST)
Subject: [ruby-changes:17940] Ruby:r29956 (trunk): * thread.c, vm_core.h: make gvl_acquire/release/init/destruct

ko1	2010-11-28 05:15:59 +0900 (Sun, 28 Nov 2010)

  New Revision: 29956

  http://svn.ruby-lang.org/cgi-bin/viewvc.cgi?view=rev&revision=29956

  Log:
    * thread.c, vm_core.h: make gvl_acquire/release/init/destruct
      APIs to modularize GVL implementation.
    * thread_pthread.c, thread_pthread.h: Two GVL implementations.
      (1) Simple locking GVL which is same as existing GVL.
      (2) Wake-up queued threads.  The wake-up order is simple FIFO.
      (We can make several queues to support exact priorities, however
      this causes some issues such as priority inversion and so on.)
      This impl. prevents spin-loop (*1) caused on SMP environments.
      *1: Only one Ruby thread acquires GVL again and again.
      Bug #2359 [ruby-core:26694]
    * thread_win32.c, thread_win32.h: Using simple lock
      not by CRITICAL_SECTION but by Mutex.
      Bug #3890 [ruby-dev:42315]
    * vm.c (ruby_vm_destruct): ditto.

  Modified files:
    trunk/ChangeLog
    trunk/thread.c
    trunk/thread_pthread.c
    trunk/thread_pthread.h
    trunk/thread_win32.c
    trunk/thread_win32.h
    trunk/vm.c
    trunk/vm_core.h

Index: thread_win32.c
===================================================================
--- thread_win32.c	(revision 29955)
+++ thread_win32.c	(revision 29956)
@@ -25,6 +25,7 @@
 static int native_mutex_unlock(rb_thread_lock_t *);
 static int native_mutex_trylock(rb_thread_lock_t *);
 static void native_mutex_initialize(rb_thread_lock_t *);
+static void native_mutex_destroy(rb_thread_lock_t *);
 
 static void native_cond_signal(rb_thread_cond_t *cond);
 static void native_cond_broadcast(rb_thread_cond_t *cond);
@@ -32,6 +33,105 @@
 static void native_cond_initialize(rb_thread_cond_t *cond);
 static void native_cond_destroy(rb_thread_cond_t *cond);
 
+static void
+w32_error(const char *func)
+{
+    LPVOID lpMsgBuf;
+    DWORD err = GetLastError();
+    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+		      FORMAT_MESSAGE_FROM_SYSTEM |
+		      FORMAT_MESSAGE_IGNORE_INSERTS,
+		      NULL,
+		      err,
+		      MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
+		      (LPTSTR) & lpMsgBuf, 0, NULL) == 0)
+	FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+		      FORMAT_MESSAGE_FROM_SYSTEM |
+		      FORMAT_MESSAGE_IGNORE_INSERTS,
+		      NULL,
+		      err,
+		      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+		      (LPTSTR) & lpMsgBuf, 0, NULL);
+    rb_bug("%s: %s", func, (char*)lpMsgBuf);
+}
+
+static int
+w32_mutex_lock(HANDLE lock)
+{
+    DWORD result;
+    while (1) {
+	thread_debug("native_mutex_lock: %p\n", lock);
+	result = w32_wait_events(&lock, 1, INFINITE, 0);
+	switch (result) {
+	  case WAIT_OBJECT_0:
+	    /* get mutex object */
+	    thread_debug("acquire mutex: %p\n", lock);
+	    return 0;
+	  case WAIT_OBJECT_0 + 1:
+	    /* interrupt */
+	    errno = EINTR;
+	    thread_debug("acquire mutex interrupted: %p\n", lock);
+	    return 0;
+	  case WAIT_TIMEOUT:
+	    thread_debug("timeout mutex: %p\n", lock);
+	    break;
+	  case WAIT_ABANDONED:
+	    rb_bug("win32_mutex_lock: WAIT_ABANDONED");
+	    break;
+	  default:
+	    rb_bug("win32_mutex_lock: unknown result (%d)", result);
+	    break;
+	}
+    }
+    return 0;
+}
+
+static HANDLE
+w32_mutex_create(void)
+{
+    HANDLE lock = CreateMutex(NULL, FALSE, NULL);
+    if (lock == NULL) {
+	w32_error("native_mutex_initialize");
+    }
+    return lock;
+}
+
+#define GVL_DEBUG 0
+
+static void
+gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
+{
+    w32_mutex_lock(vm->gvl.lock);
+    if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
+}
+
+static void
+gvl_release(rb_vm_t *vm)
+{
+    ReleaseMutex(vm->gvl.lock);
+}
+
+static void
+gvl_atfork(rb_vm_t *vm)
+{
+    rb_bug("gvl_atfork() is called on win32");
+}
+
+static void
+gvl_init(rb_vm_t *vm)
+{
+    int r;
+    if (GVL_DEBUG) fprintf(stderr, "gvl init\n");
+    vm->gvl.lock = w32_mutex_create();
+}
+
+static void
+gvl_destroy(rb_vm_t *vm)
+{
+    if (GVL_DEBUG) fprintf(stderr, "gvl destroy\n");
+    CloseHandle(vm->gvl.lock);
+}
+
 static rb_thread_t *
 ruby_thread_from_native(void)
 {
@@ -64,28 +164,6 @@
 }
 
 static void
-w32_error(const char *func)
-{
-    LPVOID lpMsgBuf;
-    DWORD err = GetLastError();
-    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
-		      FORMAT_MESSAGE_FROM_SYSTEM |
-		      FORMAT_MESSAGE_IGNORE_INSERTS,
-		      NULL,
-		      err,
-		      MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
-		      (LPTSTR) & lpMsgBuf, 0, NULL) == 0)
-	FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
-		      FORMAT_MESSAGE_FROM_SYSTEM |
-		      FORMAT_MESSAGE_IGNORE_INSERTS,
-		      NULL,
-		      err,
-		      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
-		      (LPTSTR) & lpMsgBuf, 0, NULL);
-    rb_bug("%s: %s", func, (char*)lpMsgBuf);
-}
-
-static void
 w32_set_event(HANDLE handle)
 {
     if (SetEvent(handle) == 0) {
@@ -111,7 +189,7 @@
     thread_debug("  w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
 		 events, count, timeout, th);
     if (th && (intr = th->native_thread_data.interrupt_event)) {
-	native_mutex_lock(&th->vm->global_vm_lock);
+	gvl_acquire(th->vm, th);
 	if (intr == th->native_thread_data.interrupt_event) {
 	    w32_reset_event(intr);
 	    if (RUBY_VM_INTERRUPTED(th)) {
@@ -124,7 +202,7 @@
 	    targets[count++] = intr;
 	    thread_debug("  * handle: %p (count: %d, intr)\n", intr, count);
 	}
-	native_mutex_unlock(&th->vm->global_vm_lock);
+	gvl_release(th->vm);
     }
 
     thread_debug("  WaitForMultipleObjects start (count: %d)\n", count);
@@ -260,32 +338,7 @@
 native_mutex_lock(rb_thread_lock_t *lock)
 {
 #if USE_WIN32_MUTEX
-    DWORD result;
-    while (1) {
-	thread_debug("native_mutex_lock: %p\n", *lock);
-	result = w32_wait_events(&*lock, 1, INFINITE, 0);
-	switch (result) {
-	  case WAIT_OBJECT_0:
-	    /* get mutex object */
-	    thread_debug("acquire mutex: %p\n", *lock);
-	    return 0;
-	  case WAIT_OBJECT_0 + 1:
-	    /* interrupt */
-	    errno = EINTR;
-	    thread_debug("acquire mutex interrupted: %p\n", *lock);
-	    return 0;
-	  case WAIT_TIMEOUT:
-	    thread_debug("timeout mutex: %p\n", *lock);
-	    break;
-	  case WAIT_ABANDONED:
-	    rb_bug("win32_mutex_lock: WAIT_ABANDONED");
-	    break;
-	  default:
-	    rb_bug("win32_mutex_lock: unknown result (%d)", result);
-	    break;
-	}
-    }
-    return 0;
+    w32_mutex_lock(*lock);
 #else
     EnterCriticalSection(lock);
     return 0;
@@ -328,10 +381,7 @@
 native_mutex_initialize(rb_thread_lock_t *lock)
 {
 #if USE_WIN32_MUTEX
-    *lock = CreateMutex(NULL, FALSE, NULL);
-    if (*lock == NULL) {
-	w32_error("native_mutex_initialize");
-    }
+    *lock = w32_mutex_create();
     /* thread_debug("initialize mutex: %p\n", *lock); */
 #else
     InitializeCriticalSection(lock);
@@ -355,11 +405,6 @@
     HANDLE event;
 };
 
-struct rb_thread_cond_struct {
-    struct cond_event_entry *next;
-    struct cond_event_entry *last;
-};
-
 static void
 native_cond_signal(rb_thread_cond_t *cond)
 {
Index: thread_win32.h
===================================================================
--- thread_win32.h	(revision 29955)
+++ thread_win32.h	(revision 29956)
@@ -23,11 +23,18 @@
 
 typedef HANDLE rb_thread_id_t;
 typedef CRITICAL_SECTION rb_thread_lock_t;
-typedef struct rb_thread_cond_struct rb_thread_cond_t;
+typedef struct rb_thread_cond_struct {
+    struct cond_event_entry *next;
+    struct cond_event_entry *last;
+} rb_thread_cond_t;
 
 typedef struct native_thread_data_struct {
     HANDLE interrupt_event;
 } native_thread_data_t;
 
+typedef struct rb_global_vm_lock_struct {
+    HANDLE lock;
+} rb_global_vm_lock_t;
+
 #endif /* RUBY_THREAD_WIN32_H */
 
Index: ChangeLog
===================================================================
--- ChangeLog	(revision 29955)
+++ ChangeLog	(revision 29956)
@@ -1,3 +1,23 @@
+Sun Nov 28 12:23:57 2010  Koichi Sasada  <ko1@a...>
+
+	* thread.c, vm_core.h: make gvl_acquire/release/init/destruct
+	  APIs to modularize GVL implementation.
+
+	* thread_pthread.c, thread_pthread.h: Two GVL implementations.
+	  (1) Simple locking GVL which is same as existing GVL.
+	  (2) Wake-up queued threads.  The wake-up order is simple FIFO.
+	     (We can make several queues to support exact priorities, however
+	      this causes some issues such as priority inversion and so on.)
+	      This impl. prevents spin-loop (*1) caused on SMP environemnts.
+	  *1: Only one Ruby thread acqures GVL again and again.
+	      Bug #2359 [ruby-core:26694] 
+
+	* thread_win32.c, thread_win32.h: Using simple lock
+	  not by CRITICAL_SECTION but by Mutex.
+	  Bug #3890 [ruby-dev:42315]
+
+	* vm.c (ruby_vm_destruct): ditto.
+
 Sun Nov 28 04:40:00 2010  Luis Lavena  <luislavena@g...>
 
 	* io.c (io_fwrite): use rb_w32_write_console under Windows.
Index: thread_pthread.c
===================================================================
--- thread_pthread.c	(revision 29955)
+++ thread_pthread.c	(revision 29956)
@@ -29,10 +29,158 @@
 static void native_cond_initialize(pthread_cond_t *cond);
 static void native_cond_destroy(pthread_cond_t *cond);
 
+static void native_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void)); 
+
+#define native_mutex_reinitialize_atfork(lock) (\
+	native_mutex_unlock(lock), \
+	native_mutex_initialize(lock), \
+	native_mutex_lock(lock))
+
+#define GVL_SIMPLE_LOCK 0
+#define GVL_DEBUG 0
+
 static void
+gvl_show_waiting_threads(rb_vm_t *vm)
+{
+    rb_thread_t *th = vm->gvl.waiting_threads;
+    int i = 0;
+    while (th) {
+	fprintf(stderr, "waiting (%d): %p\n", i++, th);
+	th = th->native_thread_data.gvl_next;
+    }
+}
+
+static void
+gvl_waiting_push(rb_vm_t *vm, rb_thread_t *th)
+{
+    th->native_thread_data.gvl_next = 0;
+
+    if (vm->gvl.waiting_threads) {
+	vm->gvl.waiting_last_thread->native_thread_data.gvl_next = th;
+	vm->gvl.waiting_last_thread = th;
+    }
+    else {
+	vm->gvl.waiting_threads = th;
+	vm->gvl.waiting_last_thread = th;
+    }
+    th = vm->gvl.waiting_threads;
+    vm->gvl.waiting++;
+}
+
+static void
+gvl_waiting_shift(rb_vm_t *vm, rb_thread_t *th)
+{
+    vm->gvl.waiting_threads = vm->gvl.waiting_threads->native_thread_data.gvl_next;
+    vm->gvl.waiting--;
+}
+
+static void
+gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
+{
+#if GVL_SIMPLE_LOCK
+    native_mutex_lock(&vm->gvl.lock);
+#else
+    native_mutex_lock(&vm->gvl.lock);
+    if (vm->gvl.waiting > 0 || vm->gvl.acquired != 0) {
+	if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): sleep\n", th);
+	gvl_waiting_push(vm, th);
+        if (GVL_DEBUG) gvl_show_waiting_threads(vm);
+
+	while (vm->gvl.acquired != 0 || vm->gvl.waiting_threads != th) {
+	    native_cond_wait(&th->native_thread_data.gvl_cond, &vm->gvl.lock);
+	}
+	gvl_waiting_shift(vm, th);
+    }
+    else {
+	/* do nothing */
+    }
+    vm->gvl.acquired = 1;
+    native_mutex_unlock(&vm->gvl.lock);
+#endif
+    if (GVL_DEBUG) gvl_show_waiting_threads(vm);
+    if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
+}
+
+static void
+gvl_release(rb_vm_t *vm)
+{
+#if GVL_SIMPLE_LOCK
+    native_mutex_unlock(&vm->gvl.lock);
+#else
+    native_mutex_lock(&vm->gvl.lock);
+    if (vm->gvl.waiting > 0) {
+	rb_thread_t *th = vm->gvl.waiting_threads;
+	if (GVL_DEBUG) fprintf(stderr, "gvl release (%p): wakeup: %p\n", GET_THREAD(), th);
+	native_cond_signal(&th->native_thread_data.gvl_cond);
+    }
+    else {
+	if (GVL_DEBUG) fprintf(stderr, "gvl release (%p): wakeup: %p\n", GET_THREAD(), 0);
+	/* do nothing */
+    }
+    vm->gvl.acquired = 0;
+    native_mutex_unlock(&vm->gvl.lock);
+#endif
+}
+
+static void
+gvl_atfork(rb_vm_t *vm)
+{
+#if GVL_SIMPLE_LOCK
+    native_mutex_reinitialize_atfork(&vm->gvl.lock);
+#else
+    /* do nothing */
+#endif
+}
+
+static void gvl_init(rb_vm_t *vm);
+
+static void
+gvl_atfork_child(void)
+{
+    gvl_init(GET_VM());
+}
+
+static void
+gvl_init(rb_vm_t *vm)
+{
+    int r;
+    if (GVL_DEBUG) fprintf(stderr, "gvl init\n");
+    native_mutex_initialize(&vm->gvl.lock);
+    native_atfork(0, 0, gvl_atfork_child);
+
+    vm->gvl.waiting_threads = 0;
+    vm->gvl.waiting_last_thread = 0;
+    vm->gvl.waiting = 0;
+    vm->gvl.acquired = 0;
+}
+
+static void
+gvl_destroy(rb_vm_t *vm)
+{
+    if (GVL_DEBUG) fprintf(stderr, "gvl destroy\n");
+    native_mutex_destroy(&vm->gvl.lock);
+}
+
+static void
+mutex_debug(const char *msg, pthread_mutex_t *lock)
+{
+    if (0) {
+	int r;
+	static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;
+
+	if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(1);}
+	fprintf(stdout, "%s: %p\n", msg, lock);
+	if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(1);}
+    }
+}
+
+#define NATIVE_MUTEX_LOCK_DEBUG 1
+
+static void
 native_mutex_lock(pthread_mutex_t *lock)
 {
     int r;
+    mutex_debug("lock", lock);
     if ((r = pthread_mutex_lock(lock)) != 0) {
 	rb_bug_errno("pthread_mutex_lock", r);
     }
@@ -42,6 +190,7 @@
 native_mutex_unlock(pthread_mutex_t *lock)
 {
     int r;
+    mutex_debug("unlock", lock);
     if ((r = pthread_mutex_unlock(lock)) != 0) {
 	rb_bug_errno("pthread_mutex_unlock", r);
     }
@@ -51,6 +200,7 @@
 native_mutex_trylock(pthread_mutex_t *lock)
 {
     int r;
+    mutex_debug("trylock", lock);
     if ((r = pthread_mutex_trylock(lock)) != 0) {
 	if (r == EBUSY) {
 	    return EBUSY;
@@ -66,20 +216,17 @@
 native_mutex_initialize(pthread_mutex_t *lock)
 {
     int r = pthread_mutex_init(lock, 0);
+    mutex_debug("init", lock);
     if (r != 0) {
 	rb_bug_errno("pthread_mutex_init", r);
     }
 }
 
-#define native_mutex_reinitialize_atfork(lock) (\
-	native_mutex_unlock(lock), \
-	native_mutex_initialize(lock), \
-	native_mutex_lock(lock))
-
 static void
 native_mutex_destroy(pthread_mutex_t *lock)
 {
     int r = pthread_mutex_destroy(lock);
+    mutex_debug("destroy", lock);
     if (r != 0) {
 	rb_bug_errno("pthread_mutex_destroy", r);
     }
@@ -127,6 +274,14 @@
     return pthread_cond_timedwait(cond, mutex, ts);
 }
 
+static void
+native_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
+{
+    int r = pthread_atfork(prepare, parent, child);
+    if (r != 0) {
+	rb_bug_errno("native_atfork", r);
+    }
+}
 
 #define native_cleanup_push pthread_cleanup_push
 #define native_cleanup_pop  pthread_cleanup_pop
@@ -171,6 +326,7 @@
     pthread_key_create(&ruby_native_thread_key, NULL);
     th->thread_id = pthread_self();
     native_cond_initialize(&th->native_thread_data.sleep_cond);
+    native_cond_initialize(&th->native_thread_data.gvl_cond);
     ruby_thread_set_native(th);
     native_mutex_initialize(&signal_thread_list_lock);
     posix_signal(SIGVTALRM, null_func);
Index: thread_pthread.h
===================================================================
--- thread_pthread.h	(revision 29955)
+++ thread_pthread.h	(revision 29956)
@@ -22,6 +22,18 @@
 typedef struct native_thread_data_struct {
     void *signal_thread_list;
     pthread_cond_t sleep_cond;
+    pthread_cond_t gvl_cond;
+    struct rb_thread_struct *gvl_next;
 } native_thread_data_t;
 
+#include <semaphore.h>
+
+typedef struct rb_global_vm_lock_struct {
+    pthread_mutex_t lock;
+    struct rb_thread_struct * volatile waiting_threads;
+    struct rb_thread_struct *waiting_last_thread;
+    int waiting;
+    int volatile acquired;
+} rb_global_vm_lock_t;
+
 #endif /* RUBY_THREAD_PTHREAD_H */
Index: vm_core.h
===================================================================
--- vm_core.h	(revision 29955)
+++ vm_core.h	(revision 29956)
@@ -274,7 +274,7 @@
 typedef struct rb_vm_struct {
     VALUE self;
 
-    rb_thread_lock_t global_vm_lock;
+    rb_global_vm_lock_t gvl;
 
     struct rb_thread_struct *main_thread;
     struct rb_thread_struct *running_thread;
Index: thread.c
===================================================================
--- thread.c	(revision 29955)
+++ thread.c	(revision 29956)
@@ -103,10 +103,10 @@
 #define GVL_UNLOCK_BEGIN() do { \
   rb_thread_t *_th_stored = GET_THREAD(); \
   RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
-  native_mutex_unlock(&_th_stored->vm->global_vm_lock)
+  gvl_release(_th_stored->vm);
 
 #define GVL_UNLOCK_END() \
-  native_mutex_lock(&_th_stored->vm->global_vm_lock); \
+  gvl_acquire(_th_stored->vm, _th_stored); \
   rb_thread_set_current(_th_stored); \
 } while(0)
 
@@ -125,7 +125,7 @@
     (th)->status = THREAD_STOPPED; \
     thread_debug("enter blocking region (%p)\n", (void *)(th)); \
     RB_GC_SAVE_MACHINE_CONTEXT(th); \
-    native_mutex_unlock(&(th)->vm->global_vm_lock); \
+    gvl_release((th)->vm); \
   } while (0)
 
 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
@@ -247,6 +247,13 @@
 #endif
 
 void
+rb_vm_gvl_destroy(rb_vm_t *vm)
+{
+    gvl_release(vm);
+    gvl_destroy(vm);
+}
+
+void
 rb_thread_lock_unlock(rb_thread_lock_t *lock)
 {
     native_mutex_unlock(lock);
@@ -426,7 +433,7 @@
 #endif
     thread_debug("thread start: %p\n", (void *)th);
 
-    native_mutex_lock(&th->vm->global_vm_lock);
+    gvl_acquire(th->vm, th);
     {
 	thread_debug("thread start (get lock): %p\n", (void *)th);
 	rb_thread_set_current(th);
@@ -519,7 +526,7 @@
     }
     else {
 	thread_cleanup_func(th);
-	native_mutex_unlock(&th->vm->global_vm_lock);
+	gvl_release(th->vm);
     }
 
     return 0;
@@ -1002,11 +1009,11 @@
 	thread_debug("rb_thread_schedule/switch start\n");
 
 	RB_GC_SAVE_MACHINE_CONTEXT(th);
-	native_mutex_unlock(&th->vm->global_vm_lock);
+	gvl_release(th->vm);
 	{
 	    native_thread_yield();
 	}
-	native_mutex_lock(&th->vm->global_vm_lock);
+	gvl_acquire(th->vm, th);
 
 	rb_thread_set_current(th);
 	thread_debug("rb_thread_schedule/switch done\n");
@@ -1028,7 +1035,7 @@
 static inline void
 blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
 {
-    native_mutex_lock(&th->vm->global_vm_lock);
+    gvl_acquire(th->vm, th);
     rb_thread_set_current(th);
     thread_debug("leave blocking region (%p)\n", (void *)th);
     remove_signal_thread_list(th);
@@ -2753,7 +2760,7 @@
     VALUE thval = th->self;
     vm->main_thread = th;
 
-    native_mutex_reinitialize_atfork(&th->vm->global_vm_lock);
+    gvl_atfork(th->vm);
     st_foreach(vm->living_threads, atfork, (st_data_t)th);
     st_clear(vm->living_threads);
     st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
@@ -4297,6 +4304,7 @@
 #define rb_intern(str) rb_intern_const(str)
 
     VALUE cThGroup;
+    rb_thread_t *th = GET_THREAD();
 
     rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
     rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
@@ -4349,7 +4357,6 @@
     rb_define_method(cThGroup, "add", thgroup_add, 1);
 
     {
-	rb_thread_t *th = GET_THREAD();
 	th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
 	rb_define_const(cThGroup, "Default", th->thgroup);
     }
@@ -4376,10 +4383,9 @@
 	/* main thread setting */
 	{
 	    /* acquire global vm lock */
-	    rb_thread_lock_t *lp = &GET_THREAD()->vm->global_vm_lock;
-	    native_mutex_initialize(lp);
-	    native_mutex_lock(lp);
-	    native_mutex_initialize(&GET_THREAD()->interrupt_lock);
+	    gvl_init(th->vm);
+	    gvl_acquire(th->vm, th);
+	    native_mutex_initialize(&th->interrupt_lock);
 	}
     }
 
@@ -4502,3 +4508,4 @@
     GET_VM()->coverages = Qfalse;
     rb_remove_event_hook(update_coverage);
 }
+
Index: vm.c
===================================================================
--- vm.c	(revision 29955)
+++ vm.c	(revision 29956)
@@ -1537,8 +1537,7 @@
 	    st_free_table(vm->living_threads);
 	    vm->living_threads = 0;
 	}
-	rb_thread_lock_unlock(&vm->global_vm_lock);
-	rb_thread_lock_destroy(&vm->global_vm_lock);
+	rb_vm_gvl_destroy(vm);
 	ruby_xfree(vm);
 	ruby_current_vm = 0;
 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE

--
ML: ruby-changes@q...
Info: http://www.atdot.net/~ko1/quickml/

[前][次][番号順一覧][スレッド一覧]