
ruby-changes:25023

From: nari <ko1@a...>
Date: Wed, 3 Oct 2012 21:30:33 +0900 (JST)
Subject: [ruby-changes:25023] nari:r37075 (trunk): * gc.c: Use the non-recursive marking instead of recursion. The

nari	2012-10-03 21:30:21 +0900 (Wed, 03 Oct 2012)

  New Revision: 37075

  http://svn.ruby-lang.org/cgi-bin/viewvc.cgi?view=rev&revision=37075

  Log:
    * gc.c: Use non-recursive marking instead of recursion. The
      recursive marking in CRuby needs a stack-overflow check and a
      fail-safe mechanism, but these do not work well in some cases,
      for example when marking deep tree structures (a minimal
      standalone sketch of the approach follows the file list below).
      [ruby-dev:46184] [Feature #7095]

    * configure.in (GC_MARK_STACKFRAME_WORD): removed. It was used by
      the stack-overflow check during marking.
    
    * win32/Makefile.sub (GC_MARK_STACKFRAME_WORD): ditto.

  Modified files:
    trunk/ChangeLog
    trunk/configure.in
    trunk/gc.c
    trunk/win32/Makefile.sub
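
  For readers following the diff below: the core of the change is that
  gc_mark() no longer calls gc_mark_children() recursively; instead it
  pushes the object onto an explicit mark stack built from fixed-size
  chunks, and a separate loop drains that stack. What follows is a
  minimal, self-contained sketch of that idea, not the committed gc.c
  code; the node_t type, the DEPTH constant and the mark() helper are
  invented here for illustration only.

#include <stdio.h>
#include <stdlib.h>

#define CHUNK_SIZE 500          /* same chunk size the patch uses */

typedef struct node {           /* stand-in for a heap object */
    struct node *left, *right;
    int marked;
} node_t;

typedef struct chunk {
    node_t *data[CHUNK_SIZE];
    struct chunk *next;
} chunk_t;

typedef struct {
    chunk_t *chunk;             /* top chunk of the mark stack */
    size_t index;               /* next free slot in the top chunk */
} mark_stack_t;

static void
push_mark_stack(mark_stack_t *st, node_t *obj)
{
    if (!st->chunk || st->index == CHUNK_SIZE) {
        chunk_t *c = malloc(sizeof(chunk_t));
        if (!c) abort();
        c->next = st->chunk;    /* grow the stack by one chunk */
        st->chunk = c;
        st->index = 0;
    }
    st->chunk->data[st->index++] = obj;
}

static int
pop_mark_stack(mark_stack_t *st, node_t **obj)
{
    if (!st->chunk) return 0;   /* stack is empty */
    *obj = st->chunk->data[--st->index];
    if (st->index == 0) {       /* top chunk drained: drop it */
        chunk_t *c = st->chunk;
        st->chunk = c->next;
        st->index = st->chunk ? CHUNK_SIZE : 0; /* lower chunks are always full */
        free(c);
    }
    return 1;
}

static void
mark(mark_stack_t *st, node_t *obj)
{
    if (!obj || obj->marked) return;    /* NULL or already marked */
    obj->marked = 1;
    push_mark_stack(st, obj);           /* children handled by the drain loop */
}

int
main(void)
{
    /* A degenerate "deep tree" (a linked list) this deep would overflow
       the C stack if it were marked by recursive function calls. */
    enum { DEPTH = 1000000 };
    node_t *nodes = calloc(DEPTH, sizeof(node_t));
    node_t *obj;
    mark_stack_t st = { NULL, 0 };
    size_t i, count = 0;

    if (!nodes) abort();
    for (i = 0; i + 1 < DEPTH; i++) nodes[i].left = &nodes[i + 1];

    mark(&st, &nodes[0]);
    while (pop_mark_stack(&st, &obj)) { /* drain instead of recursing */
        count++;
        mark(&st, obj->left);
        mark(&st, obj->right);
    }
    printf("marked %zu nodes without recursion\n", count);
    free(nodes);
    return 0;
}

  One difference from this sketch: in the actual patch, popped chunks
  are kept in a small cache (add_stack_chunk_cache /
  shrink_stack_chunk_cache) rather than freed immediately, which avoids
  malloc/free churn while marking.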

Index: configure.in
===================================================================
--- configure.in	(revision 37074)
+++ configure.in	(revision 37075)
@@ -1384,62 +1384,6 @@
   AC_DEFINE_UNQUOTED(STACK_END_ADDRESS, $rb_cv_stack_end_address)
 fi
 
-AC_CACHE_CHECK(for gc_mark and gc_children stack frame approximate size(word), rb_cv_gc_mark_stackframe_word,
-[save_CFLAGS="$CFLAGS"
-CFLAGS="-O0"
-AC_TRY_RUN([
-int word;
-char *stack_start;
-
-void
-set_stackframe_word()
-{
-    int dumy = 42;
-    int diff;
-
-    if (stack_start < (char *)&dumy) {
-        diff = (int)((char *)&dumy - stack_start);
-    }
-    else {
-        diff = (int)(stack_start - (char *)&dumy);
-    }
-    word = (diff/sizeof(void *));
-    if ((diff % sizeof(void *)) != 0) {
-        word++;
-    }
-}
-
-void
-gc_mark_children(void *p1, void *p2, int lev)
-{
-    void *obj = p2;
-
-    set_stackframe_word(p1,p2,lev);
-}
-
-void
-gc_mark(void *p1, void *p2, int lev)
-{
-    void *obj = p2;
-
-    gc_mark_children(p1,p2,lev++);
-}
-
-int
-main() {
-  int dumy = 42;
-
-  stack_start = (char *)&dumy;
-  gc_mark(0, 0, 255);
-  return word;
-}
-],
-  [rb_cv_gc_mark_stackframe_word="$?"],
-  [rb_cv_gc_mark_stackframe_word="$?"],
-  [rb_cv_gc_mark_stackframe_word="30"])
-CFLAGS="$save_CFLAGS"])
-AC_DEFINE_UNQUOTED(GC_MARK_STACKFRAME_WORD, $rb_cv_gc_mark_stackframe_word)
-
 AS_CASE(["$target_os"],
 [openbsd*], [
   AC_CACHE_CHECK(for heap align log on openbsd, rb_cv_page_size_log,
Index: ChangeLog
===================================================================
--- ChangeLog	(revision 37074)
+++ ChangeLog	(revision 37075)
@@ -1,3 +1,16 @@
+Wed Oct  3 19:51:57 2012  Narihiro Nakamura  <authornari@g...>
+
+	* gc.c: Use non-recursive marking instead of recursion. The
+	  recursive marking in CRuby needs a stack-overflow check and a
+	  fail-safe mechanism, but these do not work well in some cases,
+	  for example when marking deep tree structures. [ruby-dev:46184]
+	  [Feature #7095]
+
+	* configure.in (GC_MARK_STACKFRAME_WORD): removed. It was used by
+	  the stack-overflow check during marking.
+
+	* win32/Makefile.sub (GC_MARK_STACKFRAME_WORD): ditto.
+
 Wed Oct  3 15:33:02 2012  Nobuyoshi Nakada  <nobu@r...>
 
 	* thread_pthread.c (ruby_init_stack): use getrlimit() for the main
Index: win32/Makefile.sub
===================================================================
--- win32/Makefile.sub	(revision 37074)
+++ win32/Makefile.sub	(revision 37075)
@@ -607,7 +607,6 @@
 #define GETGROUPS_T int
 #define RETSIGTYPE void
 #define TYPEOF_TIMEVAL_TV_SEC long
-#define GC_MARK_STACKFRAME_WORD 30
 #define HAVE_ALLOCA 1
 #define HAVE_DUP2 1
 #define HAVE_MEMCMP 1
Index: gc.c
===================================================================
--- gc.c	(revision 37074)
+++ gc.c	(revision 37075)
@@ -112,8 +112,6 @@
 
 #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
 
-#define MARK_STACK_MAX 1024
-
 #ifndef GC_PROFILE_MORE_DETAIL
 #define GC_PROFILE_MORE_DETAIL 0
 #endif
@@ -208,6 +206,22 @@
     struct gc_list *next;
 };
 
+#define STACK_CHUNK_SIZE 500
+
+typedef struct stack_chunk {
+    VALUE data[STACK_CHUNK_SIZE];
+    struct stack_chunk *next;
+} stack_chunk_t;
+
+typedef struct mark_stack {
+    stack_chunk_t *chunk;
+    stack_chunk_t *cache;
+    size_t index;
+    size_t limit;
+    size_t cache_size;
+    size_t unused_cache_size;
+} mark_stack_t;
+
 #ifndef CALC_EXACT_MALLOC_SIZE
 #define CALC_EXACT_MALLOC_SIZE 0
 #endif
@@ -248,12 +262,8 @@
 	st_table *table;
 	RVALUE *deferred;
     } final;
+    mark_stack_t mark_stack;
     struct {
-	VALUE buffer[MARK_STACK_MAX];
-	VALUE *ptr;
-	int overflow;
-    } markstack;
-    struct {
 	int run;
 	gc_profile_record *record;
 	size_t count;
@@ -287,9 +297,6 @@
 #define finalizing		objspace->flags.finalizing
 #define finalizer_table 	objspace->final.table
 #define deferred_final_list	objspace->final.deferred
-#define mark_stack		objspace->markstack.buffer
-#define mark_stack_ptr		objspace->markstack.ptr
-#define mark_stack_overflow	objspace->markstack.overflow
 #define global_List		objspace->global_list
 #define ruby_gc_stress		objspace->gc_stress
 #define initial_malloc_limit	initial_params.initial_malloc_limit
@@ -342,10 +349,13 @@
 static void *aligned_malloc(size_t, size_t);
 static void aligned_free(void *);
 
+static void init_mark_stack(mark_stack_t *stack);
+static void free_stack_chunks(mark_stack_t *);
+
 static VALUE lazy_sweep_enable(void);
 static int garbage_collect(rb_objspace_t *);
 static int gc_lazy_sweep(rb_objspace_t *);
-static void mark_tbl(rb_objspace_t *, st_table *, int);
+static void mark_tbl(rb_objspace_t *, st_table *);
 static void rest_sweep(rb_objspace_t *);
 
 static double getrusage_time(void);
@@ -413,6 +423,7 @@
 	heaps_used = 0;
 	heaps = 0;
     }
+    free_stack_chunks(&objspace->mark_stack);
     free(objspace);
 }
 #endif
@@ -586,6 +597,7 @@
 
     objspace->profile.invoke_time = getrusage_time();
     finalizer_table = st_init_numtable();
+    init_mark_stack(&objspace->mark_stack);
 }
 
 static void
@@ -734,8 +746,8 @@
     }
 }
 
-static void gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev);
-static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev);
+static void gc_mark(rb_objspace_t *objspace, VALUE ptr);
+static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
 
 static inline int
 is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
@@ -1479,7 +1491,7 @@
 	/* XXX: this loop will make no sense */
 	/* because mark will not be removed */
 	finalize_deferred(objspace);
-	mark_tbl(objspace, finalizer_table, 0);
+	mark_tbl(objspace, finalizer_table);
 	st_foreach(finalizer_table, chain_finalized_object,
 		   (st_data_t)&deferred_final_list);
     } while (deferred_final_list);
@@ -2060,6 +2072,137 @@
     during_gc = 0;
 }
 
+/* Marking stack */
+
+static void push_mark_stack(mark_stack_t *, VALUE);
+static int pop_mark_stack(mark_stack_t *, VALUE *);
+static void shrink_stack_chunk_cache(mark_stack_t *stack);
+
+static stack_chunk_t *
+stack_chunk_alloc(void)
+{
+    stack_chunk_t *res;
+
+    res = malloc(sizeof(stack_chunk_t));
+    if (!res)
+        rb_memerror();
+
+    return res;
+}
+
+static inline int
+is_mark_stask_empty(mark_stack_t *stack)
+{
+    return stack->chunk == NULL;
+}
+
+static void
+add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
+{
+    chunk->next = stack->cache;
+    stack->cache = chunk;
+    stack->cache_size++;
+}
+
+static void
+shrink_stack_chunk_cache(mark_stack_t *stack)
+{
+    stack_chunk_t *chunk;
+
+    if (stack->unused_cache_size > (stack->cache_size/2)) {
+        chunk = stack->cache;
+        stack->cache = stack->cache->next;
+        stack->cache_size--;
+        free(chunk);
+    }
+    stack->unused_cache_size = stack->cache_size;
+}
+
+static void
+push_mark_stack_chunk(mark_stack_t *stack)
+{
+    stack_chunk_t *next;
+
+    assert(stack->index == stack->limit);
+    if (stack->cache_size > 0) {
+        next = stack->cache;
+        stack->cache = stack->cache->next;
+        stack->cache_size--;
+        if (stack->unused_cache_size > stack->cache_size)
+            stack->unused_cache_size = stack->cache_size;
+    }
+    else {
+        next = stack_chunk_alloc();
+    }
+    next->next = stack->chunk;
+    stack->chunk = next;
+    stack->index = 0;
+}
+
+static void
+pop_mark_stack_chunk(mark_stack_t *stack)
+{
+    stack_chunk_t *prev;
+
+    prev = stack->chunk->next;
+    assert(stack->index == 0);
+    add_stack_chunk_cache(stack, stack->chunk);
+    stack->chunk = prev;
+    stack->index = stack->limit;
+}
+
+static void
+free_stack_chunks(mark_stack_t *stack)
+{
+    stack_chunk_t *chunk = stack->chunk;
+    stack_chunk_t *next = NULL;
+
+    while (chunk != NULL) {
+        next = chunk->next;
+        free(chunk);
+        chunk = next;
+    }
+}
+
+static void
+push_mark_stack(mark_stack_t *stack, VALUE data)
+{
+    if (stack->index == stack->limit) {
+        push_mark_stack_chunk(stack);
+    }
+    stack->chunk->data[stack->index++] = data;
+}
+
+static int
+pop_mark_stack(mark_stack_t *stack, VALUE *data)
+{
+    if (is_mark_stask_empty(stack)) {
+        return FALSE;
+    }
+    if (stack->index == 1) {
+        *data = stack->chunk->data[--stack->index];
+        pop_mark_stack_chunk(stack);
+        return TRUE;
+    }
+    *data = stack->chunk->data[--stack->index];
+    return TRUE;
+}
+
+static void
+init_mark_stack(mark_stack_t *stack)
+{
+    int i;
+
+    push_mark_stack_chunk(stack);
+    stack->limit = STACK_CHUNK_SIZE;
+
+    for(i=0; i < 4; i++) {
+        add_stack_chunk_cache(stack, stack_chunk_alloc());
+    }
+    stack->unused_cache_size = stack->cache_size;
+}
+
+
 /* Marking */
 
 #define MARK_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] = bits[BITMAP_INDEX(p)] | ((uintptr_t)1 << BITMAP_OFFSET(p)))
@@ -2096,9 +2239,6 @@
 }
 #endif
 
-#define GC_LEVEL_MAX 250
-#define STACKFRAME_FOR_GC_MARK (GC_LEVEL_MAX * GC_MARK_STACKFRAME_WORD)
-
 size_t
 ruby_stack_length(VALUE **p)
 {
@@ -2108,6 +2248,7 @@
     return STACK_LENGTH;
 }
 
+#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
 static int
 stack_check(int water_mark)
 {
@@ -2123,6 +2264,7 @@
 #endif
     return ret;
 }
+#endif
 
 #define STACKFRAME_FOR_CALL_CFUNC 512
 
@@ -2137,13 +2279,6 @@
 }
 
 static void
-init_mark_stack(rb_objspace_t *objspace)
-{
-    mark_stack_overflow = 0;
-    mark_stack_ptr = mark_stack;
-}
-
-static void
 mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
 {
     VALUE v;
@@ -2151,7 +2286,7 @@
         v = *x;
         VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v));
 	if (is_pointer_to_heap(objspace, (void *)v)) {
-	    gc_mark(objspace, v, 0);
+	    gc_mark(objspace, v);
 	}
 	x++;
     }
@@ -2177,24 +2312,22 @@
 
 struct mark_tbl_arg {
     rb_objspace_t *objspace;
-    int lev;
 };
 
 static int
 mark_entry(st_data_t key, st_data_t value, st_data_t data)
 {
     struct mark_tbl_arg *arg = (void*)data;
-    gc_mark(arg->objspace, (VALUE)value, arg->lev);
+    gc_mark(arg->objspace, (VALUE)value);
     return ST_CONTINUE;
 }
 
 static void
-mark_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
+mark_tbl(rb_objspace_t *objspace, st_table *tbl)
 {
     struct mark_tbl_arg arg;
     if (!tbl || tbl->num_entries == 0) return;
     arg.objspace = objspace;
-    arg.lev = lev;
     st_foreach(tbl, mark_entry, (st_data_t)&arg);
 }
 
@@ -2202,68 +2335,66 @@
 mark_key(st_data_t key, st_data_t value, st_data_t data)
 {
     struct mark_tbl_arg *arg = (void*)data;
-    gc_mark(arg->objspace, (VALUE)key, arg->lev);
+    gc_mark(arg->objspace, (VALUE)key);
     return ST_CONTINUE;
 }
 
 static void
-mark_set(rb_objspace_t *objspace, st_table *tbl, int lev)
+mark_set(rb_objspace_t *objspace, st_table *tbl)
 {
     struct mark_tbl_arg arg;
     if (!tbl) return;
     arg.objspace = objspace;
-    arg.lev = lev;
     st_foreach(tbl, mark_key, (st_data_t)&arg);
 }
 
 void
 rb_mark_set(st_table *tbl)
 {
-    mark_set(&rb_objspace, tbl, 0);
+    mark_set(&rb_objspace, tbl);
 }
 
 static int
 mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
 {
     struct mark_tbl_arg *arg = (void*)data;
-    gc_mark(arg->objspace, (VALUE)key, arg->lev);
-    gc_mark(arg->objspace, (VALUE)value, arg->lev);
+    gc_mark(arg->objspace, (VALUE)key);
+    gc_mark(arg->objspace, (VALUE)value);
     return ST_CONTINUE;
 }
 
 static void
-mark_hash(rb_objspace_t *objspace, st_table *tbl, int lev)
+mark_hash(rb_objspace_t *objspace, st_table *tbl)
 {
     struct mark_tbl_arg arg;
     if (!tbl) return;
     arg.objspace = objspace;
-    arg.lev = lev;
     st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
 }
 
 void
 rb_mark_hash(st_table *tbl)
 {
-    mark_hash(&rb_objspace, tbl, 0);
+    mark_hash(&rb_objspace, tbl);
 }
 
 static void
-mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me, int lev)
+mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
 {
     const rb_method_definition_t *def = me->def;
 
-    gc_mark(objspace, me->klass, lev);
+    gc_mark(objspace, me->klass);
     if (!def) return;
     switch (def->type) {
       case VM_METHOD_TYPE_ISEQ:
-	gc_mark(objspace, def->body.iseq->self, lev);
+	gc_mark(objspace, def->body.iseq->self);
 	break;
       case VM_METHOD_TYPE_BMETHOD:
-	gc_mark(objspace, def->body.proc, lev);
+	gc_mark(objspace, def->body.proc);
 	break;
       case VM_METHOD_TYPE_ATTRSET:
       case VM_METHOD_TYPE_IVAR:
-	gc_mark(objspace, def->body.attr.location, lev);
+	gc_mark(objspace, def->body.attr.location);
 	break;
       default:
 	break; /* ignore */
@@ -2273,24 +2404,23 @@
 void
 rb_mark_method_entry(const rb_method_entry_t *me)
 {
-    mark_method_entry(&rb_objspace, me, 0);
+    mark_method_entry(&rb_objspace, me);
 }
 
 static int
 mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
 {
     struct mark_tbl_arg *arg = (void*)data;
-    mark_method_entry(arg->objspace, me, arg->lev);
+    mark_method_entry(arg->objspace, me);
     return ST_CONTINUE;
 }
 
 static void
-mark_m_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
+mark_m_tbl(rb_objspace_t *objspace, st_table *tbl)
 {
     struct mark_tbl_arg arg;
     if (!tbl) return;
     arg.objspace = objspace;
-    arg.lev = lev;
     st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg);
 }
 
@@ -2298,18 +2428,17 @@
 mark_const_entry_i(ID key, const rb_const_entry_t *ce, st_data_t data)
 {
     struct mark_tbl_arg *arg = (void*)data;
-    gc_mark(arg->objspace, ce->value, arg->lev);
-    gc_mark(arg->objspace, ce->file, arg->lev);
+    gc_mark(arg->objspace, ce->value);
+    gc_mark(arg->objspace, ce->file);
     return ST_CONTINUE;
 }
 
 static void
-mark_const_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
+mark_const_tbl(rb_objspace_t *objspace, st_table *tbl)
 {
     struct mark_tbl_arg arg;
     if (!tbl) return;
     arg.objspace = objspace;
-    arg.lev = lev;
     st_foreach(tbl, mark_const_entry_i, (st_data_t)&arg);
 }
 
@@ -2369,14 +2498,14 @@
 void
 rb_mark_tbl(st_table *tbl)
 {
-    mark_tbl(&rb_objspace, tbl, 0);
+    mark_tbl(&rb_objspace, tbl);
 }
 
 void
 rb_gc_mark_maybe(VALUE obj)
 {
     if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
-	gc_mark(&rb_objspace, obj, 0);
+	gc_mark(&rb_objspace, obj);
     }
 }
 
@@ -2391,7 +2520,7 @@
 }
 
 static void
-gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev)
+gc_mark(rb_objspace_t *objspace, VALUE ptr)
 {
     register RVALUE *obj;
 
@@ -2400,33 +2529,21 @@
     if (obj->as.basic.flags == 0) return;       /* free cell */
     if (!gc_mark_ptr(objspace, ptr)) return;	/* already marked */
 
-    if (lev > GC_LEVEL_MAX || (lev == 0 && stack_check(STACKFRAME_FOR_GC_MARK))) {
-	if (!mark_stack_overflow) {
-	    if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) {
-		*mark_stack_ptr = ptr;
-		mark_stack_ptr++;
-	    }
-	    else {
-		mark_stack_overflow = 1;
-	    }
-	}
-	return;
-    }
-    gc_mark_children(objspace, ptr, lev+1);
+    push_mark_stack(&objspace->mark_stack, ptr);
 }
 
 void
 rb_gc_mark(VALUE ptr)
 {
-    gc_mark(&rb_objspace, ptr, 0);
+    gc_mark(&rb_objspace, ptr);
 }
 
 static void
-gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev)
+gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
 {
     register RVALUE *obj = RANY(ptr);
 
-    goto marking;		/* skip */
+    goto marking;              /* skip */
 
   again:
     obj = RANY(ptr);
@@ -2456,7 +2573,7 @@
 	  case NODE_RESBODY:
 	  case NODE_CLASS:
 	  case NODE_BLOCK_PASS:
-	    gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
+            gc_mark(objspace, (VALUE)obj->as.node.u2.node);
 	    /* fall through */
 	  case NODE_BLOCK:	/* 1,3 */
 	  case NODE_ARRAY:
@@ -2468,14 +2585,14 @@
 	  case NODE_CALL:
 	  case NODE_DEFS:
 	  case NODE_OP_ASGN1:
-	    gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
+            gc_mark(objspace, (VALUE)obj->as.node.u1.node);
 	    /* fall through */
 	  case NODE_SUPER:	/* 3 */
 	  case NODE_FCALL:
 	  case NODE_DEFN:
 	  case NODE_ARGS_AUX:
-	    ptr = (VALUE)obj->as.node.u3.node;
-	    goto again;
+            ptr = (VALUE)obj->as.node.u3.node;
+            goto again;
 
 	  case NODE_WHILE:	/* 1,2 */
 	  case NODE_UNTIL:
@@ -2495,7 +2612,7 @@
 	  case NODE_ALIAS:
 	  case NODE_VALIAS:
 	  case NODE_ARGSCAT:
-	    gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
+            gc_mark(objspace, (VALUE)obj->as.node.u1.node);
 	    /* fall through */
 	  case NODE_GASGN:	/* 2 */
 	  case NODE_LASGN:
@@ -2509,8 +2626,8 @@
 	  case NODE_EVSTR:
 	  case NODE_UNDEF:
 	  case NODE_POSTEXE:
-	    ptr = (VALUE)obj->as.node.u2.node;
-	    goto again;
+            ptr = (VALUE)obj->as.node.u2.node;
+            goto again;
 
 	  case NODE_HASH:	/* 1 */
 	  case NODE_LIT:
@@ -2525,29 +2642,29 @@
 	  case NODE_COLON2:
 	  case NODE_SPLAT:
 	  case NODE_TO_ARY:
-	    ptr = (VALUE)obj->as.node.u1.node;
-	    goto again;
+            ptr = (VALUE)obj->as.node.u1.node;
+            goto again;
 
 	  case NODE_SCOPE:	/* 2,3 */
 	  case NODE_CDECL:
 	  case NODE_OPT_ARG:
-	    gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
-	    ptr = (VALUE)obj->as.node.u2.node;
-	    goto again;
+            gc_mark(objspace, (VALUE)obj->as.node.u3.node);
+            ptr = (VALUE)obj->as.node.u2.node;
+            goto again;
 
 	  case NODE_ARGS:	/* custom */
 	    {
 		struct rb_args_info *args = obj->as.node.u3.args;
 		if (args) {
-		    if (args->pre_init)    gc_mark(objspace, (VALUE)args->pre_init, lev);
-		    if (args->post_init)   gc_mark(objspace, (VALUE)args->post_init, lev);
-		    if (args->opt_args)    gc_mark(objspace, (VALUE)args->opt_args, lev);
-		    if (args->kw_args)     gc_mark(objspace, (VALUE)args->kw_args, lev);
-		    if (args->kw_rest_arg) gc_mark(objspace, (VALUE)args->kw_rest_arg, lev);
+		    if (args->pre_init)    gc_mark(objspace, (VALUE)args->pre_init);
+		    if (args->post_init)   gc_mark(objspace, (VALUE)args->post_init);
+		    if (args->opt_args)    gc_mark(objspace, (VALUE)args->opt_args);
+		    if (args->kw_args)     gc_mark(objspace, (VALUE)args->kw_args);
+		    if (args->kw_rest_arg) gc_mark(objspace, (VALUE)args->kw_rest_arg);
 		}
 	    }
-	    ptr = (VALUE)obj->as.node.u2.node;
-	    goto again;
+            ptr = (VALUE)obj->as.node.u2.node;
+            goto again;
 
 	  case NODE_ZARRAY:	/* - */
 	  case NODE_ZSUPER:
@@ -2572,65 +2689,65 @@
 	    mark_locations_array(objspace,
 				 (VALUE*)obj->as.node.u1.value,
 				 obj->as.node.u3.cnt);
-	    ptr = (VALUE)obj->as.node.u2.node;
-	    goto again;
+            gc_mark(objspace, (VALUE)obj->as.node.u2.node);
+	    break;
 
 	  case NODE_CREF:
-	    gc_mark(objspace, obj->as.node.nd_omod, lev);
-	    gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
-	    ptr = (VALUE)obj->as.node.u3.node;
-	    goto again;
+            gc_mark(objspace, obj->as.node.nd_omod);
+            gc_mark(objspace, (VALUE)obj->as.node.u1.node);
+            ptr = (VALUE)obj->as.node.u3.node;
+            goto again;
 
 	  default:		/* unlisted NODE */
 	    if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) {
-		gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
+                gc_mark(objspace, (VALUE)obj->as.node.u1.node);
 	    }
 	    if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) {
-		gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
+                gc_mark(objspace, (VALUE)obj->as.node.u2.node);
 	    }
 	    if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) {
-		gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
+                gc_mark(objspace, (VALUE)obj->as.node.u3.node);
 	    }
 	}
 	return;			/* no need to mark class. */
     }
 
-    gc_mark(objspace, obj->as.basic.klass, lev);
+    gc_mark(objspace, obj->as.basic.klass);
     switch (BUILTIN_TYPE(obj)) {
       case T_ICLASS:
       case T_CLASS:
       case T_MODULE:
-	mark_m_tbl(objspace, RCLASS_M (... truncated)

--
ML: ruby-changes@q...
Info: http://www.atdot.net/~ko1/quickml/
