
ruby-changes:24575

From: nari <ko1@a...>
Date: Sun, 5 Aug 2012 19:39:50 +0900 (JST)
Subject: [ruby-changes:24575] nari:r36626 (trunk): * gc.c: just move functions and so on. I don't touch any internal

nari	2012-08-05 19:39:37 +0900 (Sun, 05 Aug 2012)

  New Revision: 36626

  http://svn.ruby-lang.org/cgi-bin/viewvc.cgi?view=rev&revision=36626

  Log:
    * gc.c: just move functions and so on. I don't touch any internal
      implementation.

  Modified files:
    trunk/ChangeLog
    trunk/gc.c

Index: ChangeLog
===================================================================
--- ChangeLog	(revision 36625)
+++ ChangeLog	(revision 36626)
@@ -1,3 +1,8 @@
+Sun Aug  5 19:31:57 2012  Narihiro Nakamura  <authornari@g...>
+
+	* gc.c: just move functions and so on. I don't touch any internal
+	  implementation.
+
 Sun Aug  5 13:22:29 2012  NARUSE, Yui  <naruse@r...>
 
 	* configure.in: use gcc-4.2 prior to clang, gcc, and cc if exist for
Index: gc.c
===================================================================
--- gc.c	(revision 36625)
+++ gc.c	(revision 36626)
@@ -114,8 +114,6 @@
 
 #define MARK_STACK_MAX 1024
 
-int ruby_gc_debug_indent = 0;
-
 #ifndef GC_PROFILE_MORE_DETAIL
 #define GC_PROFILE_MORE_DETAIL 0
 #endif
@@ -302,13 +300,69 @@
 
 #define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
 
+#define RANY(o) ((RVALUE*)(o))
+#define has_free_object (objspace->heap.free_slots && objspace->heap.free_slots->freelist)
+
 #define HEAP_HEADER(p) ((struct heaps_header *)(p))
+#define GET_HEAP_HEADER(x) (HEAP_HEADER((uintptr_t)(x) & ~(HEAP_ALIGN_MASK)))
+#define GET_HEAP_SLOT(x) (GET_HEAP_HEADER(x)->base)
+#define GET_HEAP_BITMAP(x) (GET_HEAP_HEADER(x)->bits)
+#define NUM_IN_SLOT(p) (((uintptr_t)(p) & HEAP_ALIGN_MASK)/sizeof(RVALUE))
+#define BITMAP_INDEX(p) (NUM_IN_SLOT(p) / (sizeof(uintptr_t) * 8))
+#define BITMAP_OFFSET(p) (NUM_IN_SLOT(p) & ((sizeof(uintptr_t) * 8)-1))
+#define MARKED_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] & ((uintptr_t)1 << BITMAP_OFFSET(p)))
 
+#ifndef HEAP_ALIGN_LOG
+/* default tiny heap size: 16KB */
+#define HEAP_ALIGN_LOG 14
+#endif
+
+#define HEAP_ALIGN (1UL << HEAP_ALIGN_LOG)
+#define HEAP_ALIGN_MASK (~(~0UL << HEAP_ALIGN_LOG))
+#define REQUIRED_SIZE_BY_MALLOC (sizeof(size_t) * 5)
+#define HEAP_SIZE (HEAP_ALIGN - REQUIRED_SIZE_BY_MALLOC)
+#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
+
+#define HEAP_OBJ_LIMIT (unsigned int)((HEAP_SIZE - sizeof(struct heaps_header))/sizeof(struct RVALUE))
+#define HEAP_BITMAP_LIMIT CEILDIV(CEILDIV(HEAP_SIZE, sizeof(struct RVALUE)), sizeof(uintptr_t)*8)
+
+int ruby_gc_debug_indent = 0;
+VALUE rb_mGC;
+extern st_table *rb_class_tbl;
+int ruby_disable_gc_stress = 0;
+
 static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
 static VALUE define_final0(VALUE obj, VALUE block);
 VALUE rb_define_final(VALUE obj, VALUE block);
 VALUE rb_undefine_final(VALUE obj);
+static void run_final(rb_objspace_t *objspace, VALUE obj);
+static void initial_expand_heap(rb_objspace_t *objspace);
 
+static void negative_size_allocation_error(const char *);
+static void *aligned_malloc(size_t, size_t);
+static void aligned_free(void *);
+
+static VALUE lazy_sweep_enable(void);
+static int garbage_collect(rb_objspace_t *);
+static int gc_lazy_sweep(rb_objspace_t *);
+static void mark_tbl(rb_objspace_t *, st_table *, int);
+
+static double getrusage_time(void);
+static inline void gc_prof_timer_start(rb_objspace_t *);
+static inline void gc_prof_timer_stop(rb_objspace_t *, int);
+static inline void gc_prof_mark_timer_start(rb_objspace_t *);
+static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
+static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
+static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
+static inline void gc_prof_set_malloc_info(rb_objspace_t *);
+static inline void gc_prof_inc_live_num(rb_objspace_t *);
+static inline void gc_prof_dec_live_num(rb_objspace_t *);
+
+
+/*
+  --------------------------- ObjectSpace -----------------------------
+*/
+
 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
 rb_objspace_t *
 rb_objspace_alloc(void)
@@ -322,49 +376,6 @@
 }
 #endif
 
-static void initial_expand_heap(rb_objspace_t *objspace);
-
-void
-rb_gc_set_params(void)
-{
-    char *malloc_limit_ptr, *heap_min_slots_ptr, *free_min_ptr;
-
-    if (rb_safe_level() > 0) return;
-
-    malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT");
-    if (malloc_limit_ptr != NULL) {
-	int malloc_limit_i = atoi(malloc_limit_ptr);
-	if (RTEST(ruby_verbose))
-	    fprintf(stderr, "malloc_limit=%d (%d)\n",
-		    malloc_limit_i, initial_malloc_limit);
-	if (malloc_limit_i > 0) {
-	    initial_malloc_limit = malloc_limit_i;
-	}
-    }
-
-    heap_min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS");
-    if (heap_min_slots_ptr != NULL) {
-	int heap_min_slots_i = atoi(heap_min_slots_ptr);
-	if (RTEST(ruby_verbose))
-	    fprintf(stderr, "heap_min_slots=%d (%d)\n",
-		    heap_min_slots_i, initial_heap_min_slots);
-	if (heap_min_slots_i > 0) {
-	    initial_heap_min_slots = heap_min_slots_i;
-            initial_expand_heap(&rb_objspace);
-	}
-    }
-
-    free_min_ptr = getenv("RUBY_FREE_MIN");
-    if (free_min_ptr != NULL) {
-	int free_min_i = atoi(free_min_ptr);
-	if (RTEST(ruby_verbose))
-	    fprintf(stderr, "free_min=%d (%d)\n", free_min_i, initial_free_min);
-	if (free_min_i > 0) {
-	    initial_free_min = free_min_i;
-	}
-    }
-}
-
 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
 static void gc_sweep(rb_objspace_t *);
 static void slot_sweep(rb_objspace_t *, struct heaps_slot *);
@@ -408,519 +419,13 @@
 }
 #endif
 
-#ifndef HEAP_ALIGN_LOG
-/* default tiny heap size: 16KB */
-#define HEAP_ALIGN_LOG 14
-#endif
-#define HEAP_ALIGN (1UL << HEAP_ALIGN_LOG)
-#define HEAP_ALIGN_MASK (~(~0UL << HEAP_ALIGN_LOG))
-#define REQUIRED_SIZE_BY_MALLOC (sizeof(size_t) * 5)
-#define HEAP_SIZE (HEAP_ALIGN - REQUIRED_SIZE_BY_MALLOC)
-#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
-
-#define HEAP_OBJ_LIMIT (unsigned int)((HEAP_SIZE - sizeof(struct heaps_header))/sizeof(struct RVALUE))
-#define HEAP_BITMAP_LIMIT CEILDIV(CEILDIV(HEAP_SIZE, sizeof(struct RVALUE)), sizeof(uintptr_t)*8)
-
-#define GET_HEAP_HEADER(x) (HEAP_HEADER((uintptr_t)(x) & ~(HEAP_ALIGN_MASK)))
-#define GET_HEAP_SLOT(x) (GET_HEAP_HEADER(x)->base)
-#define GET_HEAP_BITMAP(x) (GET_HEAP_HEADER(x)->bits)
-#define NUM_IN_SLOT(p) (((uintptr_t)(p) & HEAP_ALIGN_MASK)/sizeof(RVALUE))
-#define BITMAP_INDEX(p) (NUM_IN_SLOT(p) / (sizeof(uintptr_t) * 8))
-#define BITMAP_OFFSET(p) (NUM_IN_SLOT(p) & ((sizeof(uintptr_t) * 8)-1))
-#define MARKED_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] & ((uintptr_t)1 << BITMAP_OFFSET(p)))
-#define MARK_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] = bits[BITMAP_INDEX(p)] | ((uintptr_t)1 << BITMAP_OFFSET(p)))
-#define CLEAR_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] &= ~((uintptr_t)1 << BITMAP_OFFSET(p)))
-
-extern st_table *rb_class_tbl;
-
-int ruby_disable_gc_stress = 0;
-
-static void run_final(rb_objspace_t *objspace, VALUE obj);
-static int garbage_collect(rb_objspace_t *objspace);
-static int gc_lazy_sweep(rb_objspace_t *objspace);
-
-static double getrusage_time(void);
-static inline void gc_prof_timer_start(rb_objspace_t *);
-static inline void gc_prof_timer_stop(rb_objspace_t *, int);
-static inline void gc_prof_mark_timer_start(rb_objspace_t *);
-static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
-static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
-static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
-static inline void gc_prof_set_malloc_info(rb_objspace_t *);
-static inline void gc_prof_inc_live_num(rb_objspace_t *);
-static inline void gc_prof_dec_live_num(rb_objspace_t *);
-
 void
 rb_global_variable(VALUE *var)
 {
     rb_gc_register_address(var);
 }
 
-static void *
-ruby_memerror_body(void *dummy)
-{
-    rb_memerror();
-    return 0;
-}
-
 static void
-ruby_memerror(void)
-{
-    if (ruby_thread_has_gvl_p()) {
-	rb_memerror();
-    }
-    else {
-	if (ruby_native_thread_p()) {
-	    rb_thread_call_with_gvl(ruby_memerror_body, 0);
-	}
-	else {
-	    /* no ruby thread */
-	    fprintf(stderr, "[FATAL] failed to allocate memory\n");
-	    exit(EXIT_FAILURE);
-	}
-    }
-}
-
-void
-rb_memerror(void)
-{
-    rb_thread_t *th = GET_THREAD();
-    if (!nomem_error ||
-	(rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) {
-	fprintf(stderr, "[FATAL] failed to allocate memory\n");
-	exit(EXIT_FAILURE);
-    }
-    if (rb_thread_raised_p(th, RAISED_NOMEMORY)) {
-	rb_thread_raised_clear(th);
-	GET_THREAD()->errinfo = nomem_error;
-	JUMP_TAG(TAG_RAISE);
-    }
-    rb_thread_raised_set(th, RAISED_NOMEMORY);
-    rb_exc_raise(nomem_error);
-}
-
-/*
- *  call-seq:
- *    GC.stress                 -> true or false
- *
- *  returns current status of GC stress mode.
- */
-
-static VALUE
-gc_stress_get(VALUE self)
-{
-    rb_objspace_t *objspace = &rb_objspace;
-    return ruby_gc_stress ? Qtrue : Qfalse;
-}
-
-/*
- *  call-seq:
- *    GC.stress = bool          -> bool
- *
- *  Updates the GC stress mode.
- *
- *  When stress mode is enabled the GC is invoked at every GC opportunity:
- *  all memory and object allocations.
- *
- *  Enabling stress mode makes Ruby very slow, it is only for debugging.
- */
-
-static VALUE
-gc_stress_set(VALUE self, VALUE flag)
-{
-    rb_objspace_t *objspace = &rb_objspace;
-    rb_secure(2);
-    ruby_gc_stress = RTEST(flag);
-    return flag;
-}
-
-/*
- *  call-seq:
- *    GC::Profiler.enable?                 -> true or false
- *
- *  The current status of GC profile mode.
- */
-
-static VALUE
-gc_profile_enable_get(VALUE self)
-{
-    rb_objspace_t *objspace = &rb_objspace;
-    return objspace->profile.run ? Qtrue : Qfalse;
-}
-
-/*
- *  call-seq:
- *    GC::Profiler.enable          -> nil
- *
- *  Starts the GC profiler.
- *
- */
-
-static VALUE
-gc_profile_enable(void)
-{
-    rb_objspace_t *objspace = &rb_objspace;
-
-    objspace->profile.run = TRUE;
-    return Qnil;
-}
-
-/*
- *  call-seq:
- *    GC::Profiler.disable          -> nil
- *
- *  Stops the GC profiler.
- *
- */
-
-static VALUE
-gc_profile_disable(void)
-{
-    rb_objspace_t *objspace = &rb_objspace;
-
-    objspace->profile.run = FALSE;
-    return Qnil;
-}
-
-static void *
-negative_size_allocation_error_with_gvl(void *ptr)
-{
-    rb_raise(rb_eNoMemError, "%s", (const char *)ptr);
-    return 0; /* should not be reached */
-}
-
-static void
-negative_size_allocation_error(const char *msg)
-{
-    if (ruby_thread_has_gvl_p()) {
-	rb_raise(rb_eNoMemError, "%s", msg);
-    }
-    else {
-	if (ruby_native_thread_p()) {
-	    rb_thread_call_with_gvl(negative_size_allocation_error_with_gvl, (void *)msg);
-	}
-	else {
-	    fprintf(stderr, "[FATAL] %s\n", msg);
-	    exit(EXIT_FAILURE);
-	}
-    }
-}
-
-static void *
-gc_with_gvl(void *ptr)
-{
-    return (void *)(VALUE)garbage_collect((rb_objspace_t *)ptr);
-}
-
-static int
-garbage_collect_with_gvl(rb_objspace_t *objspace)
-{
-    if (dont_gc) return TRUE;
-    if (ruby_thread_has_gvl_p()) {
-	return garbage_collect(objspace);
-    }
-    else {
-	if (ruby_native_thread_p()) {
-	    return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)objspace);
-	}
-	else {
-	    /* no ruby thread */
-	    fprintf(stderr, "[FATAL] failed to allocate memory\n");
-	    exit(EXIT_FAILURE);
-	}
-    }
-}
-
-static void vm_xfree(rb_objspace_t *objspace, void *ptr);
-
-static inline size_t
-vm_malloc_prepare(rb_objspace_t *objspace, size_t size)
-{
-    if ((ssize_t)size < 0) {
-	negative_size_allocation_error("negative allocation size (or too big)");
-    }
-    if (size == 0) size = 1;
-
-#if CALC_EXACT_MALLOC_SIZE
-    size += sizeof(size_t);
-#endif
-
-    if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
-	(malloc_increase+size) > malloc_limit) {
-	garbage_collect_with_gvl(objspace);
-    }
-
-    return size;
-}
-
-static inline void *
-vm_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
-{
-    ATOMIC_SIZE_ADD(malloc_increase, size);
-
-#if CALC_EXACT_MALLOC_SIZE
-    ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, size);
-    ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
-    ((size_t *)mem)[0] = size;
-    mem = (size_t *)mem + 1;
-#endif
-
-    return mem;
-}
-
-#define TRY_WITH_GC(alloc) do { \
-	if (!(alloc) && \
-	    (!garbage_collect_with_gvl(objspace) || \
-	     !(alloc))) { \
-	    ruby_memerror(); \
-	} \
-    } while (0)
-
-static void *
-vm_xmalloc(rb_objspace_t *objspace, size_t size)
-{
-    void *mem;
-
-    size = vm_malloc_prepare(objspace, size);
-    TRY_WITH_GC(mem = malloc(size));
-    return vm_malloc_fixup(objspace, mem, size);
-}
-
-static void *
-vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
-{
-    void *mem;
-#if CALC_EXACT_MALLOC_SIZE
-    size_t oldsize;
-#endif
-
-    if ((ssize_t)size < 0) {
-	negative_size_allocation_error("negative re-allocation size");
-    }
-    if (!ptr) return vm_xmalloc(objspace, size);
-    if (size == 0) {
-	vm_xfree(objspace, ptr);
-	return 0;
-    }
-    if (ruby_gc_stress && !ruby_disable_gc_stress)
-	garbage_collect_with_gvl(objspace);
-
-#if CALC_EXACT_MALLOC_SIZE
-    size += sizeof(size_t);
-    ptr = (size_t *)ptr - 1;
-    oldsize = ((size_t *)ptr)[0];
-#endif
-
-    mem = realloc(ptr, size);
-    if (!mem) {
-	if (garbage_collect_with_gvl(objspace)) {
-	    mem = realloc(ptr, size);
-	}
-	if (!mem) {
-	    ruby_memerror();
-        }
-    }
-    ATOMIC_SIZE_ADD(malloc_increase, size);
-
-#if CALC_EXACT_MALLOC_SIZE
-    ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, size - oldsize);
-    ((size_t *)mem)[0] = size;
-    mem = (size_t *)mem + 1;
-#endif
-
-    return mem;
-}
-
-static void
-vm_xfree(rb_objspace_t *objspace, void *ptr)
-{
-#if CALC_EXACT_MALLOC_SIZE
-    size_t size;
-    ptr = ((size_t *)ptr) - 1;
-    size = ((size_t*)ptr)[0];
-    if (size) {
-	ATOMIC_SIZE_SUB(objspace->malloc_params.allocated_size, size);
-	ATOMIC_SIZE_DEC(objspace->malloc_params.allocations);
-    }
-#endif
-
-    free(ptr);
-}
-
-void *
-ruby_xmalloc(size_t size)
-{
-    return vm_xmalloc(&rb_objspace, size);
-}
-
-static inline size_t
-xmalloc2_size(size_t n, size_t size)
-{
-    size_t len = size * n;
-    if (n != 0 && size != len / n) {
-	rb_raise(rb_eArgError, "malloc: possible integer overflow");
-    }
-    return len;
-}
-
-void *
-ruby_xmalloc2(size_t n, size_t size)
-{
-    return vm_xmalloc(&rb_objspace, xmalloc2_size(n, size));
-}
-
-static void *
-vm_xcalloc(rb_objspace_t *objspace, size_t count, size_t elsize)
-{
-    void *mem;
-    size_t size;
-
-    size = xmalloc2_size(count, elsize);
-    size = vm_malloc_prepare(objspace, size);
-
-    TRY_WITH_GC(mem = calloc(1, size));
-    return vm_malloc_fixup(objspace, mem, size);
-}
-
-void *
-ruby_xcalloc(size_t n, size_t size)
-{
-    return vm_xcalloc(&rb_objspace, n, size);
-}
-
-void *
-ruby_xrealloc(void *ptr, size_t size)
-{
-    return vm_xrealloc(&rb_objspace, ptr, size);
-}
-
-void *
-ruby_xrealloc2(void *ptr, size_t n, size_t size)
-{
-    size_t len = size * n;
-    if (n != 0 && size != len / n) {
-	rb_raise(rb_eArgError, "realloc: possible integer overflow");
-    }
-    return ruby_xrealloc(ptr, len);
-}
-
-void
-ruby_xfree(void *x)
-{
-    if (x)
-	vm_xfree(&rb_objspace, x);
-}
-
-
-/* Mimic ruby_xmalloc, but need not rb_objspace.
- * should return pointer suitable for ruby_xfree
- */
-void *
-ruby_mimmalloc(size_t size)
-{
-    void *mem;
-#if CALC_EXACT_MALLOC_SIZE
-    size += sizeof(size_t);
-#endif
-    mem = malloc(size);
-#if CALC_EXACT_MALLOC_SIZE
-    /* set 0 for consistency of allocated_size/allocations */
-    ((size_t *)mem)[0] = 0;
-    mem = (size_t *)mem + 1;
-#endif
-    return mem;
-}
-
-/*
- *  call-seq:
- *     GC.enable    -> true or false
- *
- *  Enables garbage collection, returning <code>true</code> if garbage
- *  collection was previously disabled.
- *
- *     GC.disable   #=> false
- *     GC.enable    #=> true
- *     GC.enable    #=> false
- *
- */
-
-VALUE
-rb_gc_enable(void)
-{
-    rb_objspace_t *objspace = &rb_objspace;
-    int old = dont_gc;
-
-    dont_gc = FALSE;
-    return old ? Qtrue : Qfalse;
-}
-
-/*
- *  call-seq:
- *     GC.disable    -> true or false
- *
- *  Disables garbage collection, returning <code>true</code> if garbage
- *  collection was already disabled.
- *
- *     GC.disable   #=> false
- *     GC.disable   #=> true
- *
- */
-
-VALUE
-rb_gc_disable(void)
-{
-    rb_objspace_t *objspace = &rb_objspace;
-    int old = dont_gc;
-
-    dont_gc = TRUE;
-    return old ? Qtrue : Qfalse;
-}
-
-VALUE rb_mGC;
-
-void
-rb_gc_register_mark_object(VALUE obj)
-{
-    VALUE ary = GET_THREAD()->vm->mark_object_ary;
-    rb_ary_push(ary, obj);
-}
-
-void
-rb_gc_register_address(VALUE *addr)
-{
-    rb_objspace_t *objspace = &rb_objspace;
-    struct gc_list *tmp;
-
-    tmp = ALLOC(struct gc_list);
-    tmp->next = global_List;
-    tmp->varptr = addr;
-    global_List = tmp;
-}
-
-void
-rb_gc_unregister_address(VALUE *addr)
-{
-    rb_objspace_t *objspace = &rb_objspace;
-    struct gc_list *tmp = global_List;
-
-    if (tmp->varptr == addr) {
-	global_List = tmp->next;
-	xfree(tmp);
-	return;
-    }
-    while (tmp->next) {
-	if (tmp->next->varptr == addr) {
-	    struct gc_list *t = tmp->next;
-
-	    tmp->next = tmp->next->next;
-	    xfree(t);
-	    break;
-	}
-	tmp = tmp->next;
-    }
-}
-
-static void
 allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
 {
     struct sorted_heaps_slot *p;
@@ -955,56 +460,7 @@
     }
 }
 
-static void *
-aligned_malloc(size_t alignment, size_t size)
-{
-    void *res;
-
-#if defined __MINGW32__
-    res = __mingw_aligned_malloc(size, alignment);
-#elif defined _WIN32 && !defined __CYGWIN__
-    res = _aligned_malloc(size, alignment);
-#elif defined(HAVE_POSIX_MEMALIGN)
-    if (posix_memalign(&res, alignment, size) == 0) {
-        return res;
-    }
-    else {
-        return NULL;
-    }
-#elif defined(HAVE_MEMALIGN)
-    res = memalign(alignment, size);
-#else
-    char* aligned;
-    res = malloc(alignment + size + sizeof(void*));
-    aligned = (char*)res + alignment + sizeof(void*);
-    aligned -= ((VALUE)aligned & (alignment - 1));
-    ((void**)aligned)[-1] = res;
-    res = (void*)aligned;
-#endif
-
-#if defined(_DEBUG) || defined(GC_DEBUG)
-    /* alignment must be a power of 2 */
-    assert((alignment - 1) & alignment == 0);
-    assert(alignment % sizeof(void*) == 0);
-#endif
-    return res;
-}
-
 static void
-aligned_free(void *ptr)
-{
-#if defined __MINGW32__
-    __mingw_aligned_free(ptr);
-#elif defined _WIN32 && !defined __CYGWIN__
-    _aligned_free(ptr);
-#elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
-    free(ptr);
-#else
-    free(((void**)ptr)[-1]);
-#endif
-}
-
-static void
 link_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
 {
     slot->free_next = objspace->heap.free_slots;
@@ -1172,16 +628,6 @@
     return FALSE;
 }
 
-int
-rb_during_gc(void)
-{
-    rb_objspace_t *objspace = &rb_objspace;
-    return during_gc;
-}
-
-#define RANY(o) ((RVALUE*)(o))
-#define has_free_object (objspace->heap.free_slots && objspace->heap.free_slots->freelist)
-
 VALUE
 rb_newobj(void)
 {
@@ -1290,124 +736,9 @@
     }
 }
 
-#ifdef __ia64
-#define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
-#else
-#define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
-#endif
-
-#define STACK_START (th->machine_stack_start)
-#define STACK_END (th->machine_stack_end)
-#define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))
-
-#if STACK_GROW_DIRECTION < 0
-# define STACK_LENGTH  (size_t)(STACK_START - STACK_END)
-#elif STACK_GROW_DIRECTION > 0
-# define STACK_LENGTH  (size_t)(STACK_END - STACK_START + 1)
-#else
-# define STACK_LENGTH  ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
-			: (size_t)(STACK_END - STACK_START + 1))
-#endif
-#if !STACK_GROW_DIRECTION
-int ruby_stack_grow_direction;
-int
-ruby_get_stack_grow_direction(volatile VALUE *addr)
-{
-    VALUE *end;
-    SET_MACHINE_STACK_END(&end);
-
-    if (end > addr) return ruby_stack_grow_direction = 1;
-    return ruby_stack_grow_direction = -1;
-}
-#endif
-
-#define GC_LEVEL_MAX 250
-#define STACKFRAME_FOR_GC_MARK (GC_LEVEL_MAX * GC_MARK_STACKFRAME_WORD)
-
-size_t
-ruby_stack_length(VALUE **p)
-{
-    rb_thread_t *th = GET_THREAD();
-    SET_STACK_END;
-    if (p) *p = STACK_UPP (... truncated)
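
The bitmap-marking macros gathered near the top of gc.c by this move (NUM_IN_SLOT, BITMAP_INDEX, BITMAP_OFFSET, MARKED_IN_BITMAP) derive an object's mark bit from its address alone: heap slots sit on HEAP_ALIGN (16KB, HEAP_ALIGN_LOG 14) boundaries, so masking the low bits of a pointer gives its offset within the slot, which then selects a word and a bit in the slot header's bit array. Below is a minimal standalone sketch of that arithmetic; the fixed 40-byte RVALUE size is an illustration value only (the real sizeof(RVALUE) depends on the build), and the names are local to the sketch, not gc.c's.

/* Standalone sketch of the mark-bit arithmetic used by the macros above.
 * ALIGN_MASK mirrors HEAP_ALIGN_MASK (low 14 bits); RVALUE_SIZE is
 * hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_LOG   14
#define ALIGN_MASK  (~(~0UL << ALIGN_LOG))  /* low 14 bits of an address */
#define RVALUE_SIZE 40                      /* illustration only */

#define NUM_IN_SLOT(p) (((uintptr_t)(p) & ALIGN_MASK) / RVALUE_SIZE)
#define BM_INDEX(p)    (NUM_IN_SLOT(p) / (sizeof(uintptr_t) * 8))
#define BM_OFFSET(p)   (NUM_IN_SLOT(p) & ((sizeof(uintptr_t) * 8) - 1))

int main(void)
{
    uintptr_t bits[8] = {0};
    /* an object 5 RVALUEs into a slot based at a 16KB boundary */
    uintptr_t obj = (uintptr_t)0x4000 + 5 * RVALUE_SIZE;

    bits[BM_INDEX(obj)] |= (uintptr_t)1 << BM_OFFSET(obj); /* MARK_IN_BITMAP */
    printf("in-slot index=%zu word=%zu bit=%zu marked=%d\n",
           (size_t)NUM_IN_SLOT(obj), (size_t)BM_INDEX(obj),
           (size_t)BM_OFFSET(obj),
           (int)((bits[BM_INDEX(obj)] >> BM_OFFSET(obj)) & 1));
    return 0;
}

GET_HEAP_HEADER applies the same alignment trick in the opposite direction: clearing the low HEAP_ALIGN_LOG bits of any object pointer recovers the slot's heaps_header, and with it the bits array, without any lookup table.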
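
Another relocated helper, xmalloc2_size, guards the n * size multiplication behind ruby_xmalloc2 with the division test size != len / n, which catches products that wrapped around size_t. A self-contained demo of why that test works (values assume a 64-bit size_t; this is illustration code, not part of the commit):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the check in xmalloc2_size: a multiply that did not wrap
 * can be undone exactly by division. */
static int mul_overflows(size_t n, size_t size)
{
    size_t len = size * n;            /* wraps modulo 2^64 on overflow */
    return n != 0 && size != len / n;
}

int main(void)
{
    printf("%d\n", mul_overflows(1000, 8));         /* 0: fits */
    printf("%d\n", mul_overflows(SIZE_MAX / 2, 4)); /* 1: wrapped */
    return 0;
}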

