ruby-changes:67248
From: Peter <ko1@a...>
Date: Wed, 25 Aug 2021 22:28:37 +0900 (JST)
Subject: [ruby-changes:67248] 62bc4a9420 (master): [Feature #18045] Implement size classes for GC
https://git.ruby-lang.org/ruby.git/commit/?id=62bc4a9420 From 62bc4a9420fa5786d49391a713bd38b09b8db0ff Mon Sep 17 00:00:00 2001 From: Peter Zhu <peter@p...> Date: Tue, 24 Aug 2021 13:14:23 -0400 Subject: [Feature #18045] Implement size classes for GC This commit implements size classes in the GC for the Variable Width Allocation feature. Unless the `USE_RVARGC` compile flag is set, only a single size class is created, maintaining current behaviour. See the redmine ticket for more details. Co-authored-by: Aaron Patterson <tenderlove@r...> --- class.c | 3 +- gc.c | 1078 ++++++++++++++++++++++++++++++++++++---------------- internal/gc.h | 2 +- misc/lldb_cruby.py | 7 +- 4 files changed, 755 insertions(+), 335 deletions(-) diff --git a/class.c b/class.c index 5fa51df..4c469ff 100644 --- a/class.c +++ b/class.c @@ -185,8 +185,7 @@ class_alloc(VALUE flags, VALUE klass) https://github.com/ruby/ruby/blob/trunk/class.c#L185 RVARGC_NEWOBJ_OF(obj, struct RClass, klass, (flags & T_MASK) | FL_PROMOTED1 /* start from age == 2 */ | (RGENGC_WB_PROTECTED_CLASS ? 
FL_WB_PROTECTED : 0), payload_size); #if USE_RVARGC - obj->ptr = (rb_classext_t *)rb_rvargc_payload_data_ptr((VALUE)obj + rb_slot_size()); - RB_OBJ_WRITTEN(obj, Qundef, (VALUE)obj + rb_slot_size()); + obj->ptr = (rb_classext_t *)rb_gc_rvargc_object_data((VALUE)obj); #else obj->ptr = ZALLOC(rb_classext_t); #endif diff --git a/gc.c b/gc.c index e410178..f7ce094 100644 --- a/gc.c +++ b/gc.c @@ -660,6 +660,14 @@ typedef struct mark_stack { https://github.com/ruby/ruby/blob/trunk/gc.c#L660 size_t unused_cache_size; } mark_stack_t; +#if USE_RVARGC +#define SIZE_POOL_COUNT 4 +#else +#define SIZE_POOL_COUNT 1 +#endif +#define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap) +#define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap) + typedef struct rb_heap_struct { struct heap_page *free_pages; struct list_head pages; @@ -673,6 +681,29 @@ typedef struct rb_heap_struct { https://github.com/ruby/ruby/blob/trunk/gc.c#L681 size_t total_slots; /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */ } rb_heap_t; +typedef struct rb_size_pool_struct { +#if USE_RVARGC + RVALUE *freelist; + struct heap_page *using_page; +#endif + + short slot_size; + + size_t allocatable_pages; + +#if USE_RVARGC + /* Sweeping statistics */ + size_t freed_slots; + size_t empty_slots; + + /* Global statistics */ + size_t force_major_gc_count; +#endif + + rb_heap_t eden_heap; + rb_heap_t tomb_heap; +} rb_size_pool_t; + enum gc_mode { gc_mode_none, gc_mode_marking, @@ -708,8 +739,7 @@ typedef struct rb_objspace { https://github.com/ruby/ruby/blob/trunk/gc.c#L739 size_t total_allocated_objects; VALUE next_object_id; - rb_heap_t eden_heap; - rb_heap_t tomb_heap; /* heap for zombies and ghosts */ + rb_size_pool_t size_pools[SIZE_POOL_COUNT]; struct { rb_atomic_t finalizing; @@ -869,6 +899,8 @@ struct heap_page { https://github.com/ruby/ruby/blob/trunk/gc.c#L899 unsigned int in_tomb : 1; } flags; + rb_size_pool_t *size_pool; + struct heap_page *free_next; RVALUE *start; RVALUE 
*freelist; @@ -921,12 +953,10 @@ VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress; https://github.com/ruby/ruby/blob/trunk/gc.c#L953 #define heap_pages_sorted_length objspace->heap_pages.sorted_length #define heap_pages_lomem objspace->heap_pages.range[0] #define heap_pages_himem objspace->heap_pages.range[1] -#define heap_allocatable_pages objspace->heap_pages.allocatable_pages #define heap_pages_freeable_pages objspace->heap_pages.freeable_pages #define heap_pages_final_slots objspace->heap_pages.final_slots #define heap_pages_deferred_final objspace->heap_pages.deferred_final -#define heap_eden (&objspace->eden_heap) -#define heap_tomb (&objspace->tomb_heap) +#define size_pools objspace->size_pools #define during_gc objspace->flags.during_gc #define finalizing objspace->atomic_flags.finalizing #define finalizer_table objspace->finalizer_table @@ -967,6 +997,69 @@ gc_mode_verify(enum gc_mode mode) https://github.com/ruby/ruby/blob/trunk/gc.c#L997 return mode; } +static inline bool +has_sweeping_pages(rb_objspace_t *objspace) +{ + for (int i = 0; i < SIZE_POOL_COUNT; i++) { + if (size_pools[i].eden_heap.sweeping_page) { + return TRUE; + } + } + return FALSE; +} + +static inline size_t +heap_eden_total_pages(rb_objspace_t *objspace) +{ + size_t count = 0; + for (int i = 0; i < SIZE_POOL_COUNT; i++) { + count += size_pools[i].eden_heap.total_pages; + } + return count; +} + +static inline size_t +heap_eden_total_slots(rb_objspace_t *objspace) +{ + size_t count = 0; + for (int i = 0; i < SIZE_POOL_COUNT; i++) { + count += size_pools[i].eden_heap.total_slots; + } + return count; +} + +static inline size_t +heap_tomb_total_pages(rb_objspace_t *objspace) +{ + size_t count = 0; + for (int i = 0; i < SIZE_POOL_COUNT; i++) { + count += size_pools[i].tomb_heap.total_pages; + } + return count; +} + +static inline size_t +heap_allocatable_pages(rb_objspace_t *objspace) +{ + size_t count = 0; + for (int i = 0; i < SIZE_POOL_COUNT; i++) { + count += 
size_pools[i].allocatable_pages; + } + return count; +} + +static inline size_t +heap_allocatable_slots(rb_objspace_t *objspace) +{ + size_t count = 0; + for (int i = 0; i < SIZE_POOL_COUNT; i++) { + rb_size_pool_t *size_pool = &size_pools[i]; + int slot_size_multiple = size_pool->slot_size / sizeof(RVALUE); + count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple; + } + return count; +} + #define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode) #define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode)) @@ -983,8 +1076,7 @@ gc_mode_verify(enum gc_mode mode) https://github.com/ruby/ruby/blob/trunk/gc.c#L1076 #else #define will_be_incremental_marking(objspace) FALSE #endif -#define has_sweeping_pages(heap) ((heap)->sweeping_page != 0) -#define is_lazy_sweeping(heap) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap)) +#define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace)) #if SIZEOF_LONG == SIZEOF_VOIDP # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG) @@ -1056,14 +1148,14 @@ static void gc_marks(rb_objspace_t *objspace, int full_mark); https://github.com/ruby/ruby/blob/trunk/gc.c#L1148 static void gc_marks_start(rb_objspace_t *objspace, int full); static int gc_marks_finish(rb_objspace_t *objspace); static void gc_marks_rest(rb_objspace_t *objspace); -static void gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap); +static void gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap); static void gc_sweep(rb_objspace_t *objspace); static void gc_sweep_start(rb_objspace_t *objspace); static void gc_sweep_finish(rb_objspace_t *objspace); -static int gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap); +static int gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap); static void gc_sweep_rest(rb_objspace_t *objspace); -static void 
gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap); +static void gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap); static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr); static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr); @@ -1285,13 +1377,16 @@ check_rvalue_consistency_force(const VALUE obj, int terminate) https://github.com/ruby/ruby/blob/trunk/gc.c#L1377 else if (!is_pointer_to_heap(objspace, (void *)obj)) { /* check if it is in tomb_pages */ struct heap_page *page = NULL; - list_for_each(&heap_tomb->pages, page, page_node) { - if (&page->start[0] <= (RVALUE *)obj && - (RVALUE *)obj < &page->start[page->total_slots]) { - fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n", - (void *)obj, (void *)page); - err++; - goto skip; + for (int i = 0; i < SIZE_POOL_COUNT; i++) { + rb_size_pool_t *size_pool = &size_pools[i]; + list_for_each(&size_pool->tomb_heap.pages, page, page_node) { + if (&page->start[0] <= (RVALUE *)obj && + (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * size_pool->slot_size))) { + fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n", + (void *)obj, (void *)page); + err++; + goto skip; + } } } bp(); @@ -1622,8 +1717,16 @@ rb_objspace_alloc(void) https://github.com/ruby/ruby/blob/trunk/gc.c#L1717 { rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t)); malloc_limit = gc_params.malloc_limit_min; - list_head_init(&objspace->eden_heap.pages); - list_head_init(&objspace->tomb_heap.pages); + + for (int i = 0; i < SIZE_POOL_COUNT; i++) { + rb_size_pool_t *size_pool = &size_pools[i]; + + size_pool->slot_size = sizeof(RVALUE) * (1 << i); + + list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages); + list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages); + } + dont_gc_on(); return objspace; @@ -1635,7 +1738,7 @@ static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page); 
https://github.com/ruby/ruby/blob/trunk/gc.c#L1738 void rb_objspace_free(rb_objspace_t *objspace) { - if (is_lazy_s (... truncated) -- ML: ruby-changes@q... Info: http://www.atdot.net/~ko1/quickml/