ruby-changes:71076
From: Peter <ko1@a...>
Date: Wed, 2 Feb 2022 23:52:16 +0900 (JST)
Subject: [ruby-changes:71076] 7b77d46671 (master): Decouple GC slot sizes from RVALUE
https://git.ruby-lang.org/ruby.git/commit/?id=7b77d46671

From 7b77d46671685c837adc33b39ae0210e04cd8b24 Mon Sep 17 00:00:00 2001
From: Peter Zhu <peter@p...>
Date: Tue, 1 Feb 2022 09:25:12 -0500
Subject: Decouple GC slot sizes from RVALUE

Add a new macro BASE_SLOT_SIZE that determines the slot size.

For Variable Width Allocation (compiled with USE_RVARGC=1), all slot
sizes are powers-of-2 multiples of BASE_SLOT_SIZE.

For USE_RVARGC=0, BASE_SLOT_SIZE is set to sizeof(RVALUE).
---
 gc.c                               | 157 ++++++++++++++++++++-----------
 test/-ext-/string/test_capacity.rb |   9 ++-
 test/-ext-/string/test_set_len.rb  |   8 +-
 test/objspace/test_objspace.rb     |   2 +-
 test/ruby/test_file_exhaustive.rb  |   4 +-
 test/ruby/test_gc.rb               |   2 +-
 test/ruby/test_time.rb             |  20 ++---
 7 files changed, 109 insertions(+), 93 deletions(-)

diff --git a/gc.c b/gc.c
index b06ebac446..4eaceef807 100644
--- a/gc.c
+++ b/gc.c
@@ -676,7 +676,7 @@ typedef struct rb_heap_struct { https://github.com/ruby/ruby/blob/trunk/gc.c#L676
     struct list_head pages;
     struct heap_page *sweeping_page; /* iterator for .pages */
     struct heap_page *compact_cursor;
-    RVALUE * compact_cursor_index;
+    uintptr_t compact_cursor_index;
 #if GC_ENABLE_INCREMENTAL_MARK
     struct heap_page *pooled_pages;
 #endif
@@ -752,7 +752,7 @@ typedef struct rb_objspace { https://github.com/ruby/ruby/blob/trunk/gc.c#L752
         size_t allocated_pages;
         size_t allocatable_pages;
         size_t sorted_length;
-        RVALUE *range[2];
+        uintptr_t range[2];
         size_t freeable_pages;

        /* final */
@@ -865,13 +865,16 @@ typedef struct rb_objspace { https://github.com/ruby/ruby/blob/trunk/gc.c#L865
 /* default tiny heap size: 16KB */
 #define HEAP_PAGE_ALIGN_LOG 14
 #endif
+
+#define BASE_SLOT_SIZE sizeof(RVALUE)
+
 #define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
 enum {
     HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
     HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
     HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
-    HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
-    HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, sizeof(struct RVALUE)), BITS_BITLENGTH),
+    HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)) / BASE_SLOT_SIZE),
+    HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, BASE_SLOT_SIZE), BITS_BITLENGTH),
     HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
 };
 #define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
@@ -910,7 +913,7 @@ struct heap_page { https://github.com/ruby/ruby/blob/trunk/gc.c#L913
     rb_size_pool_t *size_pool;

     struct heap_page *free_next;
-    RVALUE *start;
+    uintptr_t start;
     RVALUE *freelist;
     struct list_node page_node;

@@ -928,7 +931,7 @@ struct heap_page { https://github.com/ruby/ruby/blob/trunk/gc.c#L931
 #define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
 #define GET_HEAP_PAGE(x)   (GET_PAGE_HEADER(x)->page)

-#define NUM_IN_PAGE(p)   (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
+#define NUM_IN_PAGE(p)   (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK) / BASE_SLOT_SIZE)
 #define BITMAP_INDEX(p)  (NUM_IN_PAGE(p) / BITS_BITLENGTH )
 #define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
 #define BITMAP_BIT(p)    ((bits_t)1 << BITMAP_OFFSET(p))
@@ -1062,7 +1065,7 @@ heap_allocatable_slots(rb_objspace_t *objspace) https://github.com/ruby/ruby/blob/trunk/gc.c#L1065
     size_t count = 0;
     for (int i = 0; i < SIZE_POOL_COUNT; i++) {
         rb_size_pool_t *size_pool = &size_pools[i];
-        int slot_size_multiple = size_pool->slot_size / sizeof(RVALUE);
+        int slot_size_multiple = size_pool->slot_size / BASE_SLOT_SIZE;
         count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;
     }
     return count;
@@ -1396,8 +1399,8 @@ check_rvalue_consistency_force(const VALUE obj, int terminate) https://github.com/ruby/ruby/blob/trunk/gc.c#L1399
         for (int i = 0; i < SIZE_POOL_COUNT; i++) {
             rb_size_pool_t *size_pool = &size_pools[i];
             list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
-                if (&page->start[0] <= (RVALUE *)obj &&
-                    (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * size_pool->slot_size))) {
+                if (page->start <= (uintptr_t)obj &&
+                    (uintptr_t)obj < (page->start + (page->total_slots * size_pool->slot_size))) {
                     fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
                             (void *)obj, (void *)page);
                     err++;
@@ -1738,7 +1741,7 @@ rb_objspace_alloc(void) https://github.com/ruby/ruby/blob/trunk/gc.c#L1741
     for (int i = 0; i < SIZE_POOL_COUNT; i++) {
         rb_size_pool_t *size_pool = &size_pools[i];

-        size_pool->slot_size = sizeof(RVALUE) * (1 << i);
+        size_pool->slot_size = (1 << i) * BASE_SLOT_SIZE;

         list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
         list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
@@ -1865,9 +1868,9 @@ heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj https://github.com/ruby/ruby/blob/trunk/gc.c#L1868

     if (RGENGC_CHECK_MODE &&
         /* obj should belong to page */
-        !(&page->start[0] <= (RVALUE *)obj &&
+        !(page->start <= (uintptr_t)obj &&
           (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
-          obj % sizeof(RVALUE) == 0)) {
+          obj % BASE_SLOT_SIZE == 0)) {
         rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
     }

@@ -1956,8 +1959,8 @@ heap_pages_free_unused_pages(rb_objspace_t *objspace) https://github.com/ruby/ruby/blob/trunk/gc.c#L1959

         struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
         uintptr_t himem = (uintptr_t)hipage->start + (hipage->total_slots * hipage->slot_size);
-        GC_ASSERT(himem <= (uintptr_t)heap_pages_himem);
-        heap_pages_himem = (RVALUE *)himem;
+        GC_ASSERT(himem <= heap_pages_himem);
+        heap_pages_himem = himem;

         GC_ASSERT(j == heap_allocated_pages);
     }
@@ -1989,8 +1992,8 @@ heap_page_allocate(rb_objspace_t *objspace, rb_size_pool_t *size_pool) https://github.com/ruby/ruby/blob/trunk/gc.c#L1992

     /* adjust obj_limit (object number available in this page) */
     start = (uintptr_t)((VALUE)page_body + sizeof(struct heap_page_header));
-    if ((VALUE)start % sizeof(RVALUE) != 0) {
-        int delta = (int)sizeof(RVALUE) - (start % (int)sizeof(RVALUE));
+    if ((VALUE)start % BASE_SLOT_SIZE != 0) {
+        int delta = BASE_SLOT_SIZE - (start % BASE_SLOT_SIZE);
         start = start + delta;

         GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
@@ -1999,10 +2002,10 @@ heap_page_allocate(rb_objspace_t *objspace, rb_size_pool_t *size_pool) https://github.com/ruby/ruby/blob/trunk/gc.c#L2002
          * In other words, ensure there are an even number of objects
          * per bit plane. */
         if (NUM_IN_PAGE(start) == 1) {
-            start += stride - sizeof(RVALUE);
+            start += stride - BASE_SLOT_SIZE;
         }

-        GC_ASSERT(NUM_IN_PAGE(start) * sizeof(RVALUE) % stride == 0);
+        GC_ASSERT(NUM_IN_PAGE(start) * BASE_SLOT_SIZE % stride == 0);

         limit = (HEAP_PAGE_SIZE - (int)(start - (uintptr_t)page_body))/(int)stride;
     }
@@ -2046,10 +2049,10 @@ heap_page_allocate(rb_objspace_t *objspace, rb_size_pool_t *size_pool) https://github.com/ruby/ruby/blob/trunk/gc.c#L2049
                heap_allocated_pages, heap_pages_sorted_length);
     }

-    if (heap_pages_lomem == 0 || (uintptr_t)heap_pages_lomem > start) heap_pages_lomem = (RVALUE *)start;
-    if ((uintptr_t)heap_pages_himem < end) heap_pages_himem = (RVALUE *)end;
+    if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
+    if (heap_pages_himem < end) heap_pages_himem = end;

-    page->start = (RVALUE *)start;
+    page->start = start;
     page->total_slots = limit;
     page->slot_size = size_pool->slot_size;
     page->size_pool = size_pool;
@@ -2352,7 +2355,7 @@ size_pool_slot_size(unsigned char pool_id) https://github.com/ruby/ruby/blob/trunk/gc.c#L2355
 {
     GC_ASSERT(pool_id < SIZE_POOL_COUNT);

-    size_t slot_size = (1 << pool_id) * sizeof(RVALUE);
+    size_t slot_size = (1 << pool_id) * BASE_SLOT_SIZE;

 #if RGENGC_CHECK_MODE
     rb_objspace_t *objspace = &rb_objspace;
@@ -2458,14 +2461,21 @@ static inline size_t https://github.com/ruby/ruby/blob/trunk/gc.c#L2461
 size_pool_idx_for_size(size_t size)
 {
 #if USE_RVARGC
-    size_t slot_count = CEILDIV(size, sizeof(RVALUE));
+    size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);

     /* size_pool_idx is ceil(log2(slot_count)) */
     size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
+
     if (size_pool_idx >= SIZE_POOL_COUNT) {
         rb_bug("size_pool_idx_for_size: allocation size too large");
     }

+#if RGENGC_CHECK_MODE
+    rb_objspace_t *objspace = &rb_objspace;
+    GC_ASSERT(size <= (size_t)size_pools[size_pool_idx].slot_size);
+    if (size_pool_idx > 0) GC_ASSERT(size > (size_t)size_pools[size_pool_idx - 1].slot_size);
+#endif
+
     return size_pool_idx;
 #else
     GC_ASSERT(size <= sizeof(RVALUE));
@@ -2853,7 +2863,7 @@ PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr https://github.com/ruby/ruby/blob/trunk/gc.c#L2863
 static inline int
 is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
 {
-    register RVALUE *p = RANY(ptr);
+    register uintptr_t p = (uintptr_t)ptr;
     register struct heap_page *page;

     RB_DEBUG_COUNTER_INC(gc_isptr_trial);
@@ -2861,7 +2871,7 @@ is_pointer_to_heap(rb_objspace_t *objspace, void *ptr) https://github.com/ruby/ruby/blob/trunk/gc.c#L2871
     if (p < heap_pages_lom
(... truncated)

-- 
ML: ruby-changes@q...
Info: http://www.atdot.net/~ko1/quickml/
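For readers who want to experiment with the size-class arithmetic this
patch introduces, below is a minimal standalone C sketch of the
USE_RVARGC=1 mapping from an allocation size to a size pool. It is not
the gc.c code itself: BASE_SLOT_SIZE is hard-coded to 40 bytes (a
typical sizeof(RVALUE) on 64-bit builds, assumed here), SIZE_POOL_COUNT
to 5, and Ruby's internal nlz_int64() is replaced by a portable bit
loop named nlz64().

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the values gc.c derives; sizeof(RVALUE) being 40 bytes
 * and there being 5 size pools are assumptions for this demo only. */
#define BASE_SLOT_SIZE  40
#define SIZE_POOL_COUNT 5
#define CEILDIV(i, mod) (((i) + (mod) - 1) / (mod))

/* Portable stand-in for Ruby's internal nlz_int64(): the number of
 * leading zero bits in a 64-bit value (returns 64 for zero). */
static int
nlz64(uint64_t x)
{
    int n = 64;
    while (x) { x >>= 1; n--; }
    return n;
}

/* Mirrors the patched size_pool_idx_for_size(): round the request up
 * to a whole number of base slots, then take ceil(log2) of that count
 * to pick the power-of-2 size pool. */
static size_t
size_pool_idx_for_size(size_t size)
{
    size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);
    size_t idx = 64 - nlz64(slot_count - 1); /* ceil(log2(slot_count)) */
    return idx; /* gc.c calls rb_bug() when idx >= SIZE_POOL_COUNT */
}

int
main(void)
{
    /* Slot sizes are (1 << i) * BASE_SLOT_SIZE: 40, 80, 160, 320, 640. */
    for (size_t size = 1; size <= 640; size += 39) {
        size_t idx = size_pool_idx_for_size(size);
        printf("size %3zu -> pool %zu (slot size %zu)\n",
               size, idx, (size_t)((1 << idx) * BASE_SLOT_SIZE));
    }
    return 0;
}

Running it shows, for example, that a 118-byte request needs 3 base
slots, and since ceil(log2(3)) is 2 it lands in pool 2 with 160-byte
slots.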
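The NUM_IN_PAGE() change follows the same theme: slot indices and mark
bit positions are computed in units of BASE_SLOT_SIZE rather than
sizeof(RVALUE), relying on heap pages being HEAP_PAGE_ALIGN-aligned.
Here is a rough sketch of that pointer-to-bit math under the same
hard-coded assumptions as above (it ignores the page header adjustment
that heap_page_allocate performs, and assumes 64-bit bitmap words):

#include <stdint.h>
#include <stdio.h>

/* Assumed values mirroring the patch: 16KB-aligned heap pages, a
 * 40-byte base slot, and 64-bit bitmap words. */
#define HEAP_PAGE_ALIGN_LOG  14
#define HEAP_PAGE_ALIGN_MASK (~(~0UL << HEAP_PAGE_ALIGN_LOG))
#define BASE_SLOT_SIZE       40
#define BITS_BITLENGTH       64

/* Mirrors the patched NUM_IN_PAGE(): the offset within the aligned
 * page divided by the base slot size, whatever the pool's stride. */
#define NUM_IN_PAGE(p)   (((uintptr_t)(p) & HEAP_PAGE_ALIGN_MASK) / BASE_SLOT_SIZE)
#define BITMAP_INDEX(p)  (NUM_IN_PAGE(p) / BITS_BITLENGTH)
#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH - 1))

int
main(void)
{
    /* A fake 16KB-aligned page base, and the third object of a pool-2
     * page, i.e. a stride of 4 * BASE_SLOT_SIZE = 160 bytes. */
    uintptr_t page = 0x40000000;
    uintptr_t obj  = page + 3 * (4 * BASE_SLOT_SIZE);

    printf("slot index  : %lu\n", (unsigned long)NUM_IN_PAGE(obj));   /* 12 */
    printf("bitmap word : %lu\n", (unsigned long)BITMAP_INDEX(obj));  /* 0 */
    printf("bitmap bit  : %lu\n", (unsigned long)BITMAP_OFFSET(obj)); /* 12 */
    return 0;
}

Because every slot size is a power-of-2 multiple of BASE_SLOT_SIZE and
the patch aligns each page's start to its stride (the GC_ASSERT on
NUM_IN_PAGE(start) * BASE_SLOT_SIZE % stride above), objects in larger
pools always fall on base-slot boundaries, so no two live objects ever
share a bitmap bit.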