
ruby-changes:63506

From: Aaron <ko1@a...>
Date: Tue, 3 Nov 2020 07:43:34 +0900 (JST)
Subject: [ruby-changes:63506] 67b2c21c32 (master): Add `GC.auto_compact= true/false` and `GC.auto_compact`

https://git.ruby-lang.org/ruby.git/commit/?id=67b2c21c32

From 67b2c21c327c96d80b8a0fe02a96d417e85293e8 Mon Sep 17 00:00:00 2001
From: Aaron Patterson <tenderlove@r...>
Date: Mon, 2 Nov 2020 14:40:29 -0800
Subject: Add `GC.auto_compact= true/false` and `GC.auto_compact`

* `GC.auto_compact=`, `GC.auto_compact` can be used to control when
  compaction runs.  Setting `auto_compact=` to true will cause
  compaction to occur during major collections.  At the moment,
  compaction adds significant overhead to major collections, so please
  test first!

[Feature #17176]
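
For illustration, a minimal usage sketch of the new API.  `GC.stat` and
`GC.start` are pre-existing methods, used here only as one way to observe
the effect; whether compaction actually runs depends on the collection
being a major one:

    GC.auto_compact = true           # compact during major collections
    GC.auto_compact                  # => true

    before = GC.stat(:compact_count)
    GC.start(full_mark: true)        # force a major collection
    GC.stat(:compact_count) > before # => true if compaction ran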

diff --git a/NEWS.md b/NEWS.md
index 2fa1b3d..ecf9c77 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -262,6 +262,16 @@ Outstanding ones only. https://github.com/ruby/ruby/blob/trunk/NEWS.md#L262
         * Warning#warn now supports a category kwarg.
         [[Feature #17122]]
 
+* GC
+    * New methods
+
+        * `GC.auto_compact=`, `GC.auto_compact` can be used to control when
+          compaction runs.  Setting `auto_compact=` to true will cause
+          compaction to occur during major collections.  At the moment,
+          compaction adds significant overhead to major collections, so please
+          test first!
+          [[Feature #17176]]
+
 ## Stdlib updates
 
 Outstanding ones only.
diff --git a/compile.c b/compile.c
index bf2c4f8..f6f5a1d 100644
--- a/compile.c
+++ b/compile.c
@@ -1304,6 +1304,9 @@ new_insn_send(rb_iseq_t *iseq, int line_no, ID id, VALUE argc, const rb_iseq_t * https://github.com/ruby/ruby/blob/trunk/compile.c#L1304
     VALUE *operands = compile_data_calloc2(iseq, sizeof(VALUE), 2);
     operands[0] = (VALUE)new_callinfo(iseq, id, FIX2INT(argc), FIX2INT(flag), keywords, blockiseq != NULL);
     operands[1] = (VALUE)blockiseq;
+    if (blockiseq) {
+        RB_OBJ_WRITTEN(iseq, Qundef, blockiseq);
+    }
     return new_insn_core(iseq, line_no, BIN(send), 2, operands);
 }
 
diff --git a/gc.c b/gc.c
index af7fcef..6587689 100644
--- a/gc.c
+++ b/gc.c
@@ -19,6 +19,15 @@ https://github.com/ruby/ruby/blob/trunk/gc.c#L19
 # include "ruby/ruby.h"
 #endif
 
+#include <signal.h>
+
+#define sighandler_t ruby_sighandler_t
+
+#ifndef _WIN32
+#include <unistd.h>
+#include <sys/mman.h>
+#endif
+
 #include <setjmp.h>
 #include <stdarg.h>
 #include <stdio.h>
@@ -482,6 +491,7 @@ typedef enum { https://github.com/ruby/ruby/blob/trunk/gc.c#L491
     GPR_FLAG_HAVE_FINALIZE     = 0x4000,
     GPR_FLAG_IMMEDIATE_MARK    = 0x8000,
     GPR_FLAG_FULL_MARK        = 0x10000,
+    GPR_FLAG_COMPACT          = 0x20000,
 
     GPR_DEFAULT_REASON =
         (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
@@ -497,6 +507,7 @@ typedef struct gc_profile_record { https://github.com/ruby/ruby/blob/trunk/gc.c#L507
     size_t heap_total_objects;
     size_t heap_use_size;
     size_t heap_total_size;
+    size_t moved_objects;
 
 #if GC_PROFILE_MORE_DETAIL
     double gc_mark_time;
@@ -529,13 +540,12 @@ typedef struct gc_profile_record { https://github.com/ruby/ruby/blob/trunk/gc.c#L540
 #endif
 } gc_profile_record;
 
+#define FL_FROM_FREELIST FL_USER0
+
 struct RMoved {
     VALUE flags;
+    VALUE dummy;
     VALUE destination;
-    union {
-        struct list_node node;
-        struct list_head head;
-    } as;
 };
 
 #define RMOVED(obj) ((struct RMoved *)(obj))
@@ -641,6 +651,8 @@ typedef struct rb_heap_struct { https://github.com/ruby/ruby/blob/trunk/gc.c#L651
     struct heap_page *using_page;
     struct list_head pages;
     struct heap_page *sweeping_page; /* iterator for .pages */
+    struct heap_page *compact_cursor;
+    size_t compact_cursor_index;
 #if GC_ENABLE_INCREMENTAL_MARK
     struct heap_page *pooled_pages;
 #endif
@@ -724,6 +736,7 @@ typedef struct rb_objspace { https://github.com/ruby/ruby/blob/trunk/gc.c#L736
 	size_t minor_gc_count;
 	size_t major_gc_count;
 	size_t compact_count;
+	size_t read_barrier_faults;
 #if RGENGC_PROFILE > 0
 	size_t total_generated_normal_object_count;
 	size_t total_generated_shady_object_count;
@@ -780,6 +793,7 @@ typedef struct rb_objspace { https://github.com/ruby/ruby/blob/trunk/gc.c#L793
     struct {
         size_t considered_count_table[T_MASK];
         size_t moved_count_table[T_MASK];
+        size_t total_moved;
     } rcompactor;
 
 #if GC_ENABLE_INCREMENTAL_MARK
@@ -969,6 +983,7 @@ int ruby_gc_debug_indent = 0; https://github.com/ruby/ruby/blob/trunk/gc.c#L983
 #endif
 VALUE rb_mGC;
 int ruby_disable_gc = 0;
+int ruby_enable_autocompact = 0;
 
 void rb_iseq_mark(const rb_iseq_t *iseq);
 void rb_iseq_update_references(rb_iseq_t *iseq);
@@ -3801,6 +3816,7 @@ is_live_object(rb_objspace_t *objspace, VALUE ptr) https://github.com/ruby/ruby/blob/trunk/gc.c#L3816
 {
     switch (BUILTIN_TYPE(ptr)) {
       case T_NONE:
+      case T_MOVED:
       case T_ZOMBIE:
 	return FALSE;
       default:
@@ -3948,8 +3964,10 @@ cached_object_id(VALUE obj) https://github.com/ruby/ruby/blob/trunk/gc.c#L3964
         id = objspace->next_object_id;
         objspace->next_object_id = rb_int_plus(id, INT2FIX(OBJ_ID_INCREMENT));
 
+        VALUE already_disabled = rb_gc_disable_no_rest();
         st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
         st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
+        if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
         FL_SET(obj, FL_SEEN_OBJ_ID);
 
         return id;
@@ -4361,16 +4379,370 @@ gc_setup_mark_bits(struct heap_page *page) https://github.com/ruby/ruby/blob/trunk/gc.c#L4379
     memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
 }
 
+static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
+static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free);
+
+static void
+lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
+{
+#if defined(_WIN32)
+    DWORD old_protect;
+
+    if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_NOACCESS, &old_protect)) {
+#else
+    if(mprotect(body, HEAP_PAGE_SIZE, PROT_NONE)) {
+#endif
+        rb_bug("Couldn't protect page %p", (void *)body);
+    } else {
+        gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
+    }
+}
+
+static void
+unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
+{
+#if defined(_WIN32)
+    DWORD old_protect;
+
+    if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_READWRITE, &old_protect)) {
+#else
+    if(mprotect(body, HEAP_PAGE_SIZE, PROT_READ | PROT_WRITE)) {
+#endif
+        rb_bug("Couldn't unprotect page %p", (void *)body);
+    } else {
+        gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
+    }
+}
+
+static short
+try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page, VALUE dest)
+{
+    struct heap_page * cursor = heap->compact_cursor;
+    char from_freelist = 0;
+
+    GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));
+
+    /* T_NONE objects came from the free list.  If the object is *not* a
+     * T_NONE, it is an object that just got freed but hasn't been
+     * added to the freelist yet */
+
+    if (BUILTIN_TYPE(dest) == T_NONE) {
+        from_freelist = 1;
+    }
+
+    while(1) {
+        size_t index = heap->compact_cursor_index;
+
+        bits_t *mark_bits = cursor->mark_bits;
+        bits_t *pin_bits = cursor->pinned_bits;
+        RVALUE * p = cursor->start;
+        RVALUE * offset = p - NUM_IN_PAGE(p);
+
+        /* Find an object to move and move it. Movable objects must be
+         * marked, so we iterate using the marking bitmap */
+        for (size_t i = index; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
+            bits_t bits = mark_bits[i] & ~pin_bits[i];
+
+            if (bits) {
+                p = offset + i * BITS_BITLENGTH;
+
+                do {
+                    if (bits & 1) {
+                        /* We're trying to move "p" */
+                        objspace->rcompactor.considered_count_table[BUILTIN_TYPE((VALUE)p)]++;
+
+                        if (gc_is_moveable_obj(objspace, (VALUE)p)) {
+                            /* We were able to move "p" */
+                            objspace->rcompactor.moved_count_table[BUILTIN_TYPE((VALUE)p)]++;
+                            objspace->rcompactor.total_moved++;
+                            gc_move(objspace, (VALUE)p, dest);
+                            gc_pin(objspace, (VALUE)p);
+                            heap->compact_cursor_index = i;
+                            if (from_freelist) {
+                                FL_SET((VALUE)p, FL_FROM_FREELIST);
+                            }
+
+                            return 1;
+                        }
+                    }
+                    p++;
+                    bits >>= 1;
+                } while (bits);
+            }
+        }
+
+        /* We couldn't find a movable object on the compact cursor, so lets
+         * move to the next page (previous page since we are traveling in the
+         * opposite direction of the sweep cursor) and look there. */
+
+        struct heap_page * next;
+
+        next = list_prev(&heap->pages, cursor, page_node);
+
+        /* Protect the current cursor since it probably has T_MOVED slots. */
+        lock_page_body(objspace, GET_PAGE_BODY(cursor->start));
+
+        heap->compact_cursor = next;
+        heap->compact_cursor_index = 0;
+        cursor = next;
+
+        // Cursors have met, lets quit.  We set `heap->compact_cursor` equal
+        // to `heap->sweeping_page` so we know how far to iterate through
+        // the heap when unprotecting pages.
+        if (next == sweep_page) {
+            break;
+        }
+    }
+
+    return 0;
+}
+
+static void
+gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
+{
+    struct heap_page *cursor = heap->compact_cursor;
+
+    while(cursor) {
+        unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
+        cursor = list_next(&heap->pages, cursor, page_node);
+    }
+}
+
+static void gc_update_references(rb_objspace_t * objspace, rb_heap_t *heap);
+static void invali (... truncated)
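
The `lock_page_body`/`unlock_page_body` helpers above implement a read
barrier by mprotect()ing (VirtualProtect() on Windows) every page behind
the compact cursor; touching a protected page faults, which is why this
change pulls in signal.h.  The new counters (`read_barrier_faults`,
`rcompactor.total_moved`) can be watched from Ruby.  A hedged sketch,
assuming they are exposed through `GC.stat` as `:read_barrier_faults` and
`:total_moved_objects` (the code exposing them falls in the truncated
part of this diff):

    GC.auto_compact = true
    GC.start(full_mark: true)       # major collection, with compaction

    stats = GC.stat
    stats[:total_moved_objects]     # objects relocated by the compactor
    stats[:read_barrier_faults]     # times the read barrier tripped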

--
ML: ruby-changes@q...
Info: http://www.atdot.net/~ko1/quickml/
