
ruby-changes:65650

From: Peter <ko1@a...>
Date: Thu, 25 Mar 2021 03:31:39 +0900 (JST)
Subject: [ruby-changes:65650] b25361f731 (master): Change heap walking to be safe for object allocation

https://git.ruby-lang.org/ruby.git/commit/?id=b25361f731

From b25361f7319cac953145d9d15e2e673e560ec3d9 Mon Sep 17 00:00:00 2001
From: Peter Zhu <peter@p...>
Date: Fri, 12 Mar 2021 19:36:58 +0000
Subject: Change heap walking to be safe for object allocation

---
 gc.c                          | 130 ++++++++++++++++++++++++++++--------------
 test/ruby/test_objectspace.rb |   7 +++
 2 files changed, 93 insertions(+), 44 deletions(-)
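
For readers skimming the patch: the old objspace_each_objects_without_setup walked heap_pages_sorted by index, so pages created by allocations made inside the callback could be picked up mid-walk and, in the worst case, keep the loop going indefinitely. The new code first snapshots the eden heap's page list into a malloc'ed buffer and only visits pages from that snapshot. The standalone sketch below illustrates the idea with a toy linked list; all names (node, each_node, append, ...) are hypothetical, it is not gc.c code, and it omits the patch's extra check that skips snapshot entries whose page has since been freed.

/* Sketch only: snapshot the container before running a callback that may
 * grow it, so the walk is bounded by the snapshot and always terminates. */
#include <stdio.h>
#include <stdlib.h>

struct node { int value; struct node *next; };

static struct node *head = NULL;
static size_t node_count = 0;

static void
append(int value)
{
    struct node *n = malloc(sizeof(*n));
    n->value = value;
    n->next = head;
    head = n;
    node_count++;
}

static void
each_node(void (*callback)(struct node *))
{
    /* Snapshot the nodes that exist right now, before running the callback. */
    struct node **snapshot = malloc(node_count * sizeof(*snapshot));
    size_t count = 0;
    for (struct node *n = head; n; n = n->next) snapshot[count++] = n;

    /* Visit only snapshotted nodes; nodes appended by the callback are
     * simply not part of this walk. */
    for (size_t i = 0; i < count; i++) callback(snapshot[i]);

    free(snapshot);
}

static void
allocating_callback(struct node *n)
{
    printf("visiting %d\n", n->value);
    append(n->value + 100);  /* allocating during the walk is now harmless */
}

int
main(void)
{
    for (int i = 0; i < 3; i++) append(i);
    each_node(allocating_callback);  /* prints exactly 3 lines, then returns */
    return 0;
}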

diff --git a/gc.c b/gc.c
index f7deedc..8218f88 100644
--- a/gc.c
+++ b/gc.c
@@ -3216,53 +3216,67 @@ Init_gc_stress(void) https://github.com/ruby/ruby/blob/trunk/gc.c#L3216
 
 typedef int each_obj_callback(void *, void *, size_t, void *);
 
-static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data);
+static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
 static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
 
-struct each_obj_args {
+struct each_obj_data {
     rb_objspace_t *objspace;
+    bool reenable_incremental;
+
     each_obj_callback *callback;
     void *data;
+
+    struct heap_page **pages;
+    size_t pages_count;
 };
 
-static void
-objspace_each_objects_without_setup(rb_objspace_t *objspace, each_obj_callback *callback, void *data)
+static VALUE
+objspace_each_objects_ensure(VALUE arg)
 {
-    size_t i;
-    struct heap_page *page;
-    RVALUE *pstart = NULL, *pend;
-
-    i = 0;
-    while (i < heap_allocated_pages) {
-	while (0 < i && pstart < heap_pages_sorted[i-1]->start)              i--;
-	while (i < heap_allocated_pages && heap_pages_sorted[i]->start <= pstart) i++;
-	if (heap_allocated_pages <= i) break;
-
-	page = heap_pages_sorted[i];
-
-	pstart = page->start;
-	pend = pstart + page->total_slots;
+    struct each_obj_data *data = (struct each_obj_data *)arg;
+    rb_objspace_t *objspace = data->objspace;
 
-        if ((*callback)(pstart, pend, sizeof(RVALUE), data)) {
-	    break;
-	}
+    /* Reenable incremental GC */
+    if (data->reenable_incremental) {
+        objspace->flags.dont_incremental = FALSE;
     }
-}
 
-static VALUE
-objspace_each_objects_protected(VALUE arg)
-{
-    struct each_obj_args *args = (struct each_obj_args *)arg;
-    objspace_each_objects_without_setup(args->objspace, args->callback, args->data);
+    /* Free pages buffer */
+    struct heap_page **pages = data->pages;
+    GC_ASSERT(pages);
+    free(pages);
+
     return Qnil;
 }
 
 static VALUE
-incremental_enable(VALUE _)
+objspace_each_objects_try(VALUE arg)
 {
-    rb_objspace_t *objspace = &rb_objspace;
+    struct each_obj_data *data = (struct each_obj_data *)arg;
+    rb_objspace_t *objspace = data->objspace;
+    struct heap_page **pages = data->pages;
+    size_t pages_count = data->pages_count;
+
+    struct heap_page *page = list_top(&heap_eden->pages, struct heap_page, page_node);
+    for (size_t i = 0; i < pages_count; i++) {
+        /* If we have reached the end of the linked list then there are no
+         * more pages, so break. */
+        if (page == NULL) break;
+
+        /* If this page does not match the one in the buffer, then move to
+         * the next page in the buffer. */
+        if (pages[i] != page) continue;
+
+        RVALUE *pstart = page->start;
+        RVALUE *pend = pstart + page->total_slots;
+
+        if ((*data->callback)(pstart, pend, sizeof(RVALUE), data->data)) {
+            break;
+        }
+
+        page = list_next(&heap_eden->pages, page, page_node);
+    }
 
-    objspace->flags.dont_incremental = FALSE;
     return Qnil;
 }
 
@@ -3305,30 +3319,58 @@ incremental_enable(VALUE _) https://github.com/ruby/ruby/blob/trunk/gc.c#L3319
 void
 rb_objspace_each_objects(each_obj_callback *callback, void *data)
 {
-    objspace_each_objects(&rb_objspace, callback, data);
+    objspace_each_objects(&rb_objspace, callback, data, TRUE);
 }
 
 static void
-objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data)
+objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
 {
-    int prev_dont_incremental = objspace->flags.dont_incremental;
+    /* Disable incremental GC */
+    bool reenable_incremental = FALSE;
+    if (protected) {
+        reenable_incremental = !objspace->flags.dont_incremental;
 
-    gc_rest(objspace);
-    objspace->flags.dont_incremental = TRUE;
-
-    if (prev_dont_incremental) {
-        objspace_each_objects_without_setup(objspace, callback, data);
+        gc_rest(objspace);
+        objspace->flags.dont_incremental = TRUE;
     }
-    else {
-        struct each_obj_args args = {objspace, callback, data};
-        rb_ensure(objspace_each_objects_protected, (VALUE)&args, incremental_enable, Qnil);
+
+    /* Create pages buffer */
+    size_t size = size_mul_or_raise(heap_allocated_pages, sizeof(struct heap_page *), rb_eRuntimeError);
+    struct heap_page **pages = malloc(size);
+    if (!pages) rb_memerror();
+
+    /* Set up pages buffer by iterating over all pages in the current eden
+     * heap. This will be a snapshot of the state of the heap before we
+     * call the callback over each page that exists in this buffer. Thus it
+     * is safe for the callback to allocate objects without possibly entering
+ * an infinite loop. */
+    struct heap_page *page;
+    size_t pages_count = 0;
+    list_for_each(&heap_eden->pages, page, page_node) {
+        pages[pages_count] = page;
+        pages_count++;
     }
+    GC_ASSERT(pages_count <= heap_allocated_pages);
+
+    /* Run the callback */
+    struct each_obj_data each_obj_data = {
+        .objspace = objspace,
+        .reenable_incremental = reenable_incremental,
+
+        .callback = callback,
+        .data = data,
+
+        .pages = pages,
+        .pages_count = pages_count
+    };
+    rb_ensure(objspace_each_objects_try, (VALUE)&each_obj_data,
+              objspace_each_objects_ensure, (VALUE)&each_obj_data);
 }
 
 void
 rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
 {
-    objspace_each_objects_without_setup(&rb_objspace, callback, data);
+    objspace_each_objects(&rb_objspace, callback, data, FALSE);
 }
 
 struct os_each_struct {
@@ -7133,7 +7175,7 @@ gc_verify_internal_consistency_(rb_objspace_t *objspace) https://github.com/ruby/ruby/blob/trunk/gc.c#L7175
 
     /* check relations */
 
-    objspace_each_objects_without_setup(objspace, verify_internal_consistency_i, &data);
+    objspace_each_objects(objspace, verify_internal_consistency_i, &data, FALSE);
 
     if (data.err_count != 0) {
 #if RGENGC_CHECK_MODE >= 5
@@ -9545,7 +9587,7 @@ gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE do https://github.com/ruby/ruby/blob/trunk/gc.c#L9587
     gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);
 
     objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
-    objspace_each_objects(objspace, heap_check_moved_i, NULL);
+    objspace_each_objects(objspace, heap_check_moved_i, NULL, TRUE);
 
     return gc_compact_stats(ec, self);
 }
diff --git a/test/ruby/test_objectspace.rb b/test/ruby/test_objectspace.rb
index 02c20aa..b48fbc1 100644
--- a/test/ruby/test_objectspace.rb
+++ b/test/ruby/test_objectspace.rb
@@ -233,4 +233,11 @@ End https://github.com/ruby/ruby/blob/trunk/test/ruby/test_objectspace.rb#L233
     assert_kind_of(meta, sclass)
     assert_include(ObjectSpace.each_object(meta).to_a, sclass)
   end
+
+  def test_each_object_with_allocation
+    assert_normal_exit(<<-End)
+      list = []
+      ObjectSpace.each_object { |o| list << Object.new }
+    End
+  end
 end
-- 
cgit v1.1
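
A side note on the cleanup structure: the walk is now split into a _try/_ensure pair run under rb_ensure(), so the pages buffer is freed and incremental GC re-enabled even if the callback raises. Below is a minimal sketch of that pattern as hypothetical extension code; walk_demo, walk_try, walk_ensure and walk_data are made-up names and are not part of this patch.

/* Sketch only: rb_ensure() guarantees the second function runs on every
 * exit path of the first, including a raise, which is how the patch
 * releases its buffer and restores the dont_incremental flag. */
#include <ruby.h>
#include <stdlib.h>

struct walk_data {
    char *buffer;      /* resource that must always be released */
    VALUE callback;    /* block that may raise */
};

static VALUE
walk_try(VALUE arg)
{
    struct walk_data *data = (struct walk_data *)arg;
    /* Calling back into Ruby can raise; rb_ensure protects the cleanup. */
    return rb_funcall(data->callback, rb_intern("call"), 0);
}

static VALUE
walk_ensure(VALUE arg)
{
    struct walk_data *data = (struct walk_data *)arg;
    free(data->buffer);  /* always runs, raise or not */
    return Qnil;
}

static VALUE
walk_demo(VALUE self, VALUE callback)
{
    struct walk_data data = { .buffer = malloc(64), .callback = callback };
    return rb_ensure(walk_try, (VALUE)&data, walk_ensure, (VALUE)&data);
}

void
Init_walk_demo(void)
{
    rb_define_singleton_method(rb_cObject, "walk_demo", walk_demo, 1);
}

In the patch itself the same struct each_obj_data is passed to both functions, which is why objspace_each_objects_ensure can free data->pages and restore objspace->flags.dont_incremental in one place.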


