[前][次][番号順一覧][スレッド一覧]

ruby-changes:62560

From: Jeremy <ko1@a...>
Date: Thu, 13 Aug 2020 03:03:46 +0900 (JST)
Subject: [ruby-changes:62560] f2d7461e85 (master): Improve performance of partial backtraces

https://git.ruby-lang.org/ruby.git/commit/?id=f2d7461e85

From f2d7461e85053cb084e10999b0b8019b0c29e66e Mon Sep 17 00:00:00 2001
From: Jeremy Evans <code@j...>
Date: Thu, 23 Jul 2020 14:10:04 -0700
Subject: Improve performance of partial backtraces

Previously, backtrace_each fully populated the rb_backtrace_t with all
backtrace frames, even if caller only requested a partial backtrace
(e.g. Kernel#caller_locations(1, 1)).  This changes backtrace_each to
only add the requested frames to the rb_backtrace_t.

To do this, backtrace_each needs to be passed the starting frame and
number of frames values passed to Kernel#caller or #caller_locations.

backtrace_each works from the top of the stack to the bottom, where the
bottom is the current frame.  Due to how the location for cfuncs is
tracked using the location of the previous iseq, we need to store an
extra frame for the previous iseq if we are limiting the backtrace and
the final backtrace frame (the first one stored) would be a cfunc and
not an iseq.

To limit the amount of work in this case, while scanning until the start
of the requested backtrace, for each iseq, store the cfp.  If the first
backtrace frame we care about is a cfunc, use the stored cfp to find the
related iseq.  Use a function pointer to handle the storage of the cfp
in the iteration arg, and also store the location of the extra frame
in the iteration arg.

backtrace_each needs to return int instead of void in order to signal
when a starting frame larger than the backtrace size is given, as caller
and caller_locations need to return nil and not an empty array in
these cases.

To handle cases where a range is provided with a negative end, and the
backtrace size is needed to calculate the result to pass to
rb_range_beg_len, add a backtrace_size static function to calculate
the size, which copies the logic from backtrace_each.

As backtrace_each only adds the backtrace lines requested,
backtrace_to_*_ary can be simplified to always operate on the entire
backtrace.

Previously, caller_locations(1,1) was about 6.2 times slower for an
800 deep callstack compared to an empty callstack.  With this new
approach, it is only 1.3 times slower. It will always be somewhat
slower as it still needs to scan the cfps from the top of the stack
until it finds the first requested backtrace frame.

Fixes [Bug #17031]

diff --git a/vm_backtrace.c b/vm_backtrace.c
index 04b696c..50cc574 100644
--- a/vm_backtrace.c
+++ b/vm_backtrace.c
@@ -29,6 +29,9 @@ id2str(ID id) https://github.com/ruby/ruby/blob/trunk/vm_backtrace.c#L29
 }
 #define rb_id2str(id) id2str(id)
 
+#define BACKTRACE_START 0
+#define ALL_BACKTRACE_LINES -1
+
 inline static int
 calc_lineno(const rb_iseq_t *iseq, const VALUE *pc)
 {
@@ -126,6 +129,10 @@ location_mark_entry(rb_backtrace_location_t *fi) https://github.com/ruby/ruby/blob/trunk/vm_backtrace.c#L129
 	rb_gc_mark_movable((VALUE)fi->body.iseq.iseq);
 	break;
       case LOCATION_TYPE_CFUNC:
+        if (fi->body.cfunc.prev_loc) {
+            location_mark_entry(fi->body.cfunc.prev_loc);
+        }
+        break;
       case LOCATION_TYPE_IFUNC:
       default:
 	break;
@@ -484,22 +491,47 @@ backtrace_alloc(VALUE klass) https://github.com/ruby/ruby/blob/trunk/vm_backtrace.c#L491
     return obj;
 }
 
-static void
+static long
+backtrace_size(const rb_execution_context_t *ec)
+{
+    const rb_control_frame_t *last_cfp = ec->cfp;
+    const rb_control_frame_t *start_cfp = RUBY_VM_END_CONTROL_FRAME(ec);
+
+    if (start_cfp == NULL) {
+        return -1;
+    }
+
+    start_cfp =
+      RUBY_VM_NEXT_CONTROL_FRAME(
+          RUBY_VM_NEXT_CONTROL_FRAME(start_cfp)); /* skip top frames */
+
+    if (start_cfp < last_cfp) {
+        return 0;
+    }
+
+    return start_cfp - last_cfp + 1;
+}
+
+static int
 backtrace_each(const rb_execution_context_t *ec,
+               ptrdiff_t from_last,
+               long num_frames,
 	       void (*init)(void *arg, size_t size),
 	       void (*iter_iseq)(void *arg, const rb_control_frame_t *cfp),
 	       void (*iter_cfunc)(void *arg, const rb_control_frame_t *cfp, ID mid),
+               void (*iter_skip)(void *arg, const rb_control_frame_t *cfp),
 	       void *arg)
 {
     const rb_control_frame_t *last_cfp = ec->cfp;
     const rb_control_frame_t *start_cfp = RUBY_VM_END_CONTROL_FRAME(ec);
     const rb_control_frame_t *cfp;
-    ptrdiff_t size, i;
+    ptrdiff_t size, i, last, start = 0;
+    int ret = 0;
 
     // In the case the thread vm_stack or cfp is not initialized, there is no backtrace.
     if (start_cfp == NULL) {
         init(arg, 0);
-        return;
+        return ret;
     }
 
     /*                <- start_cfp (end control frame)
@@ -517,16 +549,43 @@ backtrace_each(const rb_execution_context_t *ec, https://github.com/ruby/ruby/blob/trunk/vm_backtrace.c#L549
 	  RUBY_VM_NEXT_CONTROL_FRAME(start_cfp)); /* skip top frames */
 
     if (start_cfp < last_cfp) {
-	size = 0;
+        size = last = 0;
     }
     else {
 	size = start_cfp - last_cfp + 1;
+
+        if (from_last > size) {
+            size = last = 0;
+            ret = 1;
+        }
+        else if (num_frames >= 0 && num_frames < size) {
+            if (from_last + num_frames > size) {
+                size -= from_last;
+                last = size;
+            }
+            else {
+                start = size - from_last - num_frames;
+                size = num_frames;
+                last = start + size;
+            }
+        }
+        else {
+            size -= from_last;
+            last = size;
+        }
     }
 
     init(arg, size);
 
     /* SDR(); */
-    for (i=0, cfp = start_cfp; i<size; i++, cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp)) {
+    for (i=0, cfp = start_cfp; i<last; i++, cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp)) {
+        if (i < start) {
+            if (iter_skip) {
+                iter_skip(arg, cfp);
+            }
+            continue;
+        }
+
 	/* fprintf(stderr, "cfp: %d\n", (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size) - cfp); */
 	if (cfp->iseq) {
 	    if (cfp->pc) {
@@ -540,12 +599,16 @@ backtrace_each(const rb_execution_context_t *ec, https://github.com/ruby/ruby/blob/trunk/vm_backtrace.c#L599
 	    iter_cfunc(arg, cfp, mid);
 	}
     }
+
+    return ret;
 }
 
 struct bt_iter_arg {
     rb_backtrace_t *bt;
     VALUE btobj;
     rb_backtrace_location_t *prev_loc;
+    const rb_control_frame_t *prev_cfp;
+    rb_backtrace_location_t *init_loc;
 };
 
 static void
@@ -554,8 +617,11 @@ bt_init(void *ptr, size_t size) https://github.com/ruby/ruby/blob/trunk/vm_backtrace.c#L617
     struct bt_iter_arg *arg = (struct bt_iter_arg *)ptr;
     arg->btobj = backtrace_alloc(rb_cBacktrace);
     GetCoreDataFromValue(arg->btobj, rb_backtrace_t, arg->bt);
-    arg->bt->backtrace_base = arg->bt->backtrace = ALLOC_N(rb_backtrace_location_t, size);
-    arg->bt->backtrace_size = 0;
+    arg->bt->backtrace_base = arg->bt->backtrace = ALLOC_N(rb_backtrace_location_t, size+1);
+    arg->bt->backtrace_size = 1;
+    arg->prev_cfp = NULL;
+    arg->init_loc = &arg->bt->backtrace_base[size];
+    arg->init_loc->type = 0;
 }
 
 static void
@@ -564,7 +630,7 @@ bt_iter_iseq(void *ptr, const rb_control_frame_t *cfp) https://github.com/ruby/ruby/blob/trunk/vm_backtrace.c#L630
     const rb_iseq_t *iseq = cfp->iseq;
     const VALUE *pc = cfp->pc;
     struct bt_iter_arg *arg = (struct bt_iter_arg *)ptr;
-    rb_backtrace_location_t *loc = &arg->bt->backtrace[arg->bt->backtrace_size++];
+    rb_backtrace_location_t *loc = &arg->bt->backtrace[arg->bt->backtrace_size++-1];
     loc->type = LOCATION_TYPE_ISEQ;
     loc->body.iseq.iseq = iseq;
     loc->body.iseq.lineno.pc = pc;
@@ -575,41 +641,69 @@ static void https://github.com/ruby/ruby/blob/trunk/vm_backtrace.c#L641
 bt_iter_cfunc(void *ptr, const rb_control_frame_t *cfp, ID mid)
 {
     struct bt_iter_arg *arg = (struct bt_iter_arg *)ptr;
-    rb_backtrace_location_t *loc = &arg->bt->backtrace[arg->bt->backtrace_size++];
+    rb_backtrace_location_t *loc = &arg->bt->backtrace[arg->bt->backtrace_size++-1];
     loc->type = LOCATION_TYPE_CFUNC;
     loc->body.cfunc.mid = mid;
-    loc->body.cfunc.prev_loc = arg->prev_loc;
+    if (arg->prev_loc) {
+        loc->body.cfunc.prev_loc = arg->prev_loc;
+    }
+    else if (arg->prev_cfp) {
+        const rb_iseq_t *iseq = arg->prev_cfp->iseq;
+        const VALUE *pc = arg->prev_cfp->pc;
+        arg->init_loc->type = LOCATION_TYPE_ISEQ;
+        arg->init_loc->body.iseq.iseq = iseq;
+        arg->init_loc->body.iseq.lineno.pc = pc;
+        loc->body.cfunc.prev_loc = arg->prev_loc = arg->init_loc;
+    } else {
+        loc->body.cfunc.prev_loc = NULL;
+    }
 }
 
-MJIT_FUNC_EXPORTED VALUE
-rb_ec_backtrace_object(const rb_execution_context_t *ec)
+static void
+bt_iter_skip(void *ptr, const rb_control_frame_t *cfp)
+{
+    if (cfp->iseq && cfp->pc) {
+        ((struct bt_iter_arg *)ptr)->prev_cfp = cfp;
+    }
+}
+
+static VALUE
+rb_ec_partial_backtrace_object(const rb_execution_context_t *ec, long lev, long n, int* level_too_large)
 {
     struct bt_iter_arg arg;
+    int too_large;
     arg.prev_loc = 0;
 
-    backtrace_each(ec,
-		   bt_init,
-		   bt_iter_iseq,
-		   bt_iter_cfunc,
-		   &arg);
+    too_large = backtrace_each(ec,
+                               lev,
+                               n,
+                               bt_init,
+                               bt_iter_iseq,
+                               bt_iter_cfunc,
+                               bt_iter_skip,
+                               &arg);
+
+    if (level_too_large) *level_too_large = too_large;
 
     return arg.btobj;
 }
 
+MJIT_FUNC_EXPORTED VALUE
+rb_ec_backtrace_object(const rb_execution_context_t *ec)
+{
+    return rb_ec_partial_backtrace_object(ec, BACKTRACE_START, ALL_BACKTRACE_LINES, NULL);
+}
+
 static VALUE
-backtrace_collect(rb_backtrace_t *bt, long lev, long n, VALUE (*func)(rb_backtrace_location_t *, void *arg), void *arg)
+backtrace_collect(rb_backtrace_t *bt, VALUE (*func)(rb_backtrace_location_t *, vo (... truncated)

--
ML: ruby-changes@q...
Info: http://www.atdot.net/~ko1/quickml/

[前][次][番号順一覧][スレッド一覧]