ruby-changes:60380

From: Takashi <ko1@a...>
Date: Fri, 13 Mar 2020 14:53:05 +0900 (JST)
Subject: [ruby-changes:60380] 0cd7be99e9 (master): Avoid referring to an old value of realloc

https://git.ruby-lang.org/ruby.git/commit/?id=0cd7be99e9

From 0cd7be99e9a15f649970559e43e3edb704568670 Mon Sep 17 00:00:00 2001
From: Takashi Kokubun <takashikkbn@g...>
Date: Thu, 12 Mar 2020 22:51:33 -0700
Subject: Avoid referring to an old value of realloc

OpenBSD RubyCI has failed with SEGV since 4bcd5981e80d3e1852c8723741a0069779464128.
https://rubyci.org/logs/rubyci.s3.amazonaws.com/openbsd-current/ruby-master/log/20200312T223005Z.fail.html.gz

This was because `status->cc_entries` could become stale after a `realloc` call
made while capturing cc entries for inlined iseqs.
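
This is the classic stale-pointer-after-realloc pattern. A minimal sketch in
plain C, with hypothetical names (`struct unit` standing in for
`rb_mjit_unit`), of how a cached pointer into a growable buffer goes wrong:

    #include <stdlib.h>

    struct unit { int *entries; int size; };

    /* Append n slots to the growable buffer; may move it. */
    static int *append(struct unit *u, int n)
    {
        int *p = realloc(u->entries, sizeof(int) * (u->size + n));
        if (p == NULL) return NULL;
        u->entries = p;
        int *slot = p + u->size;  /* points into the buffer as of now */
        u->size += n;
        return slot;
    }

    int main(void)
    {
        struct unit u = { NULL, 0 };
        int *cached = append(&u, 4);   /* cached like status->cc_entries */
        append(&u, 1024);              /* may realloc and move u.entries */
        /* `cached` may now dangle; dereferencing it is undefined behavior.
           The fix in this commit: remember an index and recompute the
           address from the current base pointer at every use. */
        (void)cached;
        free(u.entries);
        return 0;
    }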

diff --git a/mjit_compile.c b/mjit_compile.c
index a75e3fe..cfaa672 100644
--- a/mjit_compile.c
+++ b/mjit_compile.c
@@ -51,8 +51,8 @@ struct compile_status { https://github.com/ruby/ruby/blob/trunk/mjit_compile.c#L51
     bool local_stack_p;
     // Safely-accessible ivar cache entries copied from main thread.
     union iseq_inline_storage_entry *is_entries;
-    // Safely-accessible call cache entries captured to compiled_iseq to be marked on GC
-    const struct rb_callcache **cc_entries;
+    // Index of call cache entries captured to compiled_iseq to be marked on GC
+    int cc_entries_index;
     // A pointer to root (i.e. not inlined) iseq being compiled.
     const struct rb_iseq_constant_body *compiled_iseq;
     // Mutated optimization levels
@@ -82,6 +82,18 @@ call_data_index(CALL_DATA cd, const struct rb_iseq_constant_body *body) https://github.com/ruby/ruby/blob/trunk/mjit_compile.c#L82
     return cd - body->call_data;
 }
 
+const struct rb_callcache ** mjit_iseq_cc_entries(const struct rb_iseq_constant_body *const body);
+
+// Use this function to refer to cc_entries allocated by `mjit_capture_cc_entries`,
+// instead of storing cc_entries in status directly, so that we always refer to the
+// latest address returned by `realloc` inside it.
+static const struct rb_callcache **
+captured_cc_entries(const struct compile_status *status)
+{
+    VM_ASSERT(status->cc_entries_index != -1);
+    return mjit_iseq_cc_entries(status->compiled_iseq) + status->cc_entries_index;
+}
+
 // Returns true if call cache is still not obsoleted and vm_cc_cme(cc)->def->type is available.
 static bool
 has_valid_method_type(CALL_CACHE cc)
@@ -277,7 +289,7 @@ compile_cancel_handler(FILE *f, const struct rb_iseq_constant_body *body, struct https://github.com/ruby/ruby/blob/trunk/mjit_compile.c#L289
     fprintf(f, "    return Qundef;\n");
 }
 
-extern const struct rb_callcache **
+extern int
 mjit_capture_cc_entries(const struct rb_iseq_constant_body *compiled_iseq, const struct rb_iseq_constant_body *captured_iseq);
 
 extern bool mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq,
@@ -375,8 +387,8 @@ inlinable_iseq_p(const struct rb_iseq_constant_body *body) https://github.com/ruby/ruby/blob/trunk/mjit_compile.c#L387
             alloca(sizeof(const struct rb_iseq_constant_body *) * body->iseq_size) : NULL, \
         .is_entries = (body->is_size > 0) ? \
             alloca(sizeof(union iseq_inline_storage_entry) * body->is_size) : NULL, \
-        .cc_entries = (body->ci_size > 0) ? \
-            mjit_capture_cc_entries(status.compiled_iseq, body) : NULL, \
+        .cc_entries_index = (body->ci_size > 0) ? \
+            mjit_capture_cc_entries(status.compiled_iseq, body) : -1, \
         .compiled_iseq = status.compiled_iseq, \
         .compile_info = compile_root_p ? \
             rb_mjit_iseq_compile_info(body) : alloca(sizeof(struct rb_mjit_compile_info)) \
@@ -403,7 +415,7 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status https://github.com/ruby/ruby/blob/trunk/mjit_compile.c#L415
         if (insn == BIN(opt_send_without_block)) { // `compile_inlined_cancel_handler` supports only `opt_send_without_block`
             CALL_DATA cd = (CALL_DATA)body->iseq_encoded[pos + 1];
             const struct rb_callinfo *ci = cd->ci;
-            const struct rb_callcache *cc = status->cc_entries[call_data_index(cd, body)]; // use copy to avoid race condition
+            const struct rb_callcache *cc = captured_cc_entries(status)[call_data_index(cd, body)]; // use copy to avoid race condition
 
             const rb_iseq_t *child_iseq;
             if (has_valid_method_type(cc) &&
@@ -429,7 +441,7 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status https://github.com/ruby/ruby/blob/trunk/mjit_compile.c#L441
                     .param_size = child_iseq->body->param.size,
                     .local_size = child_iseq->body->local_table_size
                 };
-                if ((child_iseq->body->ci_size > 0 && child_status.cc_entries == NULL)
+                if ((child_iseq->body->ci_size > 0 && child_status.cc_entries_index == -1)
                     || (child_status.is_entries != NULL && !mjit_copy_cache_from_main_thread(child_iseq, child_status.is_entries))) {
                     return false;
                 }
@@ -462,7 +474,7 @@ mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname) https://github.com/ruby/ruby/blob/trunk/mjit_compile.c#L474
 
     struct compile_status status = { .compiled_iseq = iseq->body };
     INIT_COMPILE_STATUS(status, iseq->body, true);
-    if ((iseq->body->ci_size > 0 && status.cc_entries == NULL)
+    if ((iseq->body->ci_size > 0 && status.cc_entries_index == -1)
         || (status.is_entries != NULL && !mjit_copy_cache_from_main_thread(iseq, status.is_entries))) {
         return false;
     }
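
Taken together, the mjit_compile.c change replaces a cached pointer with a
cached index plus an accessor. A minimal sketch of the same pattern, reusing
the hypothetical `struct unit` from the earlier example:

    struct status { struct unit *unit; int entries_index; /* -1 if none */ };

    static int *
    captured_entries(const struct status *s)
    {
        /* Dereference the current base pointer on every call, so a realloc
           that moved unit->entries in the meantime is harmless. */
        return s->unit->entries + s->entries_index;
    }

Recomputing `base + index` on each access costs one extra load but can never
dangle, which is exactly the trade-off `captured_cc_entries` makes above.
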
diff --git a/mjit_worker.c b/mjit_worker.c
index e9ba24b..e735045 100644
--- a/mjit_worker.c
+++ b/mjit_worker.c
@@ -1151,11 +1151,21 @@ static void mjit_copy_job_handler(void *data); https://github.com/ruby/ruby/blob/trunk/mjit_worker.c#L1151
 // vm_trace.c
 int rb_workqueue_register(unsigned flags, rb_postponed_job_func_t , void *);
 
+// To look up cc_entries using the index returned by `mjit_capture_cc_entries` in mjit_compile.c
+const struct rb_callcache **
+mjit_iseq_cc_entries(const struct rb_iseq_constant_body *const body)
+{
+    return body->jit_unit->cc_entries;
+}
+
 // Capture cc entries of `captured_iseq` and append them to `compiled_iseq->jit_unit->cc_entries`.
 // This is needed when `captured_iseq` is inlined by `compiled_iseq` and GC needs to mark inlined cc.
 //
+// An index into `compiled_iseq->jit_unit->cc_entries` is returned instead of the address itself
+// because old addresses may be invalidated by `realloc` later. -1 is returned on failure.
+//
 // This assumes that it's safe to reference cc without acquiring GVL.
-const struct rb_callcache **
+int
 mjit_capture_cc_entries(const struct rb_iseq_constant_body *compiled_iseq, const struct rb_iseq_constant_body *captured_iseq)
 {
     struct rb_mjit_unit *unit = compiled_iseq->jit_unit;
@@ -1164,16 +1174,17 @@ mjit_capture_cc_entries(const struct rb_iseq_constant_body *compiled_iseq, const https://github.com/ruby/ruby/blob/trunk/mjit_worker.c#L1174
 
     // Allocate new cc_entries and append them to unit->cc_entries
     const struct rb_callcache **cc_entries;
+    int cc_entries_index = unit->cc_entries_size;
     if (unit->cc_entries_size == 0) {
         VM_ASSERT(unit->cc_entries == NULL);
         unit->cc_entries = cc_entries = malloc(sizeof(struct rb_callcache *) * new_entries_size);
-        if (cc_entries == NULL) return NULL;
+        if (cc_entries == NULL) return -1;
     }
     else {
         cc_entries = realloc(unit->cc_entries, sizeof(struct rb_callcache *) * new_entries_size);
-        if (cc_entries == NULL) return NULL;
+        if (cc_entries == NULL) return -1;
         unit->cc_entries = cc_entries;
-        cc_entries += unit->cc_entries_size;
+        cc_entries += cc_entries_index;
     }
     unit->cc_entries_size = new_entries_size;
 
@@ -1182,7 +1193,7 @@ mjit_capture_cc_entries(const struct rb_iseq_constant_body *compiled_iseq, const https://github.com/ruby/ruby/blob/trunk/mjit_worker.c#L1193
         cc_entries[i] = captured_iseq->call_data[i].cc;
     }
 
-    return cc_entries;
+    return cc_entries_index;
 }
 
 // Copy inline cache values of `iseq` to `cc_entries` and `is_entries`.
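
The allocation side of the pattern, again as a hypothetical sketch rather than
the real `mjit_capture_cc_entries`: grow the buffer with `realloc`, but return
the offset of the newly appended region, since the offset (unlike a pointer)
survives later reallocations.

    #include <string.h>

    static int capture_entries(struct unit *u, const int *src, int n)
    {
        int index = u->size;  /* offset of the region about to be appended */
        int *p = realloc(u->entries, sizeof(int) * (u->size + n));
        if (p == NULL) return -1;              /* failure, as in the patch */
        u->entries = p;
        u->size += n;
        memcpy(p + index, src, sizeof(int) * n);
        return index;  /* valid across future reallocs; p + index may not be */
    }

Note that `realloc(NULL, size)` behaves like `malloc(size)`, so a single code
path can suffice in the sketch, even though the patch keeps a separate
`cc_entries_size == 0` branch for its `VM_ASSERT`.
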
diff --git a/tool/ruby_vm/views/_mjit_compile_send.erb b/tool/ruby_vm/views/_mjit_compile_send.erb
index 6935292..999e161 100644
--- a/tool/ruby_vm/views/_mjit_compile_send.erb
+++ b/tool/ruby_vm/views/_mjit_compile_send.erb
@@ -14,7 +14,7 @@ https://github.com/ruby/ruby/blob/trunk/tool/ruby_vm/views/_mjit_compile_send.erb#L14
     MAYBE_UNUSED(<%= ope.fetch(:decl) %>) = (<%= ope.fetch(:type) %>)operands[<%= i %>];
 % end
 % # compiler: Use copied cc to avoid race condition
-    const struct rb_callcache *captured_cc = status->cc_entries[call_data_index(cd, body)];
+    const struct rb_callcache *captured_cc = captured_cc_entries(status)[call_data_index(cd, body)];
 %
     if (!status->compile_info->disable_send_cache && has_valid_method_type(captured_cc)) {
         const rb_iseq_t *iseq;
diff --git a/tool/ruby_vm/views/mjit_compile.inc.erb b/tool/ruby_vm/views/mjit_compile.inc.erb
index 0b9fa3f..b51e777 100644
--- a/tool/ruby_vm/views/mjit_compile.inc.erb
+++ b/tool/ruby_vm/views/mjit_compile.inc.erb
@@ -57,7 +57,7 @@ switch (insn) { https://github.com/ruby/ruby/blob/trunk/tool/ruby_vm/views/mjit_compile.inc.erb#L57
 %   when *send_compatible_opt_insns
 %     # To avoid cancel, just emit `opt_send_without_block` instead of `opt_*` insn if call cache is populated.
 %     cd_index = insn.opes.index { |o| o.fetch(:type) == 'CALL_DATA' }
-      if (has_valid_method_type(status->cc_entries[call_data_index((CALL_DATA)operands[<%= cd_index %>], body)])) {
+      if (has_valid_method_type(captured_cc_entries(status)[call_data_index((CALL_DATA)operands[<%= cd_index %>], body)])) {
 <%=       render 'mjit_compile_send', locals: { insn: opt_send_without_block } -%>
 <%=       render 'mjit_compile_insn', locals: { insn: opt_send_without_block } -%>
           break;
-- 
cgit v0.10.2

