ruby-changes:55093
From: k0kubun <ko1@a...>
Date: Tue, 19 Mar 2019 02:20:29 +0900 (JST)
Subject: [ruby-changes:55093] k0kubun:r67300 (trunk): Use alloca again instead of malloc and free
k0kubun 2019-03-19 02:20:21 +0900 (Tue, 19 Mar 2019) New Revision: 67300 https://svn.ruby-lang.org/cgi-bin/viewvc.cgi?view=revision&revision=67300 Log: Use alloca again instead of malloc and free by changing interface of `mjit_copy_cache_from_main_thread`. This is also fixing deadlock introduced by r67299. Modified files: trunk/mjit_compile.c trunk/mjit_worker.c Index: mjit_worker.c =================================================================== --- mjit_worker.c (revision 67299) +++ mjit_worker.c (revision 67300) @@ -1141,13 +1141,14 @@ static void mjit_copy_job_handler(void * https://github.com/ruby/ruby/blob/trunk/mjit_worker.c#L1141 /* vm_trace.c */ int rb_workqueue_register(unsigned flags, rb_postponed_job_func_t , void *); -// Copy inline cache values of `iseq` to `*cc_entries` and `*is_entries`. +// Copy inline cache values of `iseq` to `cc_entries` and `is_entries`. +// These buffers should be pre-allocated properly prior to calling this function. // Return true if copy succeeds or is not needed. // // We're lazily copying cache values from main thread because these cache values // could be different between ones on enqueue timing and ones on dequeue timing. 
bool -mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache **cc_entries, union iseq_inline_storage_entry **is_entries) +mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries) { mjit_copy_job_t *job = &mjit_copy_job; // just a short hand @@ -1155,30 +1156,15 @@ mjit_copy_cache_from_main_thread(const r https://github.com/ruby/ruby/blob/trunk/mjit_worker.c#L1156 job->finish_p = true; // disable dispatching this job in mjit_copy_job_handler while it's being modified CRITICAL_SECTION_FINISH(3, "in mjit_copy_cache_from_main_thread"); - const struct rb_iseq_constant_body *body = iseq->body; - job->cc_entries = NULL; - if (body->ci_size > 0 || body->ci_kw_size > 0) { - job->cc_entries = malloc(sizeof(struct rb_call_cache) * (body->ci_size + body->ci_kw_size)); - if (!job->cc_entries) return false; - } - job->is_entries = NULL; - if (body->is_size > 0) { - job->is_entries = malloc(sizeof(union iseq_inline_storage_entry) * body->is_size); - if (!job->is_entries) { - free(job->cc_entries); - return false; - } - } - - // If ISeq has no inline cache, there's no need to run a copy job. 
- if (job->cc_entries == NULL && job->is_entries == NULL) { - *cc_entries = job->cc_entries; - *is_entries = job->is_entries; - return true; - } + job->cc_entries = cc_entries; + job->is_entries = is_entries; CRITICAL_SECTION_START(3, "in mjit_copy_cache_from_main_thread"); job->iseq = iseq; // Prevent GC of this ISeq from here + VM_ASSERT(in_jit); + in_jit = false; // To avoid deadlock, allow running GC while waiting for copy job + rb_native_cond_signal(&mjit_client_wakeup); // Unblock main thread waiting in `mjit_gc_start_hook` + job->finish_p = false; // allow dispatching this job in mjit_copy_job_handler CRITICAL_SECTION_FINISH(3, "in mjit_copy_cache_from_main_thread"); @@ -1196,16 +1182,14 @@ mjit_copy_cache_from_main_thread(const r https://github.com/ruby/ruby/blob/trunk/mjit_worker.c#L1182 CRITICAL_SECTION_FINISH(3, "in MJIT copy job wait"); } - // Set result values. - *cc_entries = job->cc_entries; - *is_entries = job->is_entries; - - bool result = job->finish_p; CRITICAL_SECTION_START(3, "in mjit_copy_cache_from_main_thread"); - job->iseq = NULL; // Allow GC of this ISeq from here + bool result = job->finish_p; // Disable dispatching this job in mjit_copy_job_handler while memory allocated by alloca // could be expired after finishing this function. 
job->finish_p = true; + + in_jit = true; // Prohibit GC during JIT compilation + job->iseq = NULL; // Allow future GC of this ISeq from here CRITICAL_SECTION_FINISH(3, "in mjit_copy_cache_from_main_thread"); return result; } Index: mjit_compile.c =================================================================== --- mjit_compile.c (revision 67299) +++ mjit_compile.c (revision 67300) @@ -196,7 +196,7 @@ compile_cancel_handler(FILE *f, const st https://github.com/ruby/ruby/blob/trunk/mjit_compile.c#L196 fprintf(f, " return Qundef;\n"); } -extern bool mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache **cc_entries, union iseq_inline_storage_entry **is_entries); +extern bool mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries); // Compile ISeq to C code in `f`. It returns true if it succeeds to compile. bool @@ -211,8 +211,18 @@ mjit_compile(FILE *f, const rb_iseq_t *i https://github.com/ruby/ruby/blob/trunk/mjit_compile.c#L211 if (status.stack_size_for_pos == NULL) return false; memset(status.stack_size_for_pos, NOT_COMPILED_STACK_SIZE, sizeof(int) * body->iseq_size); - if (mjit_copy_cache_from_main_thread(iseq, &status.cc_entries, &status.is_entries) == false) + + status.cc_entries = NULL; + if ((body->ci_size + body->ci_kw_size) > 0) + status.cc_entries = alloca(sizeof(struct rb_call_cache) * (body->ci_size + body->ci_kw_size)); + status.is_entries = NULL; + if (body->is_size > 0) + status.is_entries = alloca(sizeof(union iseq_inline_storage_entry) * body->is_size); + + if ((status.cc_entries != NULL || status.is_entries != NULL) + && !mjit_copy_cache_from_main_thread(iseq, status.cc_entries, status.is_entries)) { return false; + } /* For performance, we verify stack size only on compilation time (mjit_compile.inc.erb) without --jit-debug */ if (!mjit_opts.debug) { @@ -252,8 +262,6 @@ mjit_compile(FILE *f, const rb_iseq_t *i 
https://github.com/ruby/ruby/blob/trunk/mjit_compile.c#L262 fprintf(f, "\n} /* end of %s */\n", funcname); free(status.stack_size_for_pos); - free(status.cc_entries); - free(status.is_entries); return status.success; } -- ML: ruby-changes@q... Info: http://www.atdot.net/~ko1/quickml/