ruby-changes:60156
From: Koichi <ko1@a...>
Date: Sat, 22 Feb 2020 09:59:23 +0900 (JST)
Subject: [ruby-changes:60156] b9007b6c54 (master): Introduce disposable call-cache.
https://git.ruby-lang.org/ruby.git/commit/?id=b9007b6c54

From b9007b6c548f91e88fd3f2ffa23de740431fa969 Mon Sep 17 00:00:00 2001
From: Koichi Sasada <ko1@a...>
Date: Wed, 8 Jan 2020 16:14:01 +0900
Subject: Introduce disposable call-cache.

This patch contains several ideas:

(1) Disposable inline method cache (IMC) for race-free inline method caching
  * Make the call-cache (CC) an RVALUE (a GC-managed object) and
    allocate a new CC on every cache miss.
  * This technique allows race-free access from parallel processing
    elements, in the style of RCU.

(2) Introduce a per-class method cache (pCMC)
  * Unlike the fixed-size global method cache (GMC), the pCMC allows a
    flexible cache size.
  * Caching CCs reduces CC allocation and lets call-sites with the same
    call-info (CI) share a CC's fast path.

(3) Invalidate an inline method cache by invalidating the corresponding
    method entries (MEs)
  * Instead of using class serials, we set an "invalidated" flag on the
    method entry itself to represent cache invalidation.
  * Compared with class serials, the impact of a method modification
    (add/overwrite/delete) is small.
  * Updating a class serial invalidates all method caches of the class
    and its sub-classes.
  * The proposed approach invalidates only the caches for the one
    affected ME.

See [Feature #16614] for more details. (A minimal C sketch of ideas (1)
and (3) follows this message, ahead of the diff.)
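Below is a minimal, self-contained C sketch of ideas (1) and (3). Every
name in it (method_entry, call_cache, call_site, dispatch) is
illustrative only and is NOT a CRuby structure; the real definitions
live in vm_callinfo.h in the patch. The point it demonstrates: a
published CC is never written again -- invalidation flips a flag on the
ME, and the next call abandons the stale CC and allocates a fresh one.

/* sketch only: these are NOT CRuby's data structures */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct method_entry {
    void (*func)(void);
    bool invalidated;              /* idea (3): flag the ME itself */
};

struct call_cache {
    const struct method_entry *me; /* immutable after allocation */
};

struct call_site {
    const struct call_cache *cc;   /* replaced on miss, never mutated */
};

/* idea (1): on a miss, allocate a brand-new CC instead of updating the
 * old one in place; the old CC is simply abandoned (leaked here,
 * reclaimed by the GC in CRuby since a CC is an RVALUE there). */
static void
dispatch(struct call_site *site, const struct method_entry *lookup_result)
{
    const struct call_cache *cc = site->cc;
    if (cc == NULL || cc->me->invalidated) {
        struct call_cache *fresh = malloc(sizeof(*fresh));
        fresh->me = lookup_result; /* result of a full method lookup */
        site->cc = fresh;
        cc = fresh;
        puts("miss: installed a fresh CC");
    }
    else {
        puts("hit");
    }
    cc->me->func();
}

static void hello_v1(void) { puts("  hello (v1)"); }
static void hello_v2(void) { puts("  hello (v2)"); }

int
main(void)
{
    struct method_entry me1 = { hello_v1, false };
    struct method_entry me2 = { hello_v2, false };
    struct call_site site = { NULL };

    dispatch(&site, &me1);  /* miss: first call */
    dispatch(&site, &me1);  /* hit: cached ME is still valid */

    me1.invalidated = true; /* "redefine" hello: only this ME is hit */

    dispatch(&site, &me2);  /* miss: stale CC abandoned, new CC made */
    return 0;
}

Abandoning rather than overwriting is what makes the cache race-free
for concurrent readers: a reader that already loaded the old CC pointer
still sees a fully consistent object until the GC proves it unreachable.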
diff --git a/class.c b/class.c
index f181f33..98b2a1d 100644
--- a/class.c
+++ b/class.c
@@ -894,12 +894,21 @@ add_refined_method_entry_i(ID key, VALUE value, void *data)
https://github.com/ruby/ruby/blob/trunk/class.c#L894
 static void ensure_origin(VALUE klass);
 
+static enum rb_id_table_iterator_result
+clear_module_cache_i(ID id, VALUE val, void *data)
+{
+    VALUE klass = (VALUE)data;
+    rb_clear_method_cache(klass, id);
+    return ID_TABLE_CONTINUE;
+}
+
 static int
 include_modules_at(const VALUE klass, VALUE c, VALUE module, int search_super)
 {
     VALUE p, iclass;
     int method_changed = 0, constant_changed = 0;
     struct rb_id_table *const klass_m_tbl = RCLASS_M_TBL(RCLASS_ORIGIN(klass));
+    VALUE original_klass = klass;
 
     if (FL_TEST(module, RCLASS_REFINED_BY_ANY)) {
 	ensure_origin(module);
@@ -912,7 +921,7 @@ include_modules_at(const VALUE klass, VALUE c, VALUE module, int search_super)
https://github.com/ruby/ruby/blob/trunk/class.c#L921
 	if (klass_m_tbl && klass_m_tbl == RCLASS_M_TBL(module))
 	    return -1;
 	/* ignore if the module included already in superclasses */
-	for (p = RCLASS_SUPER(klass); p; p = RCLASS_SUPER(p)) {
+        for (p = RCLASS_SUPER(klass); p; p = RCLASS_SUPER(p)) {
 	    int type = BUILTIN_TYPE(p);
 	    if (type == T_ICLASS) {
 		if (RCLASS_M_TBL(p) == RCLASS_M_TBL(module)) {
@@ -924,37 +933,53 @@ include_modules_at(const VALUE klass, VALUE c, VALUE module, int search_super)
https://github.com/ruby/ruby/blob/trunk/class.c#L933
 	    }
 	    else if (type == T_CLASS) {
 		if (!search_super) break;
-		superclass_seen = TRUE;
+                superclass_seen = TRUE;
 	    }
 	}
-	iclass = rb_include_class_new(module, RCLASS_SUPER(c));
+
+        VALUE super_class = RCLASS_SUPER(c);
+
+        // invalidate inline method cache
+        tbl = RMODULE_M_TBL(module);
+        if (tbl && rb_id_table_size(tbl)) {
+            if (search_super) { // include
+                if (super_class && !RB_TYPE_P(super_class, T_MODULE)) {
+                    rb_id_table_foreach(tbl, clear_module_cache_i, (void *)super_class);
+                }
+            }
+            else { // prepend
+                if (!RB_TYPE_P(original_klass, T_MODULE)) {
+                    rb_id_table_foreach(tbl, clear_module_cache_i, (void *)original_klass);
+                }
+            }
+            method_changed = 1;
+        }
+
+        // setup T_ICLASS for the include/prepend module
+        iclass = rb_include_class_new(module, super_class);
 	c = RCLASS_SET_SUPER(c, iclass);
 	RCLASS_SET_INCLUDER(iclass, klass);
 	{
 	    VALUE m = module;
-	    if (BUILTIN_TYPE(m) == T_ICLASS) m = RBASIC(m)->klass;
-	    rb_module_add_to_subclasses_list(m, iclass);
+            if (BUILTIN_TYPE(m) == T_ICLASS) m = RBASIC(m)->klass;
+            rb_module_add_to_subclasses_list(m, iclass);
 	}
 
 	if (FL_TEST(klass, RMODULE_IS_REFINEMENT)) {
 	    VALUE refined_class =
 		rb_refinement_module_get_refined_class(klass);
-	    rb_id_table_foreach(RMODULE_M_TBL(module), add_refined_method_entry_i, (void *)refined_class);
+            rb_id_table_foreach(RMODULE_M_TBL(module), add_refined_method_entry_i, (void *)refined_class);
 	    FL_SET(c, RMODULE_INCLUDED_INTO_REFINEMENT);
 	}
 
-	tbl = RMODULE_M_TBL(module);
-	if (tbl && rb_id_table_size(tbl)) method_changed = 1;
-
 	tbl = RMODULE_CONST_TBL(module);
 	if (tbl && rb_id_table_size(tbl)) constant_changed = 1;
 
   skip:
 	module = RCLASS_SUPER(module);
     }
-    if (method_changed) rb_clear_method_cache_by_class(klass);
     if (constant_changed) rb_clear_constant_cache();
 
     return method_changed;
diff --git a/common.mk b/common.mk
index 50f2c15..5680573 100644
--- a/common.mk
+++ b/common.mk
@@ -2946,6 +2946,7 @@ mjit.$(OBJEXT): {$(VPATH)}thread.h
https://github.com/ruby/ruby/blob/trunk/common.mk#L2946
 mjit.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
 mjit.$(OBJEXT): {$(VPATH)}thread_native.h
 mjit.$(OBJEXT): {$(VPATH)}util.h
+mjit.$(OBJEXT): {$(VPATH)}vm_callinfo.h
 mjit.$(OBJEXT): {$(VPATH)}vm_core.h
 mjit.$(OBJEXT): {$(VPATH)}vm_opts.h
 mjit_compile.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
diff --git a/compile.c b/compile.c
index e1efbcd..94daa65 100644
--- a/compile.c
+++ b/compile.c
@@ -566,6 +566,8 @@ static void
https://github.com/ruby/ruby/blob/trunk/compile.c#L566
 verify_call_cache(rb_iseq_t *iseq)
 {
 #if CPDEBUG
+    // fprintf(stderr, "ci_size:%d\t", iseq->body->ci_size); rp(iseq);
+
     VALUE *original = rb_iseq_original_iseq(iseq);
     size_t i = 0;
     while (i < iseq->body->iseq_size) {
@@ -574,16 +576,27 @@ verify_call_cache(rb_iseq_t *iseq)
https://github.com/ruby/ruby/blob/trunk/compile.c#L576
 
 	for (int j=0; types[j]; j++) {
 	    if (types[j] == TS_CALLDATA) {
-		struct rb_call_cache cc;
 		struct rb_call_data *cd = (struct rb_call_data *)original[i+j+1];
-		MEMZERO(&cc, cc, 1);
-		if (memcmp(&cc, &cd->cc, sizeof(cc))) {
-		    rb_bug("call cache not zero for fresh iseq");
+                const struct rb_callinfo *ci = cd->ci;
+                const struct rb_callcache *cc = cd->cc;
+                if (cc != vm_cc_empty()) {
+                    vm_ci_dump(ci);
+                    rb_bug("call cache is not initialized by vm_cc_empty()");
 		}
 	    }
 	}
 	i += insn_len(insn);
     }
+
+    for (unsigned int i=0; i<iseq->body->ci_size; i++) {
+        struct rb_call_data *cd = &iseq->body->call_data[i];
+        const struct rb_callinfo *ci = cd->ci;
+        const struct rb_callcache *cc = cd->cc;
+        if (cc != NULL && cc != vm_cc_empty()) {
+            vm_ci_dump(ci);
+            rb_bug("call cache is not initialized by vm_cc_empty()");
+        }
+    }
 #endif
 }
@@ -661,7 +674,7 @@ rb_iseq_compile_node(rb_iseq_t *iseq, const NODE *node)
https://github.com/ruby/ruby/blob/trunk/compile.c#L674
     DECL_ANCHOR(ret);
     INIT_ANCHOR(ret);
 
-    if (imemo_type_p((VALUE)node, imemo_ifunc)) {
+    if (IMEMO_TYPE_P(node, imemo_ifunc)) {
         rb_raise(rb_eArgError, "unexpected imemo_ifunc");
     }
@@ -1212,6 +1225,7 @@ new_callinfo(rb_iseq_t *iseq, ID mid, int argc, unsigned int flag, struct rb_cal
https://github.com/ruby/ruby/blob/trunk/compile.c#L1225
 	argc += kw_arg->keyword_len;
     }
 
+    // fprintf(stderr, "[%d] id:%s\t", (int)iseq->body->ci_size, rb_id2name(mid)); rp(iseq);
     iseq->body->ci_size++;
     const struct rb_callinfo *ci = vm_ci_new(mid, flag, argc, kw_arg);
     RB_OBJ_WRITTEN(iseq, Qundef, ci);
@@ -2223,6 +2237,7 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
https://github.com/ruby/ruby/blob/trunk/compile.c#L2237
 		    struct rb_call_data *cd = &body->call_data[ISEQ_COMPILE_DATA(iseq)->ci_index++];
 		    assert(ISEQ_COMPILE_DATA(iseq)->ci_index <= body->ci_size);
 		    cd->ci = source_ci;
+                    cd->cc = vm_cc_empty();
 		    generated_iseq[code_index + 1 + j] = (VALUE)cd;
 		    break;
 		}
@@ -10301,16 +10316,18 @@ ibf_dump_ci_entries(struct ibf_dump *dump, const rb_iseq_t *iseq)
https://github.com/ruby/ruby/blob/trunk/compile.c#L10316
 }
 
 /* note that we dump out rb_call_info but load back rb_call_data */
-static struct rb_call_data *
+static void
 ibf_load_ci_entries(const struct ibf_load *load,
                     ibf_offset_t ci_entries_offset,
-                    unsigned int ci_size)
+                    unsigned int ci_size,
+                    struct rb_call_data **cd_ptr)
 {
     ibf_offset_t reading_pos = ci_entries_offset;
     unsigned int i;
 
     struct rb_call_data *cds = ZALLOC_N(struct rb_call_data, ci_size);
+    *cd_ptr = cds;
 
     for (i = 0; i < ci_size; i++) {
         VALUE mid_index = ibf_load_small_value(load, &reading_pos);
@@ -10331,10 +10348,9 @@ ibf_load_ci_entries(const struct ibf_load *load,
https://github.com/ruby/ruby/blob/trunk/compile.c#L10348
 
         cds[i].ci = vm_ci_new(mid, flag, argc, kwarg);
         RB_OBJ_WRITTEN(load->iseq, Qundef, cds[i].ci);
+        cds[i].cc = vm_cc_empty();
     }
-
-    return cds;
-}
+}
 
 static ibf_offset_t
 ibf_dump_iseq_each(struct ibf_dump *dump, const rb_iseq_t *iseq)
@@ -10588,7 +10604,7 @@ ibf_load_iseq_each(struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t offset)
https://github.com/ruby/ruby/blob/trunk/compile.c#L10604
     load_body->catch_except_p = catch_except_p;
 
     load_body->is_entries = ZALLOC_N(union iseq_inline_storage_entry, is_size);
-    load_body->call_d
(... truncated)
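The compile.c hunks above initialize every cd->cc to vm_cc_empty(), and
verify_call_cache() now insists that a fresh iseq carries exactly that
value, so a call-cache pointer is apparently never left NULL or
zero-filled. The following is a hedged sketch of that sentinel pattern:
cc_empty() and cc_usable() are hypothetical stand-ins for vm_cc_empty()
and the real validity test, and the "sentinel ME is born invalidated"
detail is an assumption of this sketch, not something stated in the
patch.

/* sketch with hypothetical names; see vm_callinfo.h for the real ones */
#include <assert.h>
#include <stdbool.h>

struct method_entry { bool invalidated; };
struct call_cache   { const struct method_entry *me; };

/* ASSUMPTION: one shared, immortal sentinel whose ME never validates */
static const struct method_entry sentinel_me = { true };
static const struct call_cache   sentinel_cc = { &sentinel_me };

static const struct call_cache *
cc_empty(void)                          /* stand-in for vm_cc_empty() */
{
    return &sentinel_cc;
}

static bool
cc_usable(const struct call_cache *cc)  /* the one fast-path test */
{
    return !cc->me->invalidated;
}

int
main(void)
{
    const struct call_cache *cc = cc_empty(); /* a fresh call-site */
    assert(!cc_usable(cc)); /* first call always takes the slow path */
    return 0;
}

If that reading is right, the benefit is a uniform fast path: a
brand-new call-site fails the same single validity test as one whose
method was redefined, so both fall into one shared slow path and the
interpreter never branches on NULL.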
--
ML: ruby-changes@q...
Info: http://www.atdot.net/~ko1/quickml/