Refactor vm_lookup_cc to allow lock-free lookups in RClass.cc_tbl
In multi-ractor mode, `cc_tbl` mutations use the RCU pattern, which allows lock-free reads. Based on the assumption that invalidations and misses become increasingly rare as the process ages, taking a lock on modification isn't a big concern.
parent f2a7e48dea
commit 547f111b5b

6 changed files with 231 additions and 107 deletions
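The scheme is copy-on-write: readers load the current table pointer with a single atomic load and never see a table mutated in place, while writers duplicate the table under the VM lock and publish the copy with one atomic store. A minimal standalone sketch of the idea, assuming C11 atomics and a pthread mutex in place of the VM's RB_VM_LOCKING() and GC-managed tables (all names below are illustrative, not Ruby internals):

// Copy-on-write table with lock-free readers: published tables are
// immutable, so any snapshot a reader loads stays valid. Ruby's GC
// reclaims superseded tables; this sketch simply leaks them.
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CAPA 8

struct table { int entries[CAPA]; };

static _Atomic(struct table *) cc_tbl;  // stand-in for RCLASS_WRITABLE_CC_TBL
static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;

// Reader: one atomic load, no lock taken.
static int
lookup(size_t i)
{
    assert(i < CAPA);
    struct table *t = atomic_load(&cc_tbl);
    return t ? t->entries[i] : -1;
}

// Writer: duplicate under the lock, mutate the copy, publish atomically.
static void
insert(size_t i, int value)
{
    assert(i < CAPA);
    pthread_mutex_lock(&vm_lock);
    struct table *old = atomic_load(&cc_tbl);
    struct table *copy = calloc(1, sizeof(*copy));
    if (old) memcpy(copy, old, sizeof(*copy));
    copy->entries[i] = value;
    atomic_store(&cc_tbl, copy);  // readers switch to the new table here
    pthread_mutex_unlock(&vm_lock);
}

int
main(void)
{
    insert(3, 42);
    printf("%d\n", lookup(3));  // => 42
    return 0;
}

In the diff below, the reader role is played by the new vm_lookup_cc (via RUBY_ATOMIC_VALUE_LOAD), the publish step is RB_OBJ_ATOMIC_WRITE on RCLASS_WRITABLE_CC_TBL, and reclamation of superseded tables falls out of GC rather than an explicit free.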
@@ -2315,3 +2315,33 @@ assert_equal "2", %q{
   raise unless $msg.all?{/Ractor#take/ =~ it}
   $msg.size
 }
+
+# Cause lots of inline CC misses.
+assert_equal 'ok', <<~'RUBY'
+  class A; def test; 1 + 1; end; end
+  class B; def test; 1 + 1; end; end
+  class C; def test; 1 + 1; end; end
+  class D; def test; 1 + 1; end; end
+  class E; def test; 1 + 1; end; end
+  class F; def test; 1 + 1; end; end
+  class G; def test; 1 + 1; end; end
+
+  objs = [A.new, B.new, C.new, D.new, E.new, F.new, G.new].freeze
+
+  def call_test(obj)
+    obj.test
+  end
+
+  ractors = 7.times.map do
+    Ractor.new(objs) do |objs|
+      objs = objs.shuffle
+      100_000.times do
+        objs.each do |o|
+          call_test(o)
+        end
+      end
+    end
+  end
+
+  ractors.each(&:join)
+  :ok
+RUBY
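Presumably the seven distinct receiver classes defeat the inline cache at the single call_test call site, so seven ractors iterating over shuffled receivers keep generating concurrent cc_tbl lookups and misses, exercising exactly the lock-free read path and the locked update path introduced below.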
@@ -395,7 +395,7 @@ VALUE
 rb_managed_id_table_dup(VALUE old_table)
 {
     struct rb_id_table *new_tbl;
-    VALUE obj = TypedData_Make_Struct(0, struct rb_id_table, &rb_managed_id_table_type, new_tbl);
+    VALUE obj = TypedData_Make_Struct(0, struct rb_id_table, RTYPEDDATA_TYPE(old_table), new_tbl);
     struct rb_id_table *old_tbl = managed_id_table_ptr(old_table);
     rb_id_table_init(new_tbl, old_tbl->num + 1);
     rb_id_table_foreach(old_tbl, managed_id_table_dup_i, new_tbl);
imemo.c (35 changes)

@@ -515,41 +515,6 @@ rb_free_const_table(struct rb_id_table *tbl)
     rb_id_table_free(tbl);
 }
 
-// alive: if false, target pointers can be freed already.
-static void
-vm_ccs_free(struct rb_class_cc_entries *ccs, int alive, VALUE klass)
-{
-    if (ccs->entries) {
-        for (int i=0; i<ccs->len; i++) {
-            const struct rb_callcache *cc = ccs->entries[i].cc;
-            if (!alive) {
-                // ccs can be free'ed.
-                if (rb_gc_pointer_to_heap_p((VALUE)cc) &&
-                    !rb_objspace_garbage_object_p((VALUE)cc) &&
-                    IMEMO_TYPE_P(cc, imemo_callcache) &&
-                    cc->klass == klass) {
-                    // OK. maybe target cc.
-                }
-                else {
-                    continue;
-                }
-            }
-
-            VM_ASSERT(!vm_cc_super_p(cc) && !vm_cc_refinement_p(cc));
-            vm_cc_invalidate(cc);
-        }
-        ruby_xfree(ccs->entries);
-    }
-    ruby_xfree(ccs);
-}
-
-void
-rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
-{
-    RB_DEBUG_COUNTER_INC(ccs_free);
-    vm_ccs_free(ccs, true, Qundef);
-}
-
 static inline void
 imemo_fields_free(struct rb_fields *fields)
 {
vm_callinfo.h

@@ -330,6 +330,8 @@ cc_check_class(VALUE klass)
 }
 
 VALUE rb_vm_cc_table_create(size_t capa);
+VALUE rb_vm_cc_table_dup(VALUE old_table);
+void rb_vm_cc_table_delete(VALUE table, ID mid);
 
 static inline const struct rb_callcache *
 vm_cc_new(VALUE klass,

@@ -600,11 +602,14 @@ vm_ccs_p(const struct rb_class_cc_entries *ccs)
 static inline bool
 vm_cc_check_cme(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme)
 {
-    if (vm_cc_cme(cc) == cme ||
-        (cme->def->iseq_overload && vm_cc_cme(cc) == rb_vm_lookup_overloaded_cme(cme))) {
+    bool valid;
+    RB_VM_LOCKING() {
+        valid = vm_cc_cme(cc) == cme ||
+            (cme->def->iseq_overload && vm_cc_cme(cc) == rb_vm_lookup_overloaded_cme(cme));
+    }
+    if (valid) {
         return true;
     }
-    else {
 #if 1
     // debug print
 

@@ -616,13 +621,9 @@ vm_cc_check_cme(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme)
     rp(vm_cc_cme(cc));
     rp(rb_vm_lookup_overloaded_cme(cme));
 #endif
     return false;
-    }
 }
 
 #endif
 
-// gc.c
-void rb_vm_ccs_free(struct rb_class_cc_entries *ccs);
-
 #endif /* RUBY_VM_CALLINFO_H */
vm_insnhelper.c (192 changes)
@@ -2060,58 +2060,49 @@ vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
 
 const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
 
-static const struct rb_callcache *
-vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
+static void
+vm_evict_cc(VALUE klass, VALUE cc_tbl, ID mid)
 {
-    const ID mid = vm_ci_mid(ci);
-    VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
-    struct rb_class_cc_entries *ccs = NULL;
-    VALUE ccs_data;
+    ASSERT_vm_locking();
 
-    if (cc_tbl) {
-        // CCS data is keyed on method id, so we don't need the method id
-        // for doing comparisons in the `for` loop below.
-        if (rb_managed_id_table_lookup(cc_tbl, mid, &ccs_data)) {
-            ccs = (struct rb_class_cc_entries *)ccs_data;
-            const int ccs_len = ccs->len;
-
-            if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
-                rb_managed_id_table_delete(cc_tbl, mid);
-                rb_vm_ccs_free(ccs);
-                ccs = NULL;
-            }
-            else {
-                VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
-
-                // We already know the method id is correct because we had
-                // to look up the ccs_data by method id. All we need to
-                // compare is argc and flag
-                unsigned int argc = vm_ci_argc(ci);
-                unsigned int flag = vm_ci_flag(ci);
-
-                for (int i=0; i<ccs_len; i++) {
-                    unsigned int ccs_ci_argc = ccs->entries[i].argc;
-                    unsigned int ccs_ci_flag = ccs->entries[i].flag;
-                    const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
-
-                    VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
-
-                    if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
-                        RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
-
-                        VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
-                        VM_ASSERT(ccs_cc->klass == klass);
-                        VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
-
-                        return ccs_cc;
-                    }
-                }
-            }
+    if (rb_multi_ractor_p()) {
+        if (RCLASS_WRITABLE_CC_TBL(klass) != cc_tbl) {
+            // Another ractor updated the CC table while we were waiting on the VM lock.
+            // We have to retry.
+            return;
         }
+
+        struct rb_class_cc_entries *ccs = NULL;
+        rb_managed_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs);
+
+        if (!ccs || !METHOD_ENTRY_INVALIDATED(ccs->cme)) {
+            // Another ractor replaced that entry while we were waiting on the VM lock.
+            return;
+        }
+
+        VALUE new_table = rb_vm_cc_table_dup(cc_tbl);
+        rb_vm_cc_table_delete(new_table, mid);
+        RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), new_table);
     }
     else {
-        cc_tbl = rb_vm_cc_table_create(2);
-        RCLASS_WRITE_CC_TBL(klass, cc_tbl);
+        rb_vm_cc_table_delete(cc_tbl, mid);
+    }
+}
+
+static const struct rb_callcache *
+vm_populate_cc(VALUE klass, const struct rb_callinfo * const ci, ID mid)
+{
+    ASSERT_vm_locking();
+
+    VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
+    const VALUE original_cc_table = cc_tbl;
+    struct rb_class_cc_entries *ccs = NULL;
+
+    if (!cc_tbl) {
+        cc_tbl = rb_vm_cc_table_create(1);
+    }
+    else if (rb_multi_ractor_p()) {
+        cc_tbl = rb_vm_cc_table_dup(cc_tbl);
     }
 
     RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
@@ -2143,11 +2134,7 @@ vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
     if (ccs == NULL) {
         VM_ASSERT(cc_tbl);
 
-        if (LIKELY(rb_managed_id_table_lookup(cc_tbl, mid, &ccs_data))) {
-            // rb_callable_method_entry() prepares ccs.
-            ccs = (struct rb_class_cc_entries *)ccs_data;
-        }
-        else {
+        if (!LIKELY(rb_managed_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs))) {
             // TODO: required?
             ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
         }
@@ -2162,6 +2149,91 @@ vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
     VM_ASSERT(cme->called_id == mid);
     VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
 
+    if (original_cc_table != cc_tbl) {
+        RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), cc_tbl);
+    }
+
+    return cc;
+}
+
+static const struct rb_callcache *
+vm_lookup_cc(const VALUE klass, const struct rb_callinfo * const ci, ID mid)
+{
+    VALUE cc_tbl;
+    struct rb_class_cc_entries *ccs;
+
+  retry:
+    cc_tbl = RUBY_ATOMIC_VALUE_LOAD(RCLASS_WRITABLE_CC_TBL(klass));
+    ccs = NULL;
+
+    if (cc_tbl) {
+        // CCS data is keyed on method id, so we don't need the method id
+        // for doing comparisons in the `for` loop below.
+        if (rb_managed_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs)) {
+            const int ccs_len = ccs->len;
+
+            if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
+                RB_VM_LOCKING() {
+                    vm_evict_cc(klass, cc_tbl, mid);
+                }
+                goto retry;
+            }
+            else {
+                VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
+
+                // We already know the method id is correct because we had
+                // to look up the ccs_data by method id. All we need to
+                // compare is argc and flag
+                unsigned int argc = vm_ci_argc(ci);
+                unsigned int flag = vm_ci_flag(ci);
+
+                for (int i=0; i<ccs_len; i++) {
+                    unsigned int ccs_ci_argc = ccs->entries[i].argc;
+                    unsigned int ccs_ci_flag = ccs->entries[i].flag;
+                    const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
+
+                    VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
+
+                    if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
+                        RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
+
+                        VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
+                        VM_ASSERT(ccs_cc->klass == klass);
+                        VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
+
+                        return ccs_cc;
+                    }
+                }
+            }
+        }
+    }
+
+    RB_GC_GUARD(cc_tbl);
+    return NULL;
+}
+
+static const struct rb_callcache *
+vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
+{
+    const ID mid = vm_ci_mid(ci);
+
+    const struct rb_callcache *cc = vm_lookup_cc(klass, ci, mid);
+    if (cc) {
+        return cc;
+    }
+
+    RB_VM_LOCKING() {
+        if (rb_multi_ractor_p()) {
+            // The CC may have been populated by another ractor while we were waiting on the lock,
+            // so we must lookup a second time.
+            cc = vm_lookup_cc(klass, ci, mid);
+        }
+
+        if (!cc) {
+            cc = vm_populate_cc(klass, ci, mid);
+        }
+    }
+
     return cc;
 }
@@ -2172,16 +2244,14 @@ rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
 
     VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
 
-    RB_VM_LOCKING() {
-        cc = vm_search_cc(klass, ci);
+    cc = vm_search_cc(klass, ci);
 
     VM_ASSERT(cc);
     VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
     VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
     VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
     VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
     VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
-    }
 
     return cc;
 }
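The net shape of vm_search_cc is double-checked locking: an optimistic lock-free vm_lookup_cc, then, on a miss, a second lookup under the VM lock before vm_populate_cc, since another ractor may have populated the entry while we waited. Eviction is equally defensive: vm_evict_cc re-checks that the table it saw is still current and that the entry is still invalidated before publishing a pruned copy, and the reader then retries against the fresh table.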
vm_method.c (62 changes)
@@ -142,6 +142,64 @@ rb_vm_cc_table_create(size_t capa)
     return rb_managed_id_table_create(&cc_table_type, capa);
 }
 
+static enum rb_id_table_iterator_result
+vm_cc_table_dup_i(ID key, VALUE old_ccs_ptr, void *data)
+{
+    struct rb_class_cc_entries *old_ccs = (struct rb_class_cc_entries *)old_ccs_ptr;
+    struct rb_class_cc_entries *new_ccs = ALLOC(struct rb_class_cc_entries);
+    MEMCPY(new_ccs, old_ccs, struct rb_class_cc_entries, 1);
+#if VM_CHECK_MODE > 0
+    new_ccs->debug_sig = ~(VALUE)new_ccs;
+#endif
+    new_ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, new_ccs->capa);
+    MEMCPY(new_ccs->entries, old_ccs->entries, struct rb_class_cc_entries_entry, new_ccs->capa);
+
+    VALUE new_table = (VALUE)data;
+    rb_managed_id_table_insert(new_table, key, (VALUE)new_ccs);
+    for (int index = 0; index < new_ccs->len; index++) {
+        RB_OBJ_WRITTEN(new_table, Qundef, new_ccs->entries[index].cc);
+    }
+    return ID_TABLE_CONTINUE;
+}
+
+VALUE
+rb_vm_cc_table_dup(VALUE old_table)
+{
+    VALUE new_table = rb_vm_cc_table_create(rb_managed_id_table_size(old_table));
+    rb_managed_id_table_foreach(old_table, vm_cc_table_dup_i, (void *)new_table);
+    return new_table;
+}
+
+static void
+vm_ccs_invalidate(struct rb_class_cc_entries *ccs)
+{
+    if (ccs->entries) {
+        for (int i=0; i<ccs->len; i++) {
+            const struct rb_callcache *cc = ccs->entries[i].cc;
+            VM_ASSERT(!vm_cc_super_p(cc) && !vm_cc_refinement_p(cc));
+            vm_cc_invalidate(cc);
+        }
+    }
+}
+
+void
+rb_vm_ccs_invalidate_and_free(struct rb_class_cc_entries *ccs)
+{
+    RB_DEBUG_COUNTER_INC(ccs_free);
+    vm_ccs_invalidate(ccs);
+    vm_ccs_free(ccs);
+}
+
+void
+rb_vm_cc_table_delete(VALUE table, ID mid)
+{
+    struct rb_class_cc_entries *ccs;
+    if (rb_managed_id_table_lookup(table, mid, (VALUE *)&ccs)) {
+        rb_managed_id_table_delete(table, mid);
+        rb_vm_ccs_invalidate_and_free(ccs);
+    }
+}
+
 static enum rb_id_table_iterator_result
 vm_ccs_dump_i(ID mid, VALUE val, void *data)
 {
@@ -296,7 +354,7 @@ invalidate_method_cache_in_cc_table(VALUE tbl, ID mid)
         struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_data;
         rb_yjit_cme_invalidate((rb_callable_method_entry_t *)ccs->cme);
         if (NIL_P(ccs->cme->owner)) invalidate_negative_cache(mid);
-        rb_vm_ccs_free(ccs);
+        rb_vm_ccs_invalidate_and_free(ccs);
         rb_managed_id_table_delete(tbl, mid);
         RB_DEBUG_COUNTER_INC(cc_invalidate_leaf_ccs);
     }
@@ -1692,8 +1750,8 @@ cached_callable_method_entry(VALUE klass, ID mid)
             return ccs->cme;
         }
         else {
-            rb_vm_ccs_free(ccs);
             rb_managed_id_table_delete(cc_tbl, mid);
+            rb_vm_ccs_invalidate_and_free(ccs);
         }
     }