Make RClass.cc_table a managed object

For now this doesn't change anything, but now that the table
is managed by the GC, it opens the door to using RCU when in multi-ractor
mode, hence allowing unsynchronized reads.
Jean Boussier 2025-07-30 12:44:39 +02:00
parent fc5e1541e4
commit f2a7e48dea
10 changed files with 191 additions and 159 deletions
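
In practice the change boils down to the type of the cc_tbl field: it goes from a raw pointer that gc.c had to mark, update and free by hand to a first-class GC object (a TypedData wrapper around the same rb_id_table). A minimal before/after sketch, with hypothetical struct names rather than the real declarations:

    /* Sketch only; the real field lives in rb_classext_struct (internal/class.h). */
    typedef unsigned long VALUE;    /* stand-in for Ruby's VALUE */
    struct rb_id_table;             /* opaque hash table: ID -> VALUE */

    struct classext_before {
        struct rb_id_table *cc_tbl; /* raw pointer: the GC walks it by hand */
    };

    struct classext_after {
        VALUE cc_tbl;               /* TypedData object: the GC owns its lifecycle */
    };

Because the table is now an object with its own GC-tracked lifetime, a future RCU scheme could publish a replacement table while readers still hold the old one; the old table only dies once nothing references it, which is exactly the lifetime guarantee RCU readers need.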

gc.c

@@ -1208,7 +1208,6 @@ classext_free(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
     struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
     rb_id_table_free(RCLASSEXT_M_TBL(ext));
-    rb_cc_tbl_free(RCLASSEXT_CC_TBL(ext), args->klass);
     if (!RCLASSEXT_SHARED_CONST_TBL(ext) && (tbl = RCLASSEXT_CONST_TBL(ext)) != NULL) {
         rb_free_const_table(tbl);
@@ -1239,7 +1238,6 @@ classext_iclass_free(rb_classext_t *ext, bool is_prime, VALUE namespace, void *a
     if (RCLASSEXT_CALLABLE_M_TBL(ext) != NULL) {
         rb_id_table_free(RCLASSEXT_CALLABLE_M_TBL(ext));
     }
-    rb_cc_tbl_free(RCLASSEXT_CC_TBL(ext), args->klass);
     rb_class_classext_free_subclasses(ext, args->klass);
@@ -2263,24 +2261,6 @@ rb_gc_after_updating_jit_code(void)
 #endif
 }
-static enum rb_id_table_iterator_result
-cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
-{
-    size_t *total_size = data_ptr;
-    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
-    *total_size += sizeof(*ccs);
-    *total_size += sizeof(ccs->entries[0]) * ccs->capa;
-    return ID_TABLE_CONTINUE;
-}
-static size_t
-cc_table_memsize(struct rb_id_table *cc_table)
-{
-    size_t total = rb_id_table_memsize(cc_table);
-    rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
-    return total;
-}
 static void
 classext_memsize(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
 {
@@ -2296,9 +2276,6 @@ classext_memsize(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
     if (RCLASSEXT_CONST_TBL(ext)) {
         s += rb_id_table_memsize(RCLASSEXT_CONST_TBL(ext));
     }
-    if (RCLASSEXT_CC_TBL(ext)) {
-        s += cc_table_memsize(RCLASSEXT_CC_TBL(ext));
-    }
     if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
         s += (RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1) * sizeof(VALUE);
     }
@@ -2349,9 +2326,6 @@ rb_obj_memsize_of(VALUE obj)
             size += rb_id_table_memsize(RCLASS_M_TBL(obj));
         }
     }
-    if (RCLASS_WRITABLE_CC_TBL(obj)) {
-        size += cc_table_memsize(RCLASS_WRITABLE_CC_TBL(obj));
-    }
     break;
   case T_STRING:
     size += rb_str_memsize(obj);
@@ -2836,47 +2810,6 @@ mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
     rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
 }
-struct mark_cc_entry_args {
-    rb_objspace_t *objspace;
-    VALUE klass;
-};
-static enum rb_id_table_iterator_result
-mark_cc_entry_i(VALUE ccs_ptr, void *data)
-{
-    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
-    VM_ASSERT(vm_ccs_p(ccs));
-    if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
-        rb_vm_ccs_free(ccs);
-        return ID_TABLE_DELETE;
-    }
-    else {
-        gc_mark_internal((VALUE)ccs->cme);
-        for (int i=0; i<ccs->len; i++) {
-            VM_ASSERT(((struct mark_cc_entry_args *)data)->klass == ccs->entries[i].cc->klass);
-            VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
-            gc_mark_internal((VALUE)ccs->entries[i].cc);
-        }
-        return ID_TABLE_CONTINUE;
-    }
-}
-static void
-mark_cc_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl, VALUE klass)
-{
-    struct mark_cc_entry_args args;
-    if (!tbl) return;
-    args.objspace = objspace;
-    args.klass = klass;
-    rb_id_table_foreach_values(tbl, mark_cc_entry_i, (void *)&args);
-}
 static enum rb_id_table_iterator_result
 mark_cvc_tbl_i(VALUE cvc_entry, void *objspace)
 {
@@ -3114,7 +3047,6 @@ gc_mark_classext_module(rb_classext_t *ext, bool prime, VALUE namespace, void *a
 {
     struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
     rb_objspace_t *objspace = foreach_arg->objspace;
-    VALUE obj = foreach_arg->obj;
     if (RCLASSEXT_SUPER(ext)) {
         gc_mark_internal(RCLASSEXT_SUPER(ext));
@@ -3125,7 +3057,7 @@ gc_mark_classext_module(rb_classext_t *ext, bool prime, VALUE namespace, void *a
         mark_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
     }
     mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
-    mark_cc_tbl(objspace, RCLASSEXT_CC_TBL(ext), obj);
+    gc_mark_internal(RCLASSEXT_CC_TBL(ext));
     mark_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext));
     gc_mark_internal(RCLASSEXT_CLASSPATH(ext));
 }
@@ -3135,7 +3067,6 @@ gc_mark_classext_iclass(rb_classext_t *ext, bool prime, VALUE namespace, void *a
 {
     struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
     rb_objspace_t *objspace = foreach_arg->objspace;
-    VALUE iclass = foreach_arg->obj;
     if (RCLASSEXT_SUPER(ext)) {
         gc_mark_internal(RCLASSEXT_SUPER(ext));
@@ -3147,7 +3078,7 @@ gc_mark_classext_iclass(rb_classext_t *ext, bool prime, VALUE namespace, void *a
         gc_mark_internal(RCLASSEXT_INCLUDER(ext));
     }
     mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
-    mark_cc_tbl(objspace, RCLASSEXT_CC_TBL(ext), iclass);
+    gc_mark_internal(RCLASSEXT_CC_TBL(ext));
 }
 #define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA_TYPE(d)->function.dmark
@@ -3711,33 +3642,6 @@ update_m_tbl(void *objspace, struct rb_id_table *tbl)
     }
 }
-static enum rb_id_table_iterator_result
-update_cc_tbl_i(VALUE ccs_ptr, void *objspace)
-{
-    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
-    VM_ASSERT(vm_ccs_p(ccs));
-    if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->cme)) {
-        ccs->cme = (const rb_callable_method_entry_t *)gc_location_internal(objspace, (VALUE)ccs->cme);
-    }
-    for (int i=0; i<ccs->len; i++) {
-        if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
-            ccs->entries[i].cc = (struct rb_callcache *)gc_location_internal(objspace, (VALUE)ccs->entries[i].cc);
-        }
-    }
-    // do not replace
-    return ID_TABLE_CONTINUE;
-}
-static void
-update_cc_tbl(void *objspace, struct rb_id_table *tbl)
-{
-    if (!tbl) return;
-    rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
-}
 static enum rb_id_table_iterator_result
 update_cvc_tbl_i(VALUE cvc_entry, void *objspace)
 {
@@ -3836,7 +3740,7 @@ update_classext(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
     if (!RCLASSEXT_SHARED_CONST_TBL(ext)) {
         update_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
     }
-    update_cc_tbl(objspace, RCLASSEXT_CC_TBL(ext));
+    UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext));
     update_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext));
     update_superclasses(objspace, ext);
     update_subclasses(objspace, ext);
@@ -3855,7 +3759,7 @@ update_iclass_classext(rb_classext_t *ext, bool is_prime, VALUE namespace, void
     }
     update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
     update_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
-    update_cc_tbl(objspace, RCLASSEXT_CC_TBL(ext));
+    UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext));
     update_subclasses(objspace, ext);
     update_classext_values(objspace, ext, true);
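
All of the deleted gc.c code above moves behind the table's own rb_data_type_t callbacks (added in vm_method.c below), which is why marking collapses to a single gc_mark_internal(RCLASSEXT_CC_TBL(ext)) and compaction to UPDATE_IF_MOVED. A self-contained sketch of that TypedData pattern, with toy names rather than the commit's:

    #include "ruby.h"

    struct toy_table {
        VALUE cached;   /* some reference the GC must keep alive */
    };

    static void
    toy_table_mark(void *ptr)
    {
        struct toy_table *tbl = ptr;
        rb_gc_mark_movable(tbl->cached);            /* movable mark: compaction-safe */
    }

    static void
    toy_table_compact(void *ptr)
    {
        struct toy_table *tbl = ptr;
        tbl->cached = rb_gc_location(tbl->cached);  /* chase the object if it moved */
    }

    static const rb_data_type_t toy_table_type = {
        .wrap_struct_name = "toy_table",
        .function = {
            .dmark = toy_table_mark,
            .dfree = RUBY_TYPED_DEFAULT_FREE,
            .dcompact = toy_table_compact,
        },
        .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
    };

    static VALUE
    toy_table_new(VALUE cached)
    {
        struct toy_table *tbl;
        VALUE obj = TypedData_Make_Struct(rb_cObject, struct toy_table,
                                          &toy_table_type, tbl);
        RB_OBJ_WRITE(obj, &tbl->cached, cached);    /* barrier-protected store */
        return obj;
    }

Once a type defines dmark/dcompact like this, callers holding the wrapper VALUE treat it like any other object, and no table-specific GC code remains to maintain.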

id_table.c

@@ -47,7 +47,7 @@ struct rb_id_table {
 #if SIZEOF_VALUE == 8
 #define ITEM_GET_KEY(tbl, i) ((tbl)->items[i].key)
-#define ITEM_KEY_ISSET(tbl, i) ((tbl)->items[i].key)
+#define ITEM_KEY_ISSET(tbl, i) ((tbl)->items && (tbl)->items[i].key)
 #define ITEM_COLLIDED(tbl, i) ((tbl)->items[i].collision)
 #define ITEM_SET_COLLIDED(tbl, i) ((tbl)->items[i].collision = 1)
 static inline void
@@ -298,6 +298,10 @@ rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_f
 {
     int i, capa = tbl->capa;
+    if (!tbl->items) {
+        return;
+    }
     for (i=0; i<capa; i++) {
         if (ITEM_KEY_ISSET(tbl, i)) {
             enum rb_id_table_iterator_result ret = (*func)(tbl->items[i].val, data);
@@ -345,7 +349,7 @@ managed_id_table_memsize(const void *data)
     return rb_id_table_memsize(tbl) - sizeof(struct rb_id_table);
 }
-static const rb_data_type_t managed_id_table_type = {
+const rb_data_type_t rb_managed_id_table_type = {
     .wrap_struct_name = "VM/managed_id_table",
     .function = {
         .dmark = NULL, // Nothing to mark
@@ -359,20 +363,26 @@ static inline struct rb_id_table *
 managed_id_table_ptr(VALUE obj)
 {
     RUBY_ASSERT(RB_TYPE_P(obj, T_DATA));
-    RUBY_ASSERT(rb_typeddata_inherited_p(RTYPEDDATA_TYPE(obj), &managed_id_table_type));
+    RUBY_ASSERT(rb_typeddata_inherited_p(RTYPEDDATA_TYPE(obj), &rb_managed_id_table_type));
     return RTYPEDDATA_GET_DATA(obj);
 }
 VALUE
-rb_managed_id_table_new(size_t capa)
+rb_managed_id_table_create(const rb_data_type_t *type, size_t capa)
 {
     struct rb_id_table *tbl;
-    VALUE obj = TypedData_Make_Struct(0, struct rb_id_table, &managed_id_table_type, tbl);
+    VALUE obj = TypedData_Make_Struct(0, struct rb_id_table, type, tbl);
     rb_id_table_init(tbl, capa);
     return obj;
 }
+VALUE
+rb_managed_id_table_new(size_t capa)
+{
+    return rb_managed_id_table_create(&rb_managed_id_table_type, capa);
+}
 static enum rb_id_table_iterator_result
 managed_id_table_dup_i(ID id, VALUE val, void *data)
 {
@@ -385,7 +395,7 @@ VALUE
 rb_managed_id_table_dup(VALUE old_table)
 {
     struct rb_id_table *new_tbl;
-    VALUE obj = TypedData_Make_Struct(0, struct rb_id_table, &managed_id_table_type, new_tbl);
+    VALUE obj = TypedData_Make_Struct(0, struct rb_id_table, &rb_managed_id_table_type, new_tbl);
     struct rb_id_table *old_tbl = managed_id_table_ptr(old_table);
     rb_id_table_init(new_tbl, old_tbl->num + 1);
     rb_id_table_foreach(old_tbl, managed_id_table_dup_i, new_tbl);
@@ -415,3 +425,15 @@ rb_managed_id_table_foreach(VALUE table, rb_id_table_foreach_func_t *func, void
 {
     rb_id_table_foreach(managed_id_table_ptr(table), func, data);
 }
+void
+rb_managed_id_table_foreach_values(VALUE table, rb_id_table_foreach_values_func_t *func, void *data)
+{
+    rb_id_table_foreach_values(managed_id_table_ptr(table), func, data);
+}
+int
+rb_managed_id_table_delete(VALUE table, ID id)
+{
+    return rb_id_table_delete(managed_id_table_ptr(table), id);
+}
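
The new rb_managed_id_table_create entry point is what lets a subsystem derive its own table type while reusing the base allocation and freeing logic; the .parent link is also what keeps managed_id_table_ptr()'s rb_typeddata_inherited_p() assertion passing for derived tables. A hedged usage sketch (my_table_type, my_table_free, my_table_memsize and my_table_new are illustrative, not part of the commit):

    #include "ruby.h"
    #include "id_table.h" /* internal header; this sketch assumes in-tree code */

    static void
    my_table_free(void *data)
    {
        /* free anything the derived type owns first, then delegate to the
         * base managed-table dfree, as vm_cc_table_free does in vm_method.c */
        rb_managed_id_table_type.function.dfree(data);
    }

    static size_t
    my_table_memsize(const void *data)
    {
        /* delegate sizing to the base type at runtime */
        return rb_managed_id_table_type.function.dsize(data);
    }

    static const rb_data_type_t my_table_type = {
        .wrap_struct_name = "VM/my_table",
        .function = {
            .dmark = NULL,          /* nothing extra to mark in this toy type */
            .dfree = my_table_free,
            .dsize = my_table_memsize,
        },
        .parent = &rb_managed_id_table_type,
        .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE,
    };

    static VALUE
    my_table_new(ID key, VALUE val)
    {
        VALUE tbl = rb_managed_id_table_create(&my_table_type, 2);
        rb_managed_id_table_insert(tbl, key, val);
        return tbl;
    }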

id_table.h

@@ -35,12 +35,17 @@ void rb_id_table_foreach(struct rb_id_table *tbl, rb_id_table_foreach_func_t *fu
 void rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, void *data);
 void rb_id_table_foreach_values_with_replace(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, rb_id_table_update_value_callback_func_t *replace, void *data);
+VALUE rb_managed_id_table_create(const rb_data_type_t *type, size_t capa);
 VALUE rb_managed_id_table_new(size_t capa);
 VALUE rb_managed_id_table_dup(VALUE table);
 int rb_managed_id_table_insert(VALUE table, ID id, VALUE val);
 int rb_managed_id_table_lookup(VALUE table, ID id, VALUE *valp);
 size_t rb_managed_id_table_size(VALUE table);
 void rb_managed_id_table_foreach(VALUE table, rb_id_table_foreach_func_t *func, void *data);
+void rb_managed_id_table_foreach_values(VALUE table, rb_id_table_foreach_values_func_t *func, void *data);
+int rb_managed_id_table_delete(VALUE table, ID id);
+extern const rb_data_type_t rb_managed_id_table_type;
 RUBY_SYMBOL_EXPORT_BEGIN
 size_t rb_id_table_size(const struct rb_id_table *tbl);

imemo.c

@@ -550,26 +550,6 @@ rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
     vm_ccs_free(ccs, true, Qundef);
 }
-static enum rb_id_table_iterator_result
-cc_tbl_free_i(VALUE ccs_ptr, void *data)
-{
-    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
-    VALUE klass = (VALUE)data;
-    VM_ASSERT(vm_ccs_p(ccs));
-    vm_ccs_free(ccs, false, klass);
-    return ID_TABLE_CONTINUE;
-}
-void
-rb_cc_tbl_free(struct rb_id_table *cc_tbl, VALUE klass)
-{
-    if (!cc_tbl) return;
-    rb_id_table_foreach_values(cc_tbl, cc_tbl_free_i, (void *)klass);
-    rb_id_table_free(cc_tbl);
-}
 static inline void
 imemo_fields_free(struct rb_fields *fields)
 {

internal/class.h

@@ -83,7 +83,7 @@ struct rb_classext_struct {
     struct rb_id_table *m_tbl;
     struct rb_id_table *const_tbl;
     struct rb_id_table *callable_m_tbl;
-    struct rb_id_table *cc_tbl; /* ID -> [[ci1, cc1], [ci2, cc2] ...] */
+    VALUE cc_tbl; /* { ID => { cme, [cc1, cc2, ...] }, ... } */
     struct rb_id_table *cvc_tbl;
     VALUE *superclasses;
     /**
@@ -262,7 +262,7 @@ static inline void RCLASS_WRITE_SUPER(VALUE klass, VALUE super);
 static inline void RCLASS_SET_CONST_TBL(VALUE klass, struct rb_id_table *table, bool shared);
 static inline void RCLASS_WRITE_CONST_TBL(VALUE klass, struct rb_id_table *table, bool shared);
 static inline void RCLASS_WRITE_CALLABLE_M_TBL(VALUE klass, struct rb_id_table *table);
-static inline void RCLASS_WRITE_CC_TBL(VALUE klass, struct rb_id_table *table);
+static inline void RCLASS_WRITE_CC_TBL(VALUE klass, VALUE table);
 static inline void RCLASS_SET_CVC_TBL(VALUE klass, struct rb_id_table *table);
 static inline void RCLASS_WRITE_CVC_TBL(VALUE klass, struct rb_id_table *table);
@@ -628,9 +628,9 @@ RCLASS_WRITE_CALLABLE_M_TBL(VALUE klass, struct rb_id_table *table)
 }
 static inline void
-RCLASS_WRITE_CC_TBL(VALUE klass, struct rb_id_table *table)
+RCLASS_WRITE_CC_TBL(VALUE klass, VALUE table)
 {
-    RCLASSEXT_CC_TBL(RCLASS_EXT_WRITABLE(klass)) = table;
+    RB_OBJ_WRITE(klass, &RCLASSEXT_CC_TBL(RCLASS_EXT_WRITABLE(klass)), table);
 }
 static inline void
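
The one-line body change in RCLASS_WRITE_CC_TBL is the generational-GC contract: now that cc_tbl holds a VALUE, storing into it from a potentially old-generation class must go through the write barrier, or a minor GC could miss the new edge and sweep a young table. A minimal sketch of the rule (toy struct, hypothetical names):

    #include "ruby.h"

    struct toy_ext {
        VALUE cc_tbl;
    };

    static void
    toy_write_cc_tbl(VALUE owner, struct toy_ext *ext, VALUE table)
    {
        /* ext->cc_tbl = table;  -- plain store: skips the barrier, unsafe */
        RB_OBJ_WRITE(owner, &ext->cc_tbl, table); /* store + remembered-set update */
    }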

internal/imemo.h

@@ -148,7 +148,6 @@ static inline void MEMO_V2_SET(struct MEMO *m, VALUE v);
 size_t rb_imemo_memsize(VALUE obj);
 void rb_imemo_mark_and_move(VALUE obj, bool reference_updating);
-void rb_cc_tbl_free(struct rb_id_table *cc_tbl, VALUE klass);
 void rb_imemo_free(VALUE obj);
 RUBY_SYMBOL_EXPORT_BEGIN

method.h

@@ -259,6 +259,6 @@ void rb_vm_delete_cc_refinement(const struct rb_callcache *cc);
 void rb_clear_method_cache(VALUE klass_or_module, ID mid);
 void rb_clear_all_refinement_method_cache(void);
-void rb_invalidate_method_caches(struct rb_id_table *cm_tbl, struct rb_id_table *cc_tbl);
+void rb_invalidate_method_caches(struct rb_id_table *cm_tbl, VALUE cc_tbl);
 #endif /* RUBY_METHOD_H */

vm_callinfo.h

@@ -329,6 +329,8 @@ cc_check_class(VALUE klass)
     return klass;
 }
+VALUE rb_vm_cc_table_create(size_t capa);
 static inline const struct rb_callcache *
 vm_cc_new(VALUE klass,
           const struct rb_callable_method_entry_struct *cme,

vm_insnhelper.c

@@ -1979,7 +1979,7 @@ static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg
 static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
 static struct rb_class_cc_entries *
-vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
+vm_ccs_create(VALUE klass, VALUE cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
 {
     struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
 #if VM_CHECK_MODE > 0
@@ -1991,13 +1991,13 @@ vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable
     METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
     ccs->entries = NULL;
-    rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
-    RB_OBJ_WRITTEN(klass, Qundef, cme);
+    rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
+    RB_OBJ_WRITTEN(cc_tbl, Qundef, cme);
     return ccs;
 }
 static void
-vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
+vm_ccs_push(VALUE cc_tbl, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
 {
     if (! vm_cc_markable(cc)) {
         return;
@@ -2018,7 +2018,7 @@ vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callin
     const int pos = ccs->len++;
     ccs->entries[pos].argc = vm_ci_argc(ci);
     ccs->entries[pos].flag = vm_ci_flag(ci);
-    RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);
+    RB_OBJ_WRITE(cc_tbl, &ccs->entries[pos].cc, cc);
     if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
         // for tuning
@@ -2064,20 +2064,20 @@ static const struct rb_callcache *
 vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
 {
     const ID mid = vm_ci_mid(ci);
-    struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
+    VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
     struct rb_class_cc_entries *ccs = NULL;
     VALUE ccs_data;
     if (cc_tbl) {
         // CCS data is keyed on method id, so we don't need the method id
         // for doing comparisons in the `for` loop below.
-        if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
+        if (rb_managed_id_table_lookup(cc_tbl, mid, &ccs_data)) {
             ccs = (struct rb_class_cc_entries *)ccs_data;
             const int ccs_len = ccs->len;
             if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
+                rb_managed_id_table_delete(cc_tbl, mid);
                 rb_vm_ccs_free(ccs);
-                rb_id_table_delete(cc_tbl, mid);
                 ccs = NULL;
             }
             else {
@@ -2110,7 +2110,7 @@ vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
         }
     }
     else {
-        cc_tbl = rb_id_table_create(2);
+        cc_tbl = rb_vm_cc_table_create(2);
         RCLASS_WRITE_CC_TBL(klass, cc_tbl);
     }
@@ -2141,9 +2141,9 @@ vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
     METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
     if (ccs == NULL) {
-        VM_ASSERT(cc_tbl != NULL);
+        VM_ASSERT(cc_tbl);
-        if (LIKELY(rb_id_table_lookup(cc_tbl, mid, &ccs_data))) {
+        if (LIKELY(rb_managed_id_table_lookup(cc_tbl, mid, &ccs_data))) {
             // rb_callable_method_entry() prepares ccs.
             ccs = (struct rb_class_cc_entries *)ccs_data;
         }
@@ -2156,7 +2156,7 @@ vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
     cme = rb_check_overloaded_cme(cme, ci);
     const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
-    vm_ccs_push(klass, ccs, ci, cc);
+    vm_ccs_push(cc_tbl, ccs, ci, cc);
     VM_ASSERT(vm_cc_cme(cc) != NULL);
     VM_ASSERT(cme->called_id == mid);
vm_method.c

@@ -22,6 +22,126 @@ static inline rb_method_entry_t *lookup_method_table(VALUE klass, ID id);
 #define ruby_running (GET_VM()->running)
 /* int ruby_running = 0; */
+static void
+vm_ccs_free(struct rb_class_cc_entries *ccs)
+{
+    if (ccs->entries) {
+        ruby_xfree(ccs->entries);
+    }
+    ruby_xfree(ccs);
+}
+static enum rb_id_table_iterator_result
+mark_cc_entry_i(VALUE ccs_ptr, void *data)
+{
+    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
+    VM_ASSERT(vm_ccs_p(ccs));
+    if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
+        vm_ccs_free(ccs);
+        return ID_TABLE_DELETE;
+    }
+    else {
+        rb_gc_mark_movable((VALUE)ccs->cme);
+        for (int i=0; i<ccs->len; i++) {
+            VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
+            rb_gc_mark_movable((VALUE)ccs->entries[i].cc);
+        }
+        return ID_TABLE_CONTINUE;
+    }
+}
+static void
+vm_cc_table_mark(void *data)
+{
+    struct rb_id_table *tbl = (struct rb_id_table *)data;
+    if (tbl) {
+        rb_id_table_foreach_values(tbl, mark_cc_entry_i, NULL);
+    }
+}
+static enum rb_id_table_iterator_result
+cc_table_free_i(VALUE ccs_ptr, void *data)
+{
+    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
+    VM_ASSERT(vm_ccs_p(ccs));
+    vm_ccs_free(ccs);
+    return ID_TABLE_CONTINUE;
+}
+static void
+vm_cc_table_free(void *data)
+{
+    struct rb_id_table *tbl = (struct rb_id_table *)data;
+    rb_id_table_foreach_values(tbl, cc_table_free_i, NULL);
+    rb_managed_id_table_type.function.dfree(data);
+}
+static enum rb_id_table_iterator_result
+cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
+{
+    size_t *total_size = data_ptr;
+    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
+    *total_size += sizeof(*ccs);
+    *total_size += sizeof(ccs->entries[0]) * ccs->capa;
+    return ID_TABLE_CONTINUE;
+}
+static size_t
+vm_cc_table_memsize(const void *data)
+{
+    size_t memsize = rb_managed_id_table_type.function.dsize(data);
+    struct rb_id_table *tbl = (struct rb_id_table *)data;
+    rb_id_table_foreach_values(tbl, cc_table_memsize_i, &memsize);
+    return memsize;
+}
+static enum rb_id_table_iterator_result
+compact_cc_entry_i(VALUE ccs_ptr, void *data)
+{
+    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
+    ccs->cme = (const struct rb_callable_method_entry_struct *)rb_gc_location((VALUE)ccs->cme);
+    VM_ASSERT(vm_ccs_p(ccs));
+    for (int i=0; i<ccs->len; i++) {
+        ccs->entries[i].cc = (const struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
+    }
+    return ID_TABLE_CONTINUE;
+}
+static void
+vm_cc_table_compact(void *data)
+{
+    struct rb_id_table *tbl = (struct rb_id_table *)data;
+    rb_id_table_foreach_values(tbl, compact_cc_entry_i, NULL);
+}
+static const rb_data_type_t cc_table_type = {
+    .wrap_struct_name = "VM/cc_table",
+    .function = {
+        .dmark = vm_cc_table_mark,
+        .dfree = vm_cc_table_free,
+        .dsize = vm_cc_table_memsize,
+        .dcompact = vm_cc_table_compact,
+    },
+    .parent = &rb_managed_id_table_type,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE,
+};
+VALUE
+rb_vm_cc_table_create(size_t capa)
+{
+    return rb_managed_id_table_create(&cc_table_type, capa);
+}
 static enum rb_id_table_iterator_result
 vm_ccs_dump_i(ID mid, VALUE val, void *data)
 {
@@ -39,18 +159,18 @@ vm_ccs_dump_i(ID mid, VALUE val, void *data)
 static void
 vm_ccs_dump(VALUE klass, ID target_mid)
 {
-    struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
+    VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
     if (cc_tbl) {
         VALUE ccs;
         if (target_mid) {
-            if (rb_id_table_lookup(cc_tbl, target_mid, &ccs)) {
+            if (rb_managed_id_table_lookup(cc_tbl, target_mid, &ccs)) {
                 fprintf(stderr, " [CCTB] %p\n", (void *)cc_tbl);
                 vm_ccs_dump_i(target_mid, ccs, NULL);
             }
         }
         else {
             fprintf(stderr, " [CCTB] %p\n", (void *)cc_tbl);
-            rb_id_table_foreach(cc_tbl, vm_ccs_dump_i, (void *)target_mid);
+            rb_managed_id_table_foreach(cc_tbl, vm_ccs_dump_i, (void *)target_mid);
         }
     }
 }
@@ -169,15 +289,15 @@ static const rb_callable_method_entry_t *complemented_callable_method_entry(VALU
 static const rb_callable_method_entry_t *lookup_overloaded_cme(const rb_callable_method_entry_t *cme);
 static void
-invalidate_method_cache_in_cc_table(struct rb_id_table *tbl, ID mid)
+invalidate_method_cache_in_cc_table(VALUE tbl, ID mid)
 {
     VALUE ccs_data;
-    if (tbl && rb_id_table_lookup(tbl, mid, &ccs_data)) {
+    if (tbl && rb_managed_id_table_lookup(tbl, mid, &ccs_data)) {
         struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_data;
         rb_yjit_cme_invalidate((rb_callable_method_entry_t *)ccs->cme);
         if (NIL_P(ccs->cme->owner)) invalidate_negative_cache(mid);
         rb_vm_ccs_free(ccs);
-        rb_id_table_delete(tbl, mid);
+        rb_managed_id_table_delete(tbl, mid);
         RB_DEBUG_COUNTER_INC(cc_invalidate_leaf_ccs);
     }
 }
@@ -253,7 +373,7 @@ clear_method_cache_by_id_in_class(VALUE klass, ID mid)
         // check only current class
         // invalidate CCs
-        struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
+        VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
         invalidate_method_cache_in_cc_table(cc_tbl, mid);
         if (RCLASS_CC_TBL_NOT_PRIME_P(klass, cc_tbl)) {
             invalidate_method_cache_in_cc_table(RCLASS_PRIME_CC_TBL(klass), mid);
@@ -385,13 +505,13 @@ invalidate_ccs_in_iclass_cc_tbl(VALUE value, void *data)
 }
 void
-rb_invalidate_method_caches(struct rb_id_table *cm_tbl, struct rb_id_table *cc_tbl)
+rb_invalidate_method_caches(struct rb_id_table *cm_tbl, VALUE cc_tbl)
 {
     if (cm_tbl) {
         rb_id_table_foreach_values(cm_tbl, invalidate_method_entry_in_iclass_callable_m_tbl, NULL);
     }
     if (cc_tbl) {
-        rb_id_table_foreach_values(cc_tbl, invalidate_ccs_in_iclass_cc_tbl, NULL);
+        rb_managed_id_table_foreach_values(cc_tbl, invalidate_ccs_in_iclass_cc_tbl, NULL);
     }
 }
@@ -1559,10 +1679,10 @@ cached_callable_method_entry(VALUE klass, ID mid)
 {
     ASSERT_vm_locking();
-    struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
+    VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
     VALUE ccs_data;
-    if (cc_tbl && rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
+    if (cc_tbl && rb_managed_id_table_lookup(cc_tbl, mid, &ccs_data)) {
         struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_data;
         VM_ASSERT(vm_ccs_p(ccs));
@@ -1573,7 +1693,7 @@ cached_callable_method_entry(VALUE klass, ID mid)
         }
         else {
            rb_vm_ccs_free(ccs);
-            rb_id_table_delete(cc_tbl, mid);
+            rb_managed_id_table_delete(cc_tbl, mid);
        }
    }
@@ -1587,15 +1707,15 @@ cache_callable_method_entry(VALUE klass, ID mid, const rb_callable_method_entry_
     ASSERT_vm_locking();
     VM_ASSERT(cme != NULL);
-    struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
+    VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
     VALUE ccs_data;
     if (!cc_tbl) {
-        cc_tbl = rb_id_table_create(2);
+        cc_tbl = rb_vm_cc_table_create(2);
         RCLASS_WRITE_CC_TBL(klass, cc_tbl);
     }
-    if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
+    if (rb_managed_id_table_lookup(cc_tbl, mid, &ccs_data)) {
 #if VM_CHECK_MODE > 0
         struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_data;
         VM_ASSERT(ccs->cme == cme);