Convert rb_class_cc_entries.entries into a flexible array member

`rb_class_cc_entries` is little more than a `len` and a `capa`.
Hence embedding the entries directly in the struct costs little
extra copying, and it saves an allocation, a bit of memory, and
some pointer chasing.

Co-Authored-By: Étienne Barrié <etienne.barrie@gmail.com>
Jean Boussier 2025-08-01 12:21:04 +02:00
parent c6dd3cefa1
commit bc789ca804
3 changed files with 31 additions and 36 deletions
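
For context, the core idiom here is a C99 flexible array member (FAM). Below is a minimal before/after sketch using hypothetical stand-in types (`struct entry`, `ccs_before`, `ccs_after`; not the actual Ruby declarations): a pointer member needs a second heap allocation plus a dereference on every access, while a flexible array member keeps the entries in the same allocation as the header.

    #include <stddef.h>
    #include <stdlib.h>

    struct entry { unsigned int argc; unsigned int flag; const void *cc; };

    /* Before: header and entries are two separate allocations. */
    struct ccs_before {
        int capa, len;
        struct entry *entries;   /* second allocation, extra pointer chase */
    };

    /* After: a single allocation; entries live right behind the header.
     * Ruby spells the empty brackets FLEX_ARY_LEN for compiler portability. */
    struct ccs_after {
        int capa, len;
        struct entry entries[];  /* C99 flexible array member */
    };

    /* Bytes needed for the header plus `capa` trailing entries. */
    static size_t ccs_alloc_size(size_t capa)
    {
        return offsetof(struct ccs_after, entries) + sizeof(struct entry) * capa;
    }

    static struct ccs_after *ccs_new(int capa)
    {
        struct ccs_after *ccs = malloc(ccs_alloc_size((size_t)capa));
        if (ccs) {
            ccs->capa = capa;
            ccs->len = 0;
        }
        return ccs;
    }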

Changed file 1 of 3:

@@ -585,9 +585,15 @@ struct rb_class_cc_entries {
         unsigned int argc;
         unsigned int flag;
         const struct rb_callcache *cc;
-    } *entries;
+    } entries[FLEX_ARY_LEN];
 };
 
+static inline size_t
+vm_ccs_alloc_size(size_t capa)
+{
+    return offsetof(struct rb_class_cc_entries, entries) + (sizeof(struct rb_class_cc_entries_entry) * capa);
+}
+
 #if VM_CHECK_MODE > 0
 const rb_callable_method_entry_t *rb_vm_lookup_overloaded_cme(const rb_callable_method_entry_t *cme);
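
Note the `offsetof` rather than `sizeof` in `vm_ccs_alloc_size`: `sizeof` on a struct with a flexible array member is rounded up to the struct's alignment, so the first entries may legally start inside what `sizeof` would count as tail padding. A small hypothetical illustration (the printed figures assume a typical LP64 ABI):

    #include <stddef.h>
    #include <stdio.h>

    /* The `long` forces 8-byte struct alignment, so 4 bytes of tail
     * padding follow `len`, padding the trailing array may reuse. */
    struct padded {
        long tag;
        int  len;
        int  items[];   /* flexible array member */
    };

    int main(void)
    {
        /* Typically prints "sizeof=16 offsetof=12" on LP64 platforms:
         * offsetof() is the true start of the array, sizeof() is not. */
        printf("sizeof=%zu offsetof=%zu\n",
               sizeof(struct padded), offsetof(struct padded, items));
        return 0;
    }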

Changed file 2 of 3:

@@ -1981,15 +1981,15 @@ static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
 static struct rb_class_cc_entries *
 vm_ccs_create(VALUE klass, VALUE cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
 {
-    struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
+    int initial_capa = 2;
+    struct rb_class_cc_entries *ccs = ruby_xmalloc(vm_ccs_alloc_size(initial_capa));
 #if VM_CHECK_MODE > 0
     ccs->debug_sig = ~(VALUE)ccs;
 #endif
-    ccs->capa = 0;
+    ccs->capa = initial_capa;
     ccs->len = 0;
     ccs->cme = cme;
     METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
-    ccs->entries = NULL;
 
     rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
     RB_OBJ_WRITTEN(cc_tbl, Qundef, cme);
@@ -1997,21 +1997,21 @@ vm_ccs_create(VALUE klass, VALUE cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
 }
 
 static void
-vm_ccs_push(VALUE cc_tbl, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
+vm_ccs_push(VALUE cc_tbl, ID mid, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
 {
     if (! vm_cc_markable(cc)) {
         return;
     }
 
     if (UNLIKELY(ccs->len == ccs->capa)) {
-        if (ccs->capa == 0) {
-            ccs->capa = 1;
-            ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, ccs->capa);
-        }
-        else {
-            ccs->capa *= 2;
-            REALLOC_N(ccs->entries, struct rb_class_cc_entries_entry, ccs->capa);
-        }
+        RUBY_ASSERT(ccs->capa > 0);
+        ccs->capa *= 2;
+        ccs = ruby_xrealloc(ccs, vm_ccs_alloc_size(ccs->capa));
+#if VM_CHECK_MODE > 0
+        ccs->debug_sig = ~(VALUE)ccs;
+#endif
+        // GC?
+        rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs);
     }
 
     VM_ASSERT(ccs->len < ccs->capa);
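
One consequence of embedding the entries is visible in this hunk: growing the array now has to `ruby_xrealloc` the whole `rb_class_cc_entries`, which may move it, so the now-stale pointer stored in `cc_tbl` must be re-inserted under `mid` (hence the extra `ID mid` parameter). A generic sketch of the pattern, with a hypothetical `struct vec` and a plain pointer `slot` standing in for the table cell:

    #include <stddef.h>
    #include <stdlib.h>

    struct vec {
        size_t capa, len;
        int items[];   /* flexible array member */
    };

    static size_t vec_alloc_size(size_t capa)
    {
        return offsetof(struct vec, items) + sizeof(int) * capa;
    }

    /* Growing reallocates the whole object, so its address may change;
     * the owner's pointer must be republished before it is read again. */
    static void vec_push(struct vec **slot, int item)
    {
        struct vec *v = *slot;
        if (v->len == v->capa) {
            v->capa *= 2;
            v = realloc(v, vec_alloc_size(v->capa));
            if (v == NULL) abort();
            *slot = v;   /* overwrite the now-stale pointer */
        }
        v->items[v->len++] = item;
    }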
@@ -2143,7 +2143,7 @@ vm_populate_cc(VALUE klass, const struct rb_callinfo * const ci, ID mid)
     cme = rb_check_overloaded_cme(cme, ci);
 
     const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
-    vm_ccs_push(cc_tbl, ccs, ci, cc);
+    vm_ccs_push(cc_tbl, mid, ccs, ci, cc);
 
     VM_ASSERT(vm_cc_cme(cc) != NULL);
     VM_ASSERT(cme->called_id == mid);

Changed file 3 of 3:

@@ -22,15 +22,6 @@ static inline rb_method_entry_t *lookup_method_table(VALUE klass, ID id);
 #define ruby_running (GET_VM()->running)
 /* int ruby_running = 0; */
 
-static void
-vm_ccs_free(struct rb_class_cc_entries *ccs)
-{
-    if (ccs->entries) {
-        ruby_xfree(ccs->entries);
-    }
-    ruby_xfree(ccs);
-}
-
 static enum rb_id_table_iterator_result
 mark_cc_entry_i(VALUE ccs_ptr, void *data)
 {
@@ -39,7 +30,7 @@ mark_cc_entry_i(VALUE ccs_ptr, void *data)
     VM_ASSERT(vm_ccs_p(ccs));
 
     if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
-        vm_ccs_free(ccs);
+        ruby_xfree(ccs);
         return ID_TABLE_DELETE;
     }
     else {
@@ -69,7 +60,7 @@ cc_table_free_i(VALUE ccs_ptr, void *data)
     struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
     VM_ASSERT(vm_ccs_p(ccs));
-    vm_ccs_free(ccs);
+    ruby_xfree(ccs);
     return ID_TABLE_CONTINUE;
 }
@@ -146,13 +137,13 @@ static enum rb_id_table_iterator_result
 vm_cc_table_dup_i(ID key, VALUE old_ccs_ptr, void *data)
 {
     struct rb_class_cc_entries *old_ccs = (struct rb_class_cc_entries *)old_ccs_ptr;
-    struct rb_class_cc_entries *new_ccs = ALLOC(struct rb_class_cc_entries);
-    MEMCPY(new_ccs, old_ccs, struct rb_class_cc_entries, 1);
+    size_t memsize = vm_ccs_alloc_size(old_ccs->capa);
+    struct rb_class_cc_entries *new_ccs = ruby_xmalloc(memsize);
+    memcpy(new_ccs, old_ccs, memsize);
 #if VM_CHECK_MODE > 0
     new_ccs->debug_sig = ~(VALUE)new_ccs;
 #endif
-    new_ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, new_ccs->capa);
-    MEMCPY(new_ccs->entries, old_ccs->entries, struct rb_class_cc_entries_entry, new_ccs->capa);
 
     VALUE new_table = (VALUE)data;
     rb_managed_id_table_insert(new_table, key, (VALUE)new_ccs);
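
With the entries inline, duplication collapses to one allocation plus one `memcpy` of the full `vm_ccs_alloc_size(old_ccs->capa)` bytes: header and entries are cloned together. Restated with the same hypothetical `struct vec` as the sketch above:

    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    struct vec {
        size_t capa, len;
        int items[];
    };

    static size_t vec_alloc_size(size_t capa)
    {
        return offsetof(struct vec, items) + sizeof(int) * capa;
    }

    /* One malloc + one memcpy clones header and entries in a single shot. */
    static struct vec *vec_dup(const struct vec *old)
    {
        size_t memsize = vec_alloc_size(old->capa);
        struct vec *copy = malloc(memsize);
        if (copy) memcpy(copy, old, memsize);
        return copy;
    }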
@@ -173,21 +164,19 @@
 static void
 vm_ccs_invalidate(struct rb_class_cc_entries *ccs)
 {
-    if (ccs->entries) {
-        for (int i=0; i<ccs->len; i++) {
-            const struct rb_callcache *cc = ccs->entries[i].cc;
-            VM_ASSERT(!vm_cc_super_p(cc) && !vm_cc_refinement_p(cc));
-            vm_cc_invalidate(cc);
-        }
+    for (int i=0; i<ccs->len; i++) {
+        const struct rb_callcache *cc = ccs->entries[i].cc;
+        VM_ASSERT(!vm_cc_super_p(cc) && !vm_cc_refinement_p(cc));
+        vm_cc_invalidate(cc);
     }
 }
 
 void
 rb_vm_ccs_invalidate_and_free(struct rb_class_cc_entries *ccs)
 {
     RB_DEBUG_COUNTER_INC(ccs_free);
     vm_ccs_invalidate(ccs);
-    vm_ccs_free(ccs);
+    ruby_xfree(ccs);
 }
 
 void
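
Teardown simplifies the same way: there is no separate entries array to free or NULL-check anymore, so `vm_ccs_free` disappears and callers release the single allocation directly. A closing sketch, again with the hypothetical `struct vec`:

    #include <stdlib.h>

    struct vec { size_t capa, len; int items[]; };  /* as in the sketches above */

    /* Before the change, freeing took two steps (free the entries array,
     * then the header); with a flexible array member the entries die
     * with the header in one call. */
    static void vec_free(struct vec *v)
    {
        free(v);
    }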