Cleanup M_TBL workarounds and comments

Previously we had an assertion that the method table was only set on
young objects, along with a comment stating that this was how it needed
to be used. I think that obscured the actual complexity of the write
barriers that may be needed here.

* Setting an empty M_TBL never needs a write barrier
* T_CLASS and T_MODULE should always fire a write barrier to newly added
  methods
* T_ICLASS only needs a write barrier to methods when
  RCLASSEXT_ICLASS_IS_ORIGIN(x) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(x)

We shouldn't assume that the object being young is sufficient to skip
the barrier: write barriers are also needed for incremental marking, so
a promotion check alone is unreliable (as sketched below).
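
For illustration, a minimal sketch (not code from this commit) of how a
call site could apply those rules. fire_mtbl_write_barriers() and
fire_method_wb_i() are hypothetical helper names; RB_OBJ_WRITTEN(),
rb_id_table_size(), rb_id_table_foreach_values(), and the RCLASSEXT_*
accessors are the existing internals:

    /* Hedged sketch of the barrier rules above; helper names are made up. */
    static enum rb_id_table_iterator_result
    fire_method_wb_i(VALUE me, void *data)
    {
        VALUE klass = (VALUE)data;
        /* Record that klass now references this method entry, so an old or
         * incrementally-marked klass can't hide a newly reachable entry. */
        RB_OBJ_WRITTEN(klass, Qundef, me);
        return ID_TABLE_CONTINUE;
    }

    static void
    fire_mtbl_write_barriers(VALUE klass, struct rb_id_table *tbl)
    {
        /* An empty M_TBL references nothing: no write barrier needed. */
        if (tbl == NULL || rb_id_table_size(tbl) == 0) return;

        /* A T_ICLASS owns its m_tbl only when it is the origin and the
         * table is not shared with the module it was created from. */
        if (RB_TYPE_P(klass, T_ICLASS)) {
            rb_classext_t *ext = RCLASS_EXT_PRIME(klass);
            if (!(RCLASSEXT_ICLASS_IS_ORIGIN(ext) &&
                  !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(ext))) {
                return;
            }
        }

        /* T_CLASS/T_MODULE (and owning iclasses): barrier every entry. */
        rb_id_table_foreach_values(tbl, fire_method_wb_i, (void *)klass);
    }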
Commit: 9256442615
Parent: d67eb07f75
Author: John Hawthorn
Date:   2025-07-23 12:12:58 -07:00

2 changed files with 7 additions and 25 deletions

@@ -259,9 +259,6 @@ static inline void RCLASSEXT_SET_INCLUDER(rb_classext_t *ext, VALUE klass, VALUE
 static inline void RCLASS_SET_SUPER(VALUE klass, VALUE super);
 static inline void RCLASS_WRITE_SUPER(VALUE klass, VALUE super);
-// TODO: rename RCLASS_SET_M_TBL_WORKAROUND (and _WRITE_) to RCLASS_SET_M_TBL with write barrier
-static inline void RCLASS_SET_M_TBL_WORKAROUND(VALUE klass, struct rb_id_table *table, bool check_promoted);
-static inline void RCLASS_WRITE_M_TBL_WORKAROUND(VALUE klass, struct rb_id_table *table, bool check_promoted);
 static inline void RCLASS_SET_CONST_TBL(VALUE klass, struct rb_id_table *table, bool shared);
 static inline void RCLASS_WRITE_CONST_TBL(VALUE klass, struct rb_id_table *table, bool shared);
 static inline void RCLASS_WRITE_CALLABLE_M_TBL(VALUE klass, struct rb_id_table *table);
@@ -594,25 +591,15 @@ RCLASS_FIELDS_COUNT(VALUE obj)
     return 0;
 }
 
-#define RCLASS_SET_M_TBL_EVEN_WHEN_PROMOTED(klass, table) RCLASS_SET_M_TBL_WORKAROUND(klass, table, false)
-#define RCLASS_SET_M_TBL(klass, table) RCLASS_SET_M_TBL_WORKAROUND(klass, table, true)
-
 static inline void
-RCLASS_SET_M_TBL_WORKAROUND(VALUE klass, struct rb_id_table *table, bool check_promoted)
+RCLASS_SET_M_TBL(VALUE klass, struct rb_id_table *table)
 {
-    RUBY_ASSERT(!check_promoted || !RB_OBJ_PROMOTED(klass));
-
     RCLASSEXT_M_TBL(RCLASS_EXT_PRIME(klass)) = table;
 }
 
-#define RCLASS_WRITE_M_TBL_EVEN_WHEN_PROMOTED(klass, table) RCLASS_WRITE_M_TBL_WORKAROUND(klass, table, false)
-#define RCLASS_WRITE_M_TBL(klass, table) RCLASS_WRITE_M_TBL_WORKAROUND(klass, table, true)
-
 static inline void
-RCLASS_WRITE_M_TBL_WORKAROUND(VALUE klass, struct rb_id_table *table, bool check_promoted)
+RCLASS_WRITE_M_TBL(VALUE klass, struct rb_id_table *table)
 {
-    RUBY_ASSERT(!check_promoted || !RB_OBJ_PROMOTED(klass));
-
-    // TODO: add write barrier here to guard assigning m_tbl
-    // see commit 28a6e4ea9d9379a654a8f7c4b37fa33aa3ccd0b7
     RCLASSEXT_M_TBL(RCLASS_EXT_WRITABLE(klass)) = table;
 }