mirror of https://github.com/ruby/ruby.git
Cleanup M_TBL workarounds and comments
Previously we had an assertion that the method table was only set on young objects, and a comment stating that this was how it needed to be used. I think that obscured which write barriers are actually needed here:

* Setting an empty M_TBL never needs a write barrier.
* T_CLASS and T_MODULE should always fire a write barrier for newly added methods.
* T_ICLASS only needs a write barrier for its methods when RCLASSEXT_ICLASS_IS_ORIGIN(x) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(x).

We shouldn't rely on the object being young as a sufficient condition, because write barriers are also needed for incremental marking, which makes that assumption unreliable.
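For context, the three rules above can be expressed as a small decision helper. This is only an illustrative sketch against CRuby's internal headers, not code from this commit: mtbl_needs_write_barrier is a hypothetical name, and it assumes BUILTIN_TYPE, rb_id_table_size, RCLASS_EXT_WRITABLE, and the RCLASSEXT_ICLASS_* predicates behave as in the internal class and id_table headers.

    /* Illustrative sketch only, not part of this commit.
     * Decides whether writes into klass's method table must be paired with a
     * write barrier, following the three rules in the commit message.
     * mtbl_needs_write_barrier is a hypothetical helper name. */
    static bool
    mtbl_needs_write_barrier(VALUE klass, struct rb_id_table *table)
    {
        /* Rule 1: an empty table references no other objects, so installing
         * it never needs a write barrier. */
        if (table == NULL || rb_id_table_size(table) == 0) return false;

        switch (BUILTIN_TYPE(klass)) {
          case T_CLASS:
          case T_MODULE:
            /* Rule 2: classes and modules own their m_tbl, so every newly
             * added method entry must fire a write barrier. */
            return true;
          case T_ICLASS:
            /* Rule 3: an iclass needs barriers only when it is an origin
             * iclass that does not share its m_tbl with the included module. */
            return RCLASSEXT_ICLASS_IS_ORIGIN(RCLASS_EXT_WRITABLE(klass)) &&
                   !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(RCLASS_EXT_WRITABLE(klass));
          default:
            return false;
        }
    }

A caller could use such a check to decide between a plain assignment and a barriered write; the commit itself simply drops the promotion-based assertion rather than adding a helper like this.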
parent: d67eb07f75
commit: 9256442615
2 changed files with 7 additions and 25 deletions
class.c

@@ -734,13 +734,13 @@ static void
 class_initialize_method_table(VALUE c)
 {
     // initialize the prime classext m_tbl
-    RCLASS_SET_M_TBL_EVEN_WHEN_PROMOTED(c, rb_id_table_create(0));
+    RCLASS_SET_M_TBL(c, rb_id_table_create(0));
 }

 static void
 class_clear_method_table(VALUE c)
 {
-    RCLASS_WRITE_M_TBL_EVEN_WHEN_PROMOTED(c, rb_id_table_create(0));
+    RCLASS_WRITE_M_TBL(c, rb_id_table_create(0));
 }

 static VALUE
@@ -978,7 +978,7 @@ copy_tables(VALUE clone, VALUE orig)
         RCLASS_WRITE_CVC_TBL(clone, rb_cvc_tbl_dup);
     }
     rb_id_table_free(RCLASS_M_TBL(clone));
-    RCLASS_WRITE_M_TBL_EVEN_WHEN_PROMOTED(clone, 0);
+    RCLASS_WRITE_M_TBL(clone, 0);
     if (!RB_TYPE_P(clone, T_ICLASS)) {
         rb_fields_tbl_copy(clone, orig);
     }
@@ -1053,9 +1053,7 @@ rb_mod_init_copy(VALUE clone, VALUE orig)
         struct clone_method_arg arg;
         arg.old_klass = orig;
         arg.new_klass = clone;
-        // TODO: use class_initialize_method_table() instead of RCLASS_SET_M_TBL_*
-        // after RCLASS_SET_M_TBL is protected by write barrier
-        RCLASS_SET_M_TBL_EVEN_WHEN_PROMOTED(clone, rb_id_table_create(0));
+        class_initialize_method_table(clone);
         rb_id_table_foreach(RCLASS_M_TBL(orig), clone_method_i, &arg);
     }

@@ -1081,9 +1079,6 @@ rb_mod_init_copy(VALUE clone, VALUE orig)
                 rb_bug("non iclass between module/class and origin");
             }
             clone_p = class_alloc(T_ICLASS, METACLASS_OF(p));
-            /* We should set the m_tbl right after allocation before anything
-             * that can trigger GC to avoid clone_p from becoming old and
-             * needing to fire write barriers. */
             RCLASS_SET_M_TBL(clone_p, RCLASS_M_TBL(p));
             rb_class_set_super(prev_clone_p, clone_p);
             prev_clone_p = clone_p;
@@ -1973,7 +1968,7 @@ rb_prepend_module(VALUE klass, VALUE module)
         if (klass_had_no_origin && klass_origin_m_tbl == RCLASS_M_TBL(subclass)) {
             // backfill an origin iclass to handle refinements and future prepends
             rb_id_table_foreach(RCLASS_M_TBL(subclass), clear_module_cache_i, (void *)subclass);
-            RCLASS_WRITE_M_TBL_EVEN_WHEN_PROMOTED(subclass, klass_m_tbl);
+            RCLASS_WRITE_M_TBL(subclass, klass_m_tbl);
             VALUE origin = rb_include_class_new(klass_origin, RCLASS_SUPER(subclass));
             rb_class_set_super(subclass, origin);
             RCLASS_SET_INCLUDER(origin, RCLASS_INCLUDER(subclass));

(second changed file)

@@ -259,9 +259,6 @@ static inline void RCLASSEXT_SET_INCLUDER(rb_classext_t *ext, VALUE klass, VALUE

 static inline void RCLASS_SET_SUPER(VALUE klass, VALUE super);
 static inline void RCLASS_WRITE_SUPER(VALUE klass, VALUE super);
-// TODO: rename RCLASS_SET_M_TBL_WORKAROUND (and _WRITE_) to RCLASS_SET_M_TBL with write barrier
-static inline void RCLASS_SET_M_TBL_WORKAROUND(VALUE klass, struct rb_id_table *table, bool check_promoted);
-static inline void RCLASS_WRITE_M_TBL_WORKAROUND(VALUE klass, struct rb_id_table *table, bool check_promoted);
 static inline void RCLASS_SET_CONST_TBL(VALUE klass, struct rb_id_table *table, bool shared);
 static inline void RCLASS_WRITE_CONST_TBL(VALUE klass, struct rb_id_table *table, bool shared);
 static inline void RCLASS_WRITE_CALLABLE_M_TBL(VALUE klass, struct rb_id_table *table);
@@ -594,25 +591,15 @@ RCLASS_FIELDS_COUNT(VALUE obj)
     return 0;
 }

-#define RCLASS_SET_M_TBL_EVEN_WHEN_PROMOTED(klass, table) RCLASS_SET_M_TBL_WORKAROUND(klass, table, false)
-#define RCLASS_SET_M_TBL(klass, table) RCLASS_SET_M_TBL_WORKAROUND(klass, table, true)
-
 static inline void
-RCLASS_SET_M_TBL_WORKAROUND(VALUE klass, struct rb_id_table *table, bool check_promoted)
+RCLASS_SET_M_TBL(VALUE klass, struct rb_id_table *table)
 {
-    RUBY_ASSERT(!check_promoted || !RB_OBJ_PROMOTED(klass));
     RCLASSEXT_M_TBL(RCLASS_EXT_PRIME(klass)) = table;
 }

-#define RCLASS_WRITE_M_TBL_EVEN_WHEN_PROMOTED(klass, table) RCLASS_WRITE_M_TBL_WORKAROUND(klass, table, false)
-#define RCLASS_WRITE_M_TBL(klass, table) RCLASS_WRITE_M_TBL_WORKAROUND(klass, table, true)
-
 static inline void
-RCLASS_WRITE_M_TBL_WORKAROUND(VALUE klass, struct rb_id_table *table, bool check_promoted)
+RCLASS_WRITE_M_TBL(VALUE klass, struct rb_id_table *table)
 {
-    RUBY_ASSERT(!check_promoted || !RB_OBJ_PROMOTED(klass));
-    // TODO: add write barrier here to guard assigning m_tbl
-    // see commit 28a6e4ea9d9379a654a8f7c4b37fa33aa3ccd0b7
     RCLASSEXT_M_TBL(RCLASS_EXT_WRITABLE(klass)) = table;
 }
