merge revision(s) 0130e17a410d60a10e7041ce98748b8de6946971,32b7dcfb56a417c1d1c354102351fc1825d653bf,79cc566ab4cdf75f125ecf413a27d353a9756c08: [Backport #18394]

Always enable read barrier even on GC.compact

	Some objects can survive the GC that runs before compaction, but
	then get collected in the second, compacting GC.  This means objects
	being "freed" in the second, compacting GC could still reference
	T_MOVED objects.  When that happens, we need to invalidate those
	"moved" addresses.  Invalidation is done via the read barrier, so we
	must make sure the read barrier is active even during `GC.compact`.

	This also means we don't actually need to run a separate GC before
	compaction; we can do the compaction and the GC in one step.
	---
	 gc.c | 20 +++-----------------
	 1 file changed, 3 insertions(+), 17 deletions(-)
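
For context, the read barrier referenced above is built from page protection:
pages containing T_MOVED objects are protected, and the fault handler gets a
chance to invalidate the moved addresses before unprotecting the page and
resuming. Below is a minimal POSIX sketch of that pattern, loosely modeled on
gc.c's lock_page_body()/install_handlers(); it is an illustration under our
own assumptions, not Ruby's actual implementation.

#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static size_t page_size;

/* On fault, a real GC would invalidate the T_MOVED objects on this page
 * (restore them to their original addresses) before making it readable. */
static void
read_barrier_handler(int sig, siginfo_t *info, void *data)
{
    uintptr_t page = (uintptr_t)info->si_addr & ~(page_size - 1);
    if (mprotect((void *)page, page_size, PROT_READ | PROT_WRITE) != 0) {
        _exit(1); /* fault outside our page; don't loop forever */
    }
}

int
main(void)
{
    page_size = (size_t)sysconf(_SC_PAGESIZE);

    struct sigaction action = {0};
    action.sa_sigaction = read_barrier_handler;
    action.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &action, NULL); /* cf. install_handlers() */
    sigaction(SIGBUS, &action, NULL);  /* macOS raises SIGBUS for this */

    char *page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    page[0] = 'x';

    mprotect(page, page_size, PROT_NONE); /* cf. lock_page_body() */
    printf("%c\n", page[0]); /* faults; handler unprotects, and we resume */
    return 0;
}

gc.c's real handler additionally verifies that the faulting address belongs
to a heap page and aborts otherwise, and on Windows the protection is done
with VirtualProtect rather than mprotect (hence the DWORD old_protect context
visible in the diff below).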

	Fix more assumptions about the read barrier

	This is a continuation of 0130e17a41.  We need to always use the
	read barrier.
	---
	 gc.c | 10 ----------
	 1 file changed, 10 deletions(-)

	Make during_compacting flag in GC one bit

	Commit c32218de1b turned the during_compacting flag into 2 bits to
	support the case when there is no write barrier.  But commit
	32b7dcfb56 changed compaction to always enable the read barrier.
	This commit cleans up some of the leftover code.
	---
	 gc.c | 4 ++--
	 1 file changed, 2 insertions(+), 2 deletions(-)
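
A condensed illustration of that flag change (our own sketch, not the layout
of rb_objspace verbatim): with 2 bits, the high bit marked an explicit
GC.compact and was tested with `during_compacting >> 1`; once the read
barrier is always used, a single bit answers the only remaining question,
"are we compacting at all?".

#include <stdio.h>

/* Before: bit 1 = explicit GC.compact, bit 0 = other compaction. */
struct flags_2bit { unsigned int during_compacting : 2; };

/* After: explicit and automatic compaction behave the same, so one bit
 * is enough. */
struct flags_1bit { unsigned int during_compacting : 1; };

int
main(void)
{
    /* Stand-in for (reason & GPR_FLAG_COMPACT) being nonzero. */
    unsigned int compact_requested = 1;

    struct flags_2bit old_flags = { .during_compacting = (!!compact_requested) << 1 };
    struct flags_1bit new_flags = { .during_compacting = !!compact_requested };

    printf("old explicit-compaction test: %u\n", old_flags.during_compacting >> 1);
    printf("new compaction test:          %u\n", new_flags.during_compacting);
    return 0;
}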
Committed by nagachika on 2022-03-12 15:58:42 +09:00
parent e413a8ff97
commit 3ce60f44b8

2 changed files with 6 additions and 30 deletions

gc.c

@@ -679,7 +679,7 @@ typedef struct rb_objspace {
 	unsigned int dont_gc : 1;
 	unsigned int dont_incremental : 1;
 	unsigned int during_gc : 1;
-	unsigned int during_compacting : 2;
+	unsigned int during_compacting : 1;
 	unsigned int gc_stressful: 1;
 	unsigned int has_hook: 1;
 	unsigned int during_minor_gc : 1;
@@ -4479,11 +4479,6 @@ static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free);
 static void
 lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
 {
-    /* If this is an explicit compaction (GC.compact), we don't need a read
-     * barrier, so just return early. */
-    if (objspace->flags.during_compacting >> 1) {
-        return;
-    }
 
 #if defined(_WIN32)
     DWORD old_protect;
@@ -4500,11 +4495,6 @@ lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
 static void
 unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
 {
-    /* If this is an explicit compaction (GC.compact), we don't need a read
-     * barrier, so just return early. */
-    if (objspace->flags.during_compacting >> 1) {
-        return;
-    }
 
 #if defined(_WIN32)
     DWORD old_protect;
@@ -4746,12 +4736,8 @@ gc_compact_finish(rb_objspace_t *objspace, rb_heap_t *heap)
 {
     GC_ASSERT(heap->sweeping_page == heap->compact_cursor);
 
-    /* If this is an explicit compaction (GC.compact), no read barrier was set
-     * so we don't need to unprotect pages or uninstall the SEGV handler */
-    if (!(objspace->flags.during_compacting >> 1)) {
-        gc_unprotect_pages(objspace, heap);
-        uninstall_handlers();
-    }
+    gc_unprotect_pages(objspace, heap);
+    uninstall_handlers();
 
     /* The mutator is allowed to run during incremental sweeping. T_MOVED
      * objects can get pushed on the stack and when the compaction process
@@ -5255,12 +5241,6 @@ gc_compact_start(rb_objspace_t *objspace, rb_heap_t *heap)
     memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
     memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
 
-    /* If this is an explicit compaction (GC.compact), we don't need a read
-     * barrier, so just return early. */
-    if (objspace->flags.during_compacting >> 1) {
-        return;
-    }
-
     /* Set up read barrier for pages containing MOVED objects */
     install_handlers();
 }
@@ -8236,7 +8216,7 @@ gc_start(rb_objspace_t *objspace, int reason)
     objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);
 
     /* Explicitly enable compaction (GC.compact) */
-    objspace->flags.during_compacting = (!!((unsigned)reason & GPR_FLAG_COMPACT) << 1);
+    objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
 
     if (!heap_allocated_pages) return FALSE; /* heap is not ready */
     if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
@@ -9472,11 +9452,7 @@ heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
 static VALUE
 gc_compact(rb_execution_context_t *ec, VALUE self)
 {
-    /* Clear the heap. */
-    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qfalse);
-
-    /* At this point, all references are live and the mutator is not allowed
-     * to run, so we don't need a read barrier. */
+    /* Run GC with compaction enabled */
     gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);
 
     return gc_compact_stats(ec, self);

version.h

@@ -12,7 +12,7 @@
 # define RUBY_VERSION_MINOR RUBY_API_VERSION_MINOR
 #define RUBY_VERSION_TEENY 4
 #define RUBY_RELEASE_DATE RUBY_RELEASE_YEAR_STR"-"RUBY_RELEASE_MONTH_STR"-"RUBY_RELEASE_DAY_STR
-#define RUBY_PATCHLEVEL 182
+#define RUBY_PATCHLEVEL 183
 
 #define RUBY_RELEASE_YEAR 2022
 #define RUBY_RELEASE_MONTH 3