diff --git a/misc/tsan_suppressions.txt b/misc/tsan_suppressions.txt
index a34e040913..692c1be388 100644
--- a/misc/tsan_suppressions.txt
+++ b/misc/tsan_suppressions.txt
@@ -30,13 +30,6 @@ race:check_reserved_signal_
 
 race_top:rb_check_deadlock
 
-# lock_owner
-race_top:thread_sched_setup_running_threads
-race_top:vm_lock_enter
-race_top:rb_ec_vm_lock_rec
-race_top:vm_lock_enter
-race_top:vm_locked
-
 # vm->ractor.sched.grq_cnt++
 race_top:ractor_sched_enq
 race_top:ractor_sched_deq
diff --git a/vm_core.h b/vm_core.h
index 8da7a08119..569aebaba4 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -2065,12 +2065,21 @@ void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
                                unsigned int recorded_lock_rec,
                                unsigned int current_lock_rec);
 
+/* This technically is a data race, as it's checked without the lock, however we
+ * check against a value only our own thread will write. */
+NO_SANITIZE("thread", static inline bool
+vm_locked_by_ractor_p(rb_vm_t *vm, rb_ractor_t *cr))
+{
+    VM_ASSERT(cr == GET_RACTOR());
+    return vm->ractor.sync.lock_owner == cr;
+}
+
 static inline unsigned int
 rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
 {
     rb_vm_t *vm = rb_ec_vm_ptr(ec);
 
-    if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
+    if (!vm_locked_by_ractor_p(vm, rb_ec_ractor_ptr(ec))) {
         return 0;
     }
     else {
diff --git a/vm_sync.c b/vm_sync.c
index 772a3239db..ba311a00e9 100644
--- a/vm_sync.c
+++ b/vm_sync.c
@@ -12,7 +12,7 @@ void rb_ractor_sched_barrier_end(rb_vm_t *vm, rb_ractor_t *cr);
 static bool
 vm_locked(rb_vm_t *vm)
 {
-    return vm->ractor.sync.lock_owner == GET_RACTOR();
+    return vm_locked_by_ractor_p(vm, GET_RACTOR());
 }
 
 #if RUBY_DEBUG > 0