Rename to Fiber#set_scheduler.

This commit is contained in:
Samuel Williams 2020-10-16 14:25:58 +13:00
parent 656d4cddaf
commit a08ee8330d
Notes: git 2020-11-07 19:40:25 +09:00
17 changed files with 157 additions and 133 deletions

View file

@ -774,7 +774,7 @@ thread_do_start(rb_thread_t *th)
rb_bug("unreachable");
}
rb_thread_scheduler_set(th->self, Qnil);
rb_scheduler_set(Qnil);
}
void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
@ -1175,7 +1175,7 @@ thread_join_sleep(VALUE arg)
}
while (target_th->status != THREAD_KILLED) {
VALUE scheduler = rb_thread_current_scheduler();
VALUE scheduler = rb_scheduler_current();
if (scheduler != Qnil) {
rb_scheduler_block(scheduler, target_th->self, p->timeout);
@ -1522,7 +1522,7 @@ rb_thread_sleep_interruptible(void)
static void
rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker)
{
VALUE scheduler = rb_thread_current_scheduler();
VALUE scheduler = rb_scheduler_current();
if (scheduler != Qnil) {
rb_scheduler_block(scheduler, blocker, Qnil);
} else {
@ -3796,80 +3796,6 @@ rb_thread_variables(VALUE thread)
return ary;
}
/*
 * Return the scheduler object stored on +thread+, with no check of the
 * thread's blocking state (contrast rb_thread_scheduler_if_nonblocking).
 */
VALUE
rb_thread_scheduler_get(VALUE thread)
{
    rb_thread_t *target = rb_thread_ptr(thread);

    VM_ASSERT(target);

    return target->scheduler;
}
/*
 * Install +scheduler+ on +thread+ and return it.
 *
 * Before swapping in the new scheduler we invoke Scheduler#close on the
 * previous one (when set), so it runs to completion first; that way a
 * fiber created under the old scheduler never has to interact with the
 * replacement.
 */
VALUE
rb_thread_scheduler_set(VALUE thread, VALUE scheduler)
{
    rb_thread_t *target = rb_thread_ptr(thread);

    VM_ASSERT(target);

    VALUE previous = target->scheduler;
    if (previous != Qnil) {
        rb_scheduler_close(previous);
    }

    target->scheduler = scheduler;

    return target->scheduler;
}
#if 0 // no longer used
/* NOTE(review): dead code, deliberately retained for reference — the
 * Thread.scheduler singleton was superseded by the per-instance
 * Thread#scheduler accessor; its registration below is also commented out. */
/*
 * call-seq:
 *     Thread.scheduler -> scheduler or nil
 *
 * Returns the current scheduler if scheduling operations are permitted.
 *
 */
static VALUE
rb_thread_scheduler(VALUE klass)
{
return rb_thread_scheduler_if_nonblocking(rb_thread_current());
}
#endif
/*
 * Return the current thread's scheduler if it is in a non-blocking region,
 * Qnil otherwise. Thin convenience wrapper over
 * rb_thread_scheduler_if_nonblocking(rb_thread_current()).
 *
 * Fix: declare the parameter list as (void). In a C definition, empty
 * parentheses form an old K&R unprototyped declarator rather than a
 * zero-argument prototype, so calls with arguments would not be diagnosed.
 */
VALUE
rb_thread_current_scheduler(void)
{
    return rb_thread_scheduler_if_nonblocking(rb_thread_current());
}
/*
 * Return +thread+'s scheduler only while the thread is non-blocking
 * (blocking depth of zero); otherwise return Qnil so callers fall back
 * to the blocking code path.
 */
VALUE
rb_thread_scheduler_if_nonblocking(VALUE thread)
{
    rb_thread_t *target = rb_thread_ptr(thread);

    VM_ASSERT(target);

    return target->blocking == 0 ? target->scheduler : Qnil;
}
/*
 * Thread#blocking? — Qfalse when the thread's blocking depth is zero,
 * otherwise the depth itself boxed as a Ruby Integer (a truthy value).
 */
static VALUE
rb_thread_blocking_p(VALUE thread)
{
    unsigned depth = rb_thread_ptr(thread)->blocking;

    return depth ? INT2NUM(depth) : Qfalse;
}
/*
* call-seq:
* thr.thread_variable?(key) -> true or false
@ -5558,7 +5484,6 @@ Init_Thread(void)
rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
rb_define_method(rb_cThread, "blocking?", rb_thread_blocking_p, 0);
rb_define_method(rb_cThread, "status", rb_thread_status, 0);
rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
@ -5574,10 +5499,6 @@ Init_Thread(void)
rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
// rb_define_singleton_method(rb_cThread, "scheduler", rb_thread_scheduler, 0);
rb_define_method(rb_cThread, "scheduler", rb_thread_scheduler_get, 0);
rb_define_method(rb_cThread, "scheduler=", rb_thread_scheduler_set, 1);
rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);