Standardised scheduler interface.

This commit is contained in:
Samuel Williams 2020-08-20 13:51:45 +12:00
parent 905e9c8093
commit d387029f39
Notes: git 2020-09-14 13:44:39 +09:00
13 changed files with 313 additions and 216 deletions

View file

@ -112,8 +112,6 @@ static VALUE sym_immediate;
static VALUE sym_on_blocking;
static VALUE sym_never;
static ID id_wait_for_single_fd;
enum SLEEP_FLAGS {
SLEEP_DEADLOCKABLE = 0x1,
SLEEP_SPURIOUS_CHECK = 0x2
@ -1603,7 +1601,6 @@ rb_nogvl(void *(*func)(void *), void *data1,
rb_thread_t *th = rb_ec_thread_ptr(ec);
int saved_errno = 0;
VALUE ubf_th = Qfalse;
VALUE scheduler = th->scheduler;
if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
ubf = ubf_select;
@ -1618,10 +1615,6 @@ rb_nogvl(void *(*func)(void *), void *data1,
}
}
if (scheduler != Qnil) {
rb_funcall(scheduler, rb_intern("enter_blocking_region"), 0);
}
BLOCKING_REGION(th, {
val = func(data1);
saved_errno = errno;
@ -1637,10 +1630,6 @@ rb_nogvl(void *(*func)(void *), void *data1,
thread_value(rb_thread_kill(ubf_th));
}
if (scheduler != Qnil) {
rb_funcall(scheduler, rb_intern("exit_blocking_region"), 0);
}
errno = saved_errno;
return val;
@ -3749,7 +3738,7 @@ rb_thread_scheduler(VALUE klass)
return rb_thread_scheduler_if_nonblocking(rb_thread_current());
}
static VALUE
/*
 * Public accessor: returns the result of
 * rb_thread_scheduler_if_nonblocking() for the current thread
 * (presumably the active scheduler, or Qnil when the thread is
 * blocking — confirm against that function's definition).
 */
VALUE
rb_thread_current_scheduler(void)
{
    /* `(void)` gives a proper prototype; a bare `()` is an old-style
     * declaration that accepts any arguments without type checking. */
    return rb_thread_scheduler_if_nonblocking(rb_thread_current());
}
@ -4332,15 +4321,6 @@ rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t *
return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
}
/*
 * Convert an optional timeout into a value suitable for passing to a
 * scheduler hook: a Float number of seconds, or Qnil when `timeout` is
 * NULL (meaning "wait indefinitely").
 */
static VALUE
rb_thread_timeout(struct timeval *timeout) {
    if (timeout) {
        /* Use double-precision arithmetic throughout: the float literal
         * 0.000001f cannot represent 1e-6 exactly and loses precision
         * for microsecond values approaching 999999. */
        return rb_float_new((double)timeout->tv_sec + (double)timeout->tv_usec * 1e-6);
    }

    return Qnil;
}
#ifdef USE_POLL
/* The same with linux kernel. TODO: make platform independent definition. */
@ -4356,7 +4336,7 @@ rb_thread_timeout(struct timeval *timeout) {
* returns a mask of events
*/
int
rb_wait_for_single_fd(int fd, int events, struct timeval *timeout)
rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
{
struct pollfd fds[2];
int result = 0, lerrno;
@ -4367,14 +4347,6 @@ rb_wait_for_single_fd(int fd, int events, struct timeval *timeout)
struct waiting_fd wfd;
int state;
VALUE scheduler = rb_thread_scheduler_if_nonblocking(rb_thread_current());
if (scheduler != Qnil) {
VALUE result = rb_funcall(scheduler, id_wait_for_single_fd, 3, INT2NUM(fd), INT2NUM(events),
rb_thread_timeout(timeout)
);
return RTEST(result);
}
wfd.th = GET_THREAD();
wfd.fd = fd;
@ -4513,16 +4485,8 @@ select_single_cleanup(VALUE ptr)
}
int
rb_wait_for_single_fd(int fd, int events, struct timeval *timeout)
rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
{
VALUE scheduler = rb_thread_scheduler_if_nonblocking(rb_thread_current());
if (scheduler != Qnil) {
VALUE result = rb_funcall(scheduler, id_wait_for_single_fd, 3, INT2NUM(fd), INT2NUM(events),
rb_thread_timeout(timeout)
);
return RTEST(result);
}
rb_fdset_t rfds, wfds, efds;
struct select_args args;
int r;
@ -5450,8 +5414,6 @@ Init_Thread(void)
sym_immediate = ID2SYM(rb_intern("immediate"));
sym_on_blocking = ID2SYM(rb_intern("on_blocking"));
id_wait_for_single_fd = rb_intern("wait_for_single_fd");
rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);