KQueue support for M:N threads
* Allows macOS users to use M:N threads (and technically FreeBSD, though it has not been verified there)
* Add a sys/event.h header check for the kqueue macros, and include sys/event.h when present
* Rename epoll_fd to the more generic kq_fd (Kernel event Queue), used by both epoll and kqueue
* MAP_STACK is not available on macOS, so conditionally apply it to the mmap flags (see the mmap sketch below)
* Set the fd to close on exec
* Log debug messages specific to kqueue and epoll on creation
* close_invalidate raises an error for the kqueue fd on child process fork. It is unclear right now whether that is a bug or kqueue-specific behavior

Use kq with rb_thread_wait_for_single_fd

* Only platforms with `USE_POLL` (Linux) had changes applied to take advantage of kernel event queues. The changes also needed to be applied to the `select` path so that kqueue could be used there as well
* Clean up kqueue-specific code and make sure only flags that were actually set are removed (or an error is raised)
* Also handle kevent-specific errnos, since most don't carry over from epoll to kqueue (see the kevent sketch below)
* Use the more platform-standard close-on-exec approach of `fcntl` and `FD_CLOEXEC` (see the fcntl sketch below). The io-event gem uses `ioctl`, but fcntl seems to be the recommended choice; it is also what Go, Bun, and libuv use
* We're making changes in this file anyway, so fix a couple of spelling mistakes while here

Make sure FD_CLOEXEC carries over in dup

* Otherwise the kqueue descriptor should have FD_CLOEXEC but doesn't, and fails in assert_close_on_exec
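As an aside on the MAP_STACK point above, a minimal sketch of the conditional-flag approach (the function name and stack size here are hypothetical, not the commit's code): the flag is OR'd into the mmap flags only on platforms that define it, since macOS does not.

    #include <sys/mman.h>
    #include <stddef.h>

    #ifndef MAP_ANONYMOUS
    #define MAP_ANONYMOUS MAP_ANON /* some BSDs spell it MAP_ANON */
    #endif

    /* hypothetical stack size, for illustration only */
    #define STACK_SIZE (512 * 1024)

    void *
    alloc_machine_stack(void)
    {
        int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    #ifdef MAP_STACK
        flags |= MAP_STACK; /* available on Linux/FreeBSD, absent on macOS */
    #endif
        void *p = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, flags, -1, 0);
        return p == MAP_FAILED ? NULL : p;
    }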
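The close-on-exec and dup points can be illustrated with a standalone sketch (helper names are hypothetical): fcntl with FD_CLOEXEC marks the queue descriptor, and because FD_CLOEXEC is a per-descriptor flag that dup(2) does not copy, the duplicate needs the flag re-applied.

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <stdio.h>

    /* Create a kernel event queue and mark it close-on-exec via fcntl,
     * the approach Go, Bun, and libuv take, rather than ioctl. */
    static int
    open_kq_fd(void)
    {
        int kq_fd = kqueue();
        if (kq_fd < 0) return -1;
        if (fcntl(kq_fd, F_SETFD, FD_CLOEXEC) == -1) {
            close(kq_fd);
            return -1;
        }
        return kq_fd;
    }

    int
    main(void)
    {
        int kq_fd = open_kq_fd();
        if (kq_fd < 0) { perror("open_kq_fd"); return 1; }

        /* FD_CLOEXEC does not carry over through dup(2);
         * it must be set again on the new descriptor. */
        int copy = dup(kq_fd);
        if (copy >= 0 && fcntl(copy, F_SETFD, FD_CLOEXEC) == -1) {
            perror("fcntl");
        }

        close(copy);
        close(kq_fd);
        return 0;
    }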
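Finally, a minimal sketch of the kind of kqueue-specific errno handling the message describes (hypothetical helpers, not the commit's scheduler code): EV_DELETE on a filter that was never added fails with ENOENT, an errno with no epoll counterpart, so removal has to check for it explicitly.

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <errno.h>

    /* Register a one-shot read filter for fd on the queue.
     * With nevents == 0, kevent() applies the changelist immediately
     * and returns -1 with errno set on failure. */
    int
    register_read(int kq_fd, int fd)
    {
        struct kevent kev;
        EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, NULL);
        return kevent(kq_fd, &kev, 1, NULL, 0, NULL);
    }

    /* Remove the read filter, tolerating the case where it was
     * never set (or already fired as a one-shot event). */
    int
    unregister_read(int kq_fd, int fd)
    {
        struct kevent kev;
        EV_SET(&kev, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
        if (kevent(kq_fd, &kev, 1, NULL, 0, NULL) == -1) {
            if (errno == ENOENT) return 0; /* filter was not set */
            return -1; /* genuine error; caller should raise */
        }
        return 0;
    }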
parent 7ef90b3978
commit 8782e02138
5 changed files with 250 additions and 42 deletions
thread.c | 44
@@ -4265,6 +4265,27 @@ rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t *
     return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
 }
 
+#ifdef RUBY_THREAD_PTHREAD_H
+
+static bool
+thread_sched_wait_events_timeval(int fd, int events, struct timeval *timeout)
+{
+    rb_thread_t *th = GET_THREAD();
+    rb_hrtime_t rel, *prel;
+
+    if (timeout) {
+        rel = rb_timeval2hrtime(timeout);
+        prel = &rel;
+    }
+    else {
+        prel = NULL;
+    }
+
+    return thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel);
+}
+
+#endif
+
 #ifdef USE_POLL
 
 /* The same with linux kernel. TODO: make platform independent definition. */
@@ -4294,18 +4315,8 @@ rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
     wfd.busy = NULL;
 
 #ifdef RUBY_THREAD_PTHREAD_H
-    if (!th->nt->dedicated) {
-        rb_hrtime_t rel, *prel;
-
-        if (timeout) {
-            rel = rb_timeval2hrtime(timeout);
-            prel = &rel;
-        }
-        else {
-            prel = NULL;
-        }
-
-        if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
+    if (!th_has_dedicated_nt(th)) {
+        if (thread_sched_wait_events_timeval(fd, events, timeout)) {
             return 0; // timeout
         }
     }
@@ -4445,6 +4456,15 @@ rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
     int r;
     VALUE ptr = (VALUE)&args;
 
+#ifdef RUBY_THREAD_PTHREAD_H
+    rb_thread_t *th = GET_THREAD();
+    if (!th_has_dedicated_nt(th)) {
+        if (thread_sched_wait_events_timeval(fd, events, timeout)) {
+            return 0; // timeout
+        }
+    }
+#endif
+
     args.as.fd = fd;
     args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
     args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;