deps: upgrade to libuv 1.44.2

Notable changes:

- Build regression fixes for various platform updates
  (https://github.com/libuv/libuv/pull/3428,
  https://github.com/libuv/libuv/pull/3419,
  https://github.com/libuv/libuv/pull/3423,
  https://github.com/libuv/libuv/pull/3413,
  https://github.com/libuv/libuv/pull/3431)
- Support for GNU/Hurd (https://github.com/libuv/libuv/pull/3450)
- Release tool improvements
  (https://github.com/libuv/libuv-release-tool/pull/13)
- Better performing rw locks on Win32 (https://github.com/libuv/libuv/pull/3383)
- Support for posix_spawn API (https://github.com/libuv/libuv/pull/3257)
- Fix regression on OpenBSD (https://github.com/libuv/libuv/pull/3506)
- Add uv_available_parallelism() (https://github.com/libuv/libuv/pull/3499)
- Don't use thread-unsafe strtok() (https://github.com/libuv/libuv/pull/3524)
- Fix hang after NOTE_EXIT (https://github.com/libuv/libuv/pull/3521)
- Better align order-of-events behavior between platforms
  (https://github.com/libuv/libuv/pull/3598)
- Fix fs event not fired if the watched file is moved/removed/recreated
  (https://github.com/libuv/libuv/pull/3540)
- Fix pipe resource leak if closed during connect (and other bugs)
  (https://github.com/libuv/libuv/pull/3611)
- Don't error when killing a zombie process
  (https://github.com/libuv/libuv/pull/3625)
- Avoid posix_spawnp() cwd bug (https://github.com/libuv/libuv/pull/3597)
- Skip EVFILT_PROC events when invalidating events for an fd
  (https://github.com/libuv/libuv/pull/3629)

Fixes: https://github.com/nodejs/node/issues/42290
PR-URL: https://github.com/nodejs/node/pull/42340
Reviewed-By: Michaël Zasso <targos@protonmail.com>
Reviewed-By: Juan José Arboleda <soyjuanarbol@gmail.com>
Reviewed-By: Yagiz Nizipli <yagiz@nizipli.com>
Reviewed-By: Darshan Sen <raisinten@gmail.com>
Reviewed-By: Antoine du Hamel <duhamelantoine1995@gmail.com>
Author: Luigi Pinca, 2022-03-15 09:34:01 +01:00
parent 65ec9f35e9
commit 28bf0317c2
97 changed files with 3842 additions and 2716 deletions

deps/uv/AUTHORS

@ -496,3 +496,24 @@ Jesper Storm Bache <jsbache@users.noreply.github.com>
Campbell He <duskmoon314@users.noreply.github.com>
Andrey Hohutkin <andrey.hohutkin@gmail.com>
deal <halx99@live.com>
David Machaj <46852402+dmachaj@users.noreply.github.com>
Jessica Clarke <jrtc27@jrtc27.com>
Jeremy Rose <nornagon@nornagon.net>
woclass <git@wo-class.cn>
Luca Adrian L <info@lucalindhorst.de>
WenTao Ou <owt5008137@live.com>
jonilaitinen <joni.laitinen@iki.fi>
UMU <UMU618@users.noreply.github.com>
Paul Evans <leonerd@leonerd.org.uk>
wyckster <wyckster@hotmail.com>
Vittore F. Scolari <vittore.scolari@gmail.com>
roflcopter4 <15476346+roflcopter4@users.noreply.github.com>
V-for-Vasili <vasili.skurydzin@protonmail.com>
Denny C. Dai <dennycd@me.com>
Hannah Shi <hannahshisfb@gmail.com>
tuftedocelot <tuftedocelot@fastmail.fm>
blogdaren <blogdaren@163.com>
chucksilvers <chuq@chuq.com>
Sergey Fedorov <vital.had@gmail.com>
theanarkh <2923878201@qq.com>
Samuel Cabrero <samuelcabrero@gmail.com>

deps/uv/CMakeLists.txt

@ -125,6 +125,7 @@ set(uv_sources
src/inet.c
src/random.c
src/strscpy.c
src/strtok.c
src/threadpool.c
src/timer.c
src/uv-common.c
@ -215,7 +216,6 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Android")
list(APPEND uv_defines _GNU_SOURCE)
list(APPEND uv_libraries dl)
list(APPEND uv_sources
src/unix/android-ifaddrs.c
src/unix/linux-core.c
src/unix/linux-inotify.c
src/unix/linux-syscalls.c
@ -259,6 +259,22 @@ if(APPLE)
src/unix/fsevents.c)
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "GNU")
list(APPEND uv_libraries dl)
list(APPEND uv_sources
src/unix/bsd-ifaddrs.c
src/unix/no-fsevents.c
src/unix/no-proctitle.c
src/unix/posix-hrtime.c
src/unix/posix-poll.c
src/unix/hurd.c)
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "kFreeBSD")
list(APPEND uv_defines _GNU_SOURCE)
list(APPEND uv_libraries dl freebsd-glue)
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112)
list(APPEND uv_libraries dl rt)
@ -418,6 +434,7 @@ if(LIBUV_BUILD_TESTS)
test/benchmark-fs-stat.c
test/benchmark-getaddrinfo.c
test/benchmark-loop-count.c
test/benchmark-queue-work.c
test/benchmark-million-async.c
test/benchmark-million-timers.c
test/benchmark-multi-accept.c
@ -447,7 +464,6 @@ if(LIBUV_BUILD_TESTS)
test/test-async-null-cb.c
test/test-async.c
test/test-barrier.c
test/test-callback-order.c
test/test-callback-stack.c
test/test-close-fd.c
test/test-close-order.c
@ -546,10 +562,12 @@ if(LIBUV_BUILD_TESTS)
test/test-spawn.c
test/test-stdio-over-pipes.c
test/test-strscpy.c
test/test-strtok.c
test/test-tcp-alloc-cb-fail.c
test/test-tcp-bind-error.c
test/test-tcp-bind6-error.c
test/test-tcp-close-accept.c
test/test-tcp-close-after-read-timeout.c
test/test-tcp-close-while-connecting.c
test/test-tcp-close.c
test/test-tcp-close-reset.c
@ -563,6 +581,7 @@ if(LIBUV_BUILD_TESTS)
test/test-tcp-open.c
test/test-tcp-read-stop.c
test/test-tcp-read-stop-start.c
test/test-tcp-rst.c
test/test-tcp-shutdown-after-write.c
test/test-tcp-try-write.c
test/test-tcp-try-write-error.c
@ -663,9 +682,11 @@ install(FILES LICENSE DESTINATION ${CMAKE_INSTALL_DOCDIR})
install(FILES ${PROJECT_BINARY_DIR}/libuv.pc ${PROJECT_BINARY_DIR}/libuv-static.pc
DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
install(TARGETS uv EXPORT libuvConfig
RUNTIME DESTINATION ${CMAKE_INSTALL_LIBDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(TARGETS uv_a ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(TARGETS uv_a EXPORT libuvConfig
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(EXPORT libuvConfig DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libuv)
if(MSVC)

deps/uv/ChangeLog

@ -1,3 +1,253 @@
2022.07.12, Version 1.44.2 (Stable)
Changes since version 1.44.1:
* Add SHA to ChangeLog (Jameson Nash)
* aix, ibmi: handle server hang when remote sends TCP RST (V-for-Vasili)
* build: make CI a bit noisier (Jameson Nash)
* process: reset the signal mask if the fork fails (Jameson Nash)
* zos: implement cmpxchgi() using assembly (Shuowang (Wayne) Zhang)
* build: AC_SUBST for AM_CFLAGS (Claes Nästén)
* ibmi: Implement UDP disconnect (V-for-Vasili)
* doc: update active maintainers list (Ben Noordhuis)
* build: fix kFreeBSD build (James McCoy)
* build: remove Windows 2016 workflows (Darshan Sen)
* Revert "win,errors: remap ERROR_ACCESS_DENIED to UV_EACCES" (Darshan Sen)
* unix: simplify getpwuid call (Jameson Nash)
* build: filter CI by paths and branches (Jameson Nash)
* build: add iOS to macos CI (Jameson Nash)
* build: re-enable CI for windows changes (Jameson Nash)
* process,iOS: fix build breakage in process.c (Denny C. Dai)
* test: remove unused declarations in tcp_rst test (V-for-Vasili)
* core: add thread-safe strtok implementation (Guilherme Íscaro)
* win: fix incompatible-types warning (twosee)
* test: fix flaky file watcher test (Ben Noordhuis)
* build: fix AIX xlc autotools build (V-for-Vasili)
* unix,win: fix UV_RUN_ONCE + uv_idle_stop loop hang (Ben Noordhuis)
* win: fix unexpected ECONNRESET error on TCP socket (twosee)
* doc: make sample cross-platform build (gengjiawen)
* test: separate some static variables by test cases (Hannah Shi)
* sunos: fs-event callback can be called after uv_close() (Andy Fiddaman)
* uv: re-register interest in a file after change (Shuowang (Wayne) Zhang)
* uv: register UV_RENAME event for _RFIM_UNLINK (Shuowang (Wayne) Zhang)
* uv: register __rfim_event 156 as UV_RENAME (Shuowang (Wayne) Zhang)
* doc: remove smartos from supported platforms (Ben Noordhuis)
* macos: avoid posix_spawnp() cwd bug (Jameson Nash)
* release: check versions of autogen scripts are newer (Jameson Nash)
* test: rewrite embed test (Ben Noordhuis)
* openbsd: use utimensat instead of lutimes (tuftedocelot)
* doc: fix link to uvwget example main() function (blogdaren)
* unix: use MSG_CMSG_CLOEXEC where supported (Ben Noordhuis)
* test: remove disabled callback_order test (Ben Noordhuis)
* win,pipe: fix bugs with pipe resource lifetime management (Jameson Nash)
* loop: better align order-of-events behavior between platforms (Jameson Nash)
* aix,test: uv_backend_fd is not supported by poll (V-for-Vasili)
* kqueue: skip EVFILT_PROC when invalidating fds (chucksilvers)
* darwin: fix atomic-ops.h ppc64 build (Sergey Fedorov)
* zos: don't err when killing a zombie process (Shuowang (Wayne) Zhang)
* zos: avoid fs event callbacks after uv_close() (Shuowang (Wayne) Zhang)
* zos: correctly format interface addresses names (Shuowang (Wayne) Zhang)
* zos: add uv_interface_addresses() netmask support (Shuowang (Wayne) Zhang)
* zos: improve memory management of ip addresses (Shuowang (Wayne) Zhang)
* tcp,pipe: fail `bind` or `listen` after `close` (theanarkh)
* zos: implement uv_available_parallelism() (Shuowang (Wayne) Zhang)
* udp,win: fix UDP compiler warning (Jameson Nash)
* zos: fix early exit of epoll_wait() (Shuowang (Wayne) Zhang)
* unix,tcp: fix errno handling in uv__tcp_bind() (Samuel Cabrero)
* shutdown,unix: reduce code duplication (Jameson Nash)
* unix: fix c99 comments (Ben Noordhuis)
* unix: retry tcgetattr/tcsetattr() on EINTR (Ben Noordhuis)
* docs: update introduction.rst (Ikko Ashimine)
* unix,stream: optimize uv_shutdown() codepath (Jameson Nash)
* zos: delay signal handling until after normal i/o (Shuowang (Wayne) Zhang)
* stream: uv__drain() always needs to stop POLLOUT (Jameson Nash)
* unix,tcp: allow EINVAL errno from setsockopt in uv_tcp_close_reset() (Stacey
Marshall)
* win,shutdown: improve how shutdown is dispatched (Jameson Nash)
2022.03.09, Version 1.44.1 (Stable), e8b7eb6908a847ffbe6ab2eec7428e43a0aa53a2
Changes since version 1.44.0:
* process: simplify uv__write_int calls (Jameson Nash)
* macos: don't use thread-unsafe strtok() (Ben Noordhuis)
* process: fix hang after NOTE_EXIT (Jameson Nash)
2022.03.07, Version 1.44.0 (Stable), d2bff508457336d808ba7148b33088f6acbfe0a6
Changes since version 1.43.0:
* darwin: remove EPROTOTYPE error workaround (Ben Noordhuis)
* doc: fix v1.43.0 changelog entries (cjihrig)
* win: replace CRITICAL_SECTION+Semaphore with SRWLock (David Machaj)
* darwin: translate EPROTOTYPE to ECONNRESET (Ben Noordhuis)
* android: use libc getifaddrs() (Ben Noordhuis)
* unix: fix STATIC_ASSERT to check what it means to check (Jessica Clarke)
* unix: ensure struct msghdr is zeroed in recvmmsg (Ondřej Surý)
* test: test with maximum recvmmsg buffer (Ondřej Surý)
* unix: don't allow too small thread stack size (Ben Noordhuis)
* bsd: ensure mutex is initialized (Ben Noordhuis)
* doc: add gengjiawen as maintainer (gengjiawen)
* process: monitor for exit with kqueue on BSDs (Jeremy Rose)
* test: fix flaky uv_fs_lutime test (Momtchil Momtchev)
* build: fix cmake install locations (Jameson Nash)
* thread,win: fix C90 style nit (ssrlive)
* build: rename CFLAGS to AM_CFLAGS (Jameson Nash)
* doc/guide: update content and sample code (woclass)
* process,bsd: handle kevent NOTE_EXIT failure (Jameson Nash)
* test: remove flaky test ipc_closed_handle (Ben Noordhuis)
* darwin: bump minimum supported version to 10.15 (Ben Noordhuis)
* win: return fractional seconds in uv_uptime() (Luca Adrian L)
* build: export uv_a for cmake (WenTao Ou)
* loop: add pending work to loop-alive check (Jameson Nash)
* win: use GetTickCount64 for uptime again (Jameson Nash)
* win: restrict system DLL load paths (jonilaitinen)
* win,errors: remap ERROR_ACCESS_DENIED to UV_EACCES (Darshan Sen)
* bench: add `uv_queue_work` ping-pong measurement (Momtchil Momtchev)
* build: fix error C4146 on MSVC (UMU)
* test: fix benchmark-ping-udp (Ryan Liptak)
* win,fs: consider broken pipe error a normal EOF (Momtchil Momtchev)
* document the values of enum uv_stdio_flags (Paul Evans)
* win,loop: add missing uv_update_time (twosee)
* win,fs: avoid closing an invalid handle (Jameson Nash)
* fix oopsie from
* doc: clarify android api level (Ben Noordhuis)
* win: fix style nits [NFC] (Jameson Nash)
* test: fix flaky udp_mmsg test (Santiago Gimeno)
* test: fix ipc_send_recv_pipe flakiness (Ben Noordhuis)
* doc: checkout -> check out (wyckster)
* core: change uv_get_password uid/gid to unsigned (Jameson Nash)
* hurd: unbreak build on GNU/Hurd (Vittore F. Scolari)
* freebsd: use copy_file_range() in uv_fs_sendfile() (David Carlier)
* test: use closefd in runner-unix.c (Guilherme Íscaro)
* Reland "macos: use posix_spawn instead of fork" (Jameson Nash)
* android: fix build error when no ifaddrs.h (ssrlive)
* unix,win: add uv_available_parallelism() (Ben Noordhuis)
* process: remove OpenBSD from kevent list (Jameson Nash)
* zos: fix build breakage (Ben Noordhuis)
* process: only use F_DUPFD_CLOEXEC if it is defined (Jameson Nash)
* win,poll: add the MSAFD GUID for AF_UNIX (roflcopter4)
* unix: simplify uv__cloexec_fcntl() (Ben Noordhuis)
* doc: add secondary GPG ID for vtjnash (Jameson Nash)
* unix: remove uv__cloexec_ioctl() (Jameson Nash)
2022.01.05, Version 1.43.0 (Stable), 988f2bfc4defb9a85a536a3e645834c161143ee0
Changes since version 1.42.0:
@ -18,73 +268,73 @@ Changes since version 1.42.0:
* win,fsevent: fix uv_fs_event_stop() assert (Ben Noordhuis)
* unix: remove redundant include in unix.h (
* unix: remove redundant include in unix.h (Juan José Arboleda)
* doc: mark SmartOS as Tier 3 support (
* doc: mark SmartOS as Tier 3 support (Ben Noordhuis)
* doc: fix broken links for netbsd's sysctl manpage (
* doc: fix broken links for netbsd's sysctl manpage (YAKSH BARIYA)
* misc: adjust stalebot deadline (
* misc: adjust stalebot deadline (Ben Noordhuis)
* test: remove `dns-server.c` as it is not used anywhere (
* test: remove `dns-server.c` as it is not used anywhere (Darshan Sen)
* build: fix non-cmake android builds (
* build: fix non-cmake android builds (YAKSH BARIYA)
* doc: replace pyuv with uvloop (
* doc: replace pyuv with uvloop (Ofek Lev)
* asan: fix some tests (
* asan: fix some tests (Jameson Nash)
* build: add experimental TSAN configuration (
* build: add experimental TSAN configuration (Jameson Nash)
* pipe: remove useless assertion (
* pipe: remove useless assertion (~locpyl-tidnyd)
* bsd: destroy mutex in uv__process_title_cleanup() (
* bsd: destroy mutex in uv__process_title_cleanup() (Darshan Sen)
* build: add windows build to CI (
* build: add windows build to CI (Darshan Sen)
* win,fs: fix error code in uv_fs_read() and uv_fs_write() ( Sen)
* win,fs: fix error code in uv_fs_read() and uv_fs_write() (Darshan Sen)
* build: add macos-latest to ci matrix (
* build: add macos-latest to ci matrix (Ben Noordhuis)
* udp: fix &/&& typo in macro condition (
* udp: fix &/&& typo in macro condition (Evan Miller)
* build: install cmake package module (Petr Menšík)
* win: fix build for mingw32 (
* win: fix build for mingw32 (Nicolas Noble)
* build: fix build failures with MinGW new headers (erw7)
* build: fix win build with cmake versions before v3.14 (
* build: fix win build with cmake versions before v3.14 (AJ Heller)
* unix: support aarch64 in uv_cpu_info() (
* unix: support aarch64 in uv_cpu_info() (Juan José Arboleda)
* linux: work around CIFS EPERM bug (
* linux: work around CIFS EPERM bug (Ben Noordhuis)
* sunos: Oracle Developer Studio support (
* sunos: Oracle Developer Studio support (Stacey Marshall)
* Revert "sunos: Oracle Developer Studio support (
* Revert "sunos: Oracle Developer Studio support (cjihrig)
* sunos: Oracle Developer Studio support (
* sunos: Oracle Developer Studio support (Stacey Marshall)
* stream: permit read after seeing EOF (
* stream: permit read after seeing EOF (Jameson Nash)
* thread: initialize uv_thread_self for all threads (
* thread: initialize uv_thread_self for all threads (Jameson Nash)
* kqueue: ignore write-end closed notifications (
* kqueue: ignore write-end closed notifications (Jameson Nash)
* macos: fix the cfdata length in uv__get_cpu_speed ( Bache)
* macos: fix the cfdata length in uv__get_cpu_speed (Jesper Storm Bache)
* unix,win: add uv_ip_name to get name from sockaddr (
* unix,win: add uv_ip_name to get name from sockaddr (Campbell He)
* win,test: fix a few typos (AJ Heller)
* zos: use destructor for uv__threadpool_cleanup() ( Zhang)
* zos: use destructor for uv__threadpool_cleanup() (Wayne Zhang)
* linux: use MemAvailable instead of MemFree (
* linux: use MemAvailable instead of MemFree (Andrey Hohutkin)
* freebsd: call dlerror() only if necessary (
* freebsd: call dlerror() only if necessary (Jameson Nash)
* bsd,windows,zos: fix udp disconnect EINVAL (
* bsd,windows,zos: fix udp disconnect EINVAL (deal)
2021.07.21, Version 1.42.0 (Stable), 6ce14710da7079eb248868171f6343bc409ea3a4

deps/uv/LICENSE

@ -64,7 +64,3 @@ The externally maintained libraries used by libuv are:
- pthread-fixes.c, copyright Google Inc. and Sony Mobile Communications AB.
Three clause BSD license.
- android-ifaddrs.h, android-ifaddrs.c, copyright Berkeley Software Design
Inc, Kenneth MacKay and Emergya (Cloud4all, FP7/2007-2013, grant agreement
n° 289016). Three clause BSD license.

deps/uv/MAINTAINERS.md

@ -1,10 +1,7 @@
# Project Maintainers
libuv is currently managed by the following individuals:
* **Anna Henningsen** ([@addaleax](https://github.com/addaleax))
* **Bartosz Sosnowski** ([@bzoz](https://github.com/bzoz))
* **Ben Noordhuis** ([@bnoordhuis](https://github.com/bnoordhuis))
- GPG key: D77B 1E34 243F BAF0 5F8E 9CC3 4F55 C8C8 46AB 89B9 (pubkey-bnoordhuis)
* **Bert Belder** ([@piscisaureus](https://github.com/piscisaureus))
@ -13,11 +10,10 @@ libuv is currently managed by the following individuals:
- GPG key: 5735 3E0D BDAA A7E8 39B6 6A1A FF47 D5E4 AD8B 4FDC (pubkey-cjihrig-kb)
* **Fedor Indutny** ([@indutny](https://github.com/indutny))
- GPG key: AF2E EA41 EC34 47BF DD86 FED9 D706 3CCE 19B7 E890 (pubkey-indutny)
* **Imran Iqbal** ([@imran-iq](https://github.com/imran-iq))
- GPG key: 9DFE AA5F 481B BF77 2D90 03CE D592 4925 2F8E C41A (pubkey-iwuzhere)
* **Jameson Nash** ([@vtjnash](https://github.com/vtjnash))
- GPG key: AEAD 0A4B 6867 6775 1A0E 4AEF 34A2 5FB1 2824 6514 (pubkey-vtjnash)
* **John Barboza** ([@jbarz](https://github.com/jbarz))
- GPG key: CFBB 9CA9 A5BE AFD7 0E2B 3C5A 79A6 7C55 A367 9C8B (pubkey2022-vtjnash)
* **Jiawen Geng** ([@gengjiawen](https://github.com/gengjiawen))
* **Kaoru Takanashi** ([@erw7](https://github.com/erw7))
- GPG Key: 5804 F999 8A92 2AFB A398 47A0 7183 5090 6134 887F (pubkey-erw7)
* **Richard Lau** ([@richardlau](https://github.com/richardlau))
@ -27,6 +23,13 @@ libuv is currently managed by the following individuals:
* **Saúl Ibarra Corretgé** ([@saghul](https://github.com/saghul))
- GPG key: FDF5 1936 4458 319F A823 3DC9 410E 5553 AE9B C059 (pubkey-saghul)
## Project Maintainers emeriti
* **Anna Henningsen** ([@addaleax](https://github.com/addaleax))
* **Bartosz Sosnowski** ([@bzoz](https://github.com/bzoz))
* **Imran Iqbal** ([@imran-iq](https://github.com/imran-iq))
* **John Barboza** ([@jbarz](https://github.com/jbarz))
## Storing a maintainer key in Git
It's quite handy to store a maintainer's signature as a git blob, and have

deps/uv/Makefile.am

@ -27,8 +27,8 @@ uvinclude_HEADERS = include/uv/errno.h \
CLEANFILES =
lib_LTLIBRARIES = libuv.la
libuv_la_CFLAGS = @CFLAGS@
libuv_la_LDFLAGS = -no-undefined -version-info 1:0:0
libuv_la_CFLAGS = $(AM_CFLAGS)
libuv_la_LDFLAGS = $(AM_LDFLAGS) -no-undefined -version-info 1:0:0
libuv_la_SOURCES = src/fs-poll.c \
src/heap-inl.h \
src/idna.c \
@ -43,7 +43,9 @@ libuv_la_SOURCES = src/fs-poll.c \
src/uv-data-getter-setters.c \
src/uv-common.c \
src/uv-common.h \
src/version.c
src/version.c \
src/strtok.c \
src/strtok.h
if SUNOS
# Can't be turned into a CC_CHECK_CFLAGS in configure.ac, it makes compilers
@ -131,7 +133,7 @@ EXTRA_DIST = test/fixtures/empty_file \
TESTS = test/run-tests
check_PROGRAMS = test/run-tests
test_run_tests_CFLAGS =
test_run_tests_CFLAGS = $(AM_CFLAGS)
if SUNOS
# Can't be turned into a CC_CHECK_CFLAGS in configure.ac, it makes compilers
@ -139,7 +141,7 @@ if SUNOS
test_run_tests_CFLAGS += -pthreads
endif
test_run_tests_LDFLAGS =
test_run_tests_LDFLAGS = $(AM_LDFLAGS)
test_run_tests_SOURCES = test/blackhole-server.c \
test/echo-server.c \
test/run-tests.c \
@ -150,7 +152,6 @@ test_run_tests_SOURCES = test/blackhole-server.c \
test/test-async.c \
test/test-async-null-cb.c \
test/test-barrier.c \
test/test-callback-order.c \
test/test-callback-stack.c \
test/test-close-fd.c \
test/test-close-order.c \
@ -250,11 +251,13 @@ test_run_tests_SOURCES = test/blackhole-server.c \
test/test-spawn.c \
test/test-stdio-over-pipes.c \
test/test-strscpy.c \
test/test-strtok.c \
test/test-tcp-alloc-cb-fail.c \
test/test-tcp-bind-error.c \
test/test-tcp-bind6-error.c \
test/test-tcp-close-accept.c \
test/test-tcp-close-while-connecting.c \
test/test-tcp-close-after-read-timeout.c \
test/test-tcp-close.c \
test/test-tcp-close-reset.c \
test/test-tcp-create-socket-early.c \
@ -266,6 +269,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \
test/test-tcp-open.c \
test/test-tcp-read-stop.c \
test/test-tcp-read-stop-start.c \
test/test-tcp-rst.c \
test/test-tcp-shutdown-after-write.c \
test/test-tcp-unexpected-read.c \
test/test-tcp-oob.c \
@ -388,10 +392,8 @@ libuv_la_SOURCES += src/unix/aix-common.c \
endif
if ANDROID
uvinclude_HEADERS += include/uv/android-ifaddrs.h
libuv_la_CFLAGS += -D_GNU_SOURCE
libuv_la_SOURCES += src/unix/android-ifaddrs.c \
src/unix/pthread-fixes.c
libuv_la_SOURCES += src/unix/pthread-fixes.c
endif
if CYGWIN
@ -457,9 +459,16 @@ endif
if HURD
uvinclude_HEADERS += include/uv/posix.h
libuv_la_SOURCES += src/unix/no-fsevents.c \
libuv_la_SOURCES += src/unix/bsd-ifaddrs.c \
src/unix/no-fsevents.c \
src/unix/no-proctitle.c \
src/unix/posix-hrtime.c \
src/unix/posix-poll.c
src/unix/posix-poll.c \
src/unix/hurd.c
endif
if KFREEBSD
libuv_la_CFLAGS += -D_GNU_SOURCE
endif
if LINUX

deps/uv/SUPPORTED_PLATFORMS.md

@ -3,15 +3,14 @@
| System | Support type | Supported versions | Notes |
|---|---|---|---|
| GNU/Linux | Tier 1 | Linux >= 2.6.32 with glibc >= 2.12 | |
| macOS | Tier 1 | macOS >= 10.7 | |
| macOS | Tier 1 | macOS >= 10.15 | Current and previous macOS release |
| Windows | Tier 1 | >= Windows 8 | VS 2015 and later are supported |
| FreeBSD | Tier 1 | >= 10 | |
| AIX | Tier 2 | >= 6 | Maintainers: @libuv/aix |
| IBM i | Tier 2 | >= IBM i 7.2 | Maintainers: @libuv/ibmi |
| z/OS | Tier 2 | >= V2R2 | Maintainers: @libuv/zos |
| Linux with musl | Tier 2 | musl >= 1.0 | |
| SmartOS | Tier 3 | >= 14.4 | |
| Android | Tier 3 | NDK >= r15b | |
| Android | Tier 3 | NDK >= r15b | Android 7.0, `-DANDROID_PLATFORM=android-24` |
| MinGW | Tier 3 | MinGW32 and MinGW-w64 | |
| SunOS | Tier 3 | Solaris 121 and later | |
| Other | Tier 3 | N/A | |

deps/uv/autogen.sh

@ -14,9 +14,16 @@
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
set -eu
cd `dirname "$0"`
if [ "$LIBTOOLIZE" = "" ] && [ "`uname`" = "Darwin" ]; then
if [ "${1:-dev}" == "release" ]; then
export LIBUV_RELEASE=true
else
export LIBUV_RELEASE=false
fi
if [ "${LIBTOOLIZE:-}" = "" ] && [ "`uname`" = "Darwin" ]; then
LIBTOOLIZE=glibtoolize
fi
@ -25,9 +32,17 @@ AUTOCONF=${AUTOCONF:-autoconf}
AUTOMAKE=${AUTOMAKE:-automake}
LIBTOOLIZE=${LIBTOOLIZE:-libtoolize}
aclocal_version=`"$ACLOCAL" --version | head -n 1 | sed 's/[^.0-9]//g'`
autoconf_version=`"$AUTOCONF" --version | head -n 1 | sed 's/[^.0-9]//g'`
automake_version=`"$AUTOMAKE" --version | head -n 1 | sed 's/[^.0-9]//g'`
automake_version_major=`echo "$automake_version" | cut -d. -f1`
automake_version_minor=`echo "$automake_version" | cut -d. -f2`
libtoolize_version=`"$LIBTOOLIZE" --version | head -n 1 | sed 's/[^.0-9]//g'`
if [ $aclocal_version != $automake_version ]; then
echo "FATAL: aclocal version appears not to be from the same as automake"
exit 1
fi
UV_EXTRA_AUTOMAKE_FLAGS=
if test "$automake_version_major" -gt 1 || \
@ -39,8 +54,22 @@ fi
echo "m4_define([UV_EXTRA_AUTOMAKE_FLAGS], [$UV_EXTRA_AUTOMAKE_FLAGS])" \
> m4/libuv-extra-automake-flags.m4
set -ex
"$LIBTOOLIZE" --copy
(set -x
"$LIBTOOLIZE" --copy --force
"$ACLOCAL" -I m4
)
if $LIBUV_RELEASE; then
"$AUTOCONF" -o /dev/null m4/libuv-check-versions.m4
echo "
AC_PREREQ($autoconf_version)
AC_INIT([libuv-release-check], [0.0])
AM_INIT_AUTOMAKE([$automake_version])
LT_PREREQ($libtoolize_version)
AC_OUTPUT
" > m4/libuv-check-versions.m4
fi
(
set -x
"$AUTOCONF"
"$AUTOMAKE" --add-missing --copy
)

deps/uv/configure.ac

@ -13,7 +13,7 @@
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
AC_PREREQ(2.57)
AC_INIT([libuv], [1.43.0], [https://github.com/libuv/libuv/issues])
AC_INIT([libuv], [1.44.2], [https://github.com/libuv/libuv/issues])
AC_CONFIG_MACRO_DIR([m4])
m4_include([m4/libuv-extra-automake-flags.m4])
m4_include([m4/as_case.m4])
@ -28,7 +28,9 @@ AM_PROG_CC_C_O
CC_ATTRIBUTE_VISIBILITY([default], [
CC_FLAG_VISIBILITY([CFLAGS="${CFLAGS} -fvisibility=hidden"])
])
CC_CHECK_CFLAGS_APPEND([-fno-strict-aliasing])
# Xlc has a flag "-f<filename>". Need to use CC_CHECK_FLAG_SUPPORTED_APPEND so
# we exclude -fno-strict-aliasing for xlc
CC_CHECK_FLAG_SUPPORTED_APPEND([-fno-strict-aliasing])
CC_CHECK_CFLAGS_APPEND([-g])
CC_CHECK_CFLAGS_APPEND([-std=gnu89])
CC_CHECK_CFLAGS_APPEND([-Wall])
@ -60,6 +62,7 @@ AM_CONDITIONAL([CYGWIN], [AS_CASE([$host_os],[cygwin*], [true], [false])
AM_CONDITIONAL([DARWIN], [AS_CASE([$host_os],[darwin*], [true], [false])])
AM_CONDITIONAL([DRAGONFLY],[AS_CASE([$host_os],[dragonfly*], [true], [false])])
AM_CONDITIONAL([FREEBSD], [AS_CASE([$host_os],[*freebsd*], [true], [false])])
AM_CONDITIONAL([KFREEBSD], [AS_CASE([$host_os],[kfreebsd*], [true], [false])])
AM_CONDITIONAL([HAIKU], [AS_CASE([$host_os],[haiku], [true], [false])])
AM_CONDITIONAL([HURD], [AS_CASE([$host_os],[gnu*], [true], [false])])
AM_CONDITIONAL([LINUX], [AS_CASE([$host_os],[linux*], [true], [false])])

deps/uv/docs/src/design.rst

@ -125,7 +125,7 @@ File I/O
Unlike network I/O, there are no platform-specific file I/O primitives libuv could rely on,
so the current approach is to run blocking file I/O operations in a thread pool.
For a thorough explanation of the cross-platform file I/O landscape, checkout
For a thorough explanation of the cross-platform file I/O landscape, check out
`this post <https://blog.libtorrent.org/2012/10/asynchronous-disk-io/>`_.
libuv currently uses a global thread pool on which all loops can queue work. 3 types of

deps/uv/docs/src/misc.rst

@ -312,7 +312,7 @@ API
.. c:function:: int uv_uptime(double* uptime)
Gets the current system uptime.
Gets the current system uptime. Depending on the system full or fractional seconds are returned.
.. c:function:: int uv_getrusage(uv_rusage_t* rusage)
@ -334,11 +334,30 @@ API
.. versionadded:: 1.16.0
.. c:function:: unsigned int uv_available_parallelism(void)
Returns an estimate of the default amount of parallelism a program should
use. Always returns a non-zero value.
On Linux, inspects the calling thread's CPU affinity mask to determine if
it has been pinned to specific CPUs.
On Windows, the available parallelism may be underreported on systems with
more than 64 logical CPUs.
On other platforms, reports the number of CPUs that the operating system
considers to be online.
.. versionadded:: 1.44.0
.. c:function:: int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count)
Gets information about the CPUs on the system. The `cpu_infos` array will
have `count` elements and needs to be freed with :c:func:`uv_free_cpu_info`.
Use :c:func:`uv_available_parallelism` if you need to know how many CPUs
are available for threads or child processes.
.. c:function:: void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count)
Frees the `cpu_infos` array previously allocated with :c:func:`uv_cpu_info`.
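
The added cross-reference draws a practical distinction: size thread pools with uv_available_parallelism(), enumerate hardware with uv_cpu_info(). A minimal sketch of both calls (the print-outs are illustrative; the functions and uv_cpu_info_t fields are libuv's public API):

    #include <stdio.h>
    #include <uv.h>

    int main(void) {
      uv_cpu_info_t* cpus;
      int count;
      int i;

      /* How many threads or child processes are worth starting; honours the
       * caller's CPU affinity mask on Linux and always returns at least 1. */
      printf("suggested parallelism: %u\n", uv_available_parallelism());

      /* uv_cpu_info() still reports every CPU the OS considers online. */
      if (uv_cpu_info(&cpus, &count) == 0) {
        for (i = 0; i < count; i++)
          printf("cpu %d: %s @ %d MHz\n", i, cpus[i].model, cpus[i].speed);
        uv_free_cpu_info(cpus, count);
      }

      return 0;
    }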

deps/uv/docs/src/process.rst

@ -109,10 +109,39 @@ Data types
::
typedef enum {
/*
* The following four options are mutually-exclusive, and define
* the operation to perform for the corresponding file descriptor
* in the child process:
*/
/*
* No file descriptor will be provided (or redirected to
* `/dev/null` if it is fd 0, 1 or 2).
*/
UV_IGNORE = 0x00,
/*
* Open a new pipe into `data.stream`, per the flags below. The
* `data.stream` field must point to a uv_pipe_t object that has
* been initialized with `uv_pipe_init(loop, data.stream, ipc);`,
* but not yet opened or connected.
*/
UV_CREATE_PIPE = 0x01,
/*
* The child process will be given a duplicate of the parent's
* file descriptor given by `data.fd`.
*/
UV_INHERIT_FD = 0x02,
/*
* The child process will be given a duplicate of the parent's
* file descriptor being used by the stream handle given by
* `data.stream`.
*/
UV_INHERIT_STREAM = 0x04,
/*
* When UV_CREATE_PIPE is specified, UV_READABLE_PIPE and UV_WRITABLE_PIPE
* determine the direction of flow, from the child process' perspective. Both
@ -120,6 +149,7 @@ Data types
*/
UV_READABLE_PIPE = 0x10,
UV_WRITABLE_PIPE = 0x20,
/*
* When UV_CREATE_PIPE is specified, specifying UV_NONBLOCK_PIPE opens the
* handle in non-blocking mode in the child. This may cause loss of data,

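The flags are easiest to read in combination. Below is a hedged sketch of a parent wiring up a child's stdio for uv_spawn(); the "worker" program name and the exit callback are placeholders, while the flag rules follow the enum documented above:

    #include <string.h>
    #include <uv.h>

    static void on_exit_cb(uv_process_t* proc, int64_t status, int term_signal) {
      (void) status;
      (void) term_signal;
      uv_close((uv_handle_t*) proc, NULL);
    }

    int spawn_worker(uv_loop_t* loop, uv_process_t* proc, uv_pipe_t* channel) {
      static char* args[] = { "worker", NULL };  /* placeholder argv */
      uv_stdio_container_t stdio[3];
      uv_process_options_t options;

      memset(&options, 0, sizeof(options));

      /* fd 0: UV_CREATE_PIPE opens a fresh pipe into `channel`, readable and
       * writable from the child's point of view. */
      uv_pipe_init(loop, channel, 1);
      stdio[0].flags = UV_CREATE_PIPE | UV_READABLE_PIPE | UV_WRITABLE_PIPE;
      stdio[0].data.stream = (uv_stream_t*) channel;

      /* fd 1: UV_INHERIT_FD hands the child a dup of the parent's stdout. */
      stdio[1].flags = UV_INHERIT_FD;
      stdio[1].data.fd = 1;

      /* fd 2: UV_IGNORE sends the child's stderr to /dev/null. */
      stdio[2].flags = UV_IGNORE;

      options.file = "worker";  /* placeholder program */
      options.args = args;
      options.exit_cb = on_exit_cb;
      options.stdio = stdio;
      options.stdio_count = 3;

      return uv_spawn(loop, proc, &options);
    }
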
deps/uv/include/uv.h

@ -1133,8 +1133,8 @@ struct uv_interface_address_s {
struct uv_passwd_s {
char* username;
long uid;
long gid;
unsigned long uid;
unsigned long gid;
char* shell;
char* homedir;
};
@ -1242,6 +1242,7 @@ UV_EXTERN uv_pid_t uv_os_getppid(void);
UV_EXTERN int uv_os_getpriority(uv_pid_t pid, int* priority);
UV_EXTERN int uv_os_setpriority(uv_pid_t pid, int priority);
UV_EXTERN unsigned int uv_available_parallelism(void);
UV_EXTERN int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count);
UV_EXTERN void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count);

deps/uv/include/uv/android-ifaddrs.h (deleted)

@ -1,54 +0,0 @@
/*
* Copyright (c) 1995, 1999
* Berkeley Software Design, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* THIS SOFTWARE IS PROVIDED BY Berkeley Software Design, Inc. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design, Inc. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* BSDI ifaddrs.h,v 2.5 2000/02/23 14:51:59 dab Exp
*/
#ifndef _IFADDRS_H_
#define _IFADDRS_H_
struct ifaddrs {
struct ifaddrs *ifa_next;
char *ifa_name;
unsigned int ifa_flags;
struct sockaddr *ifa_addr;
struct sockaddr *ifa_netmask;
struct sockaddr *ifa_dstaddr;
void *ifa_data;
};
/*
* This may have been defined in <net/if.h>. Note that if <net/if.h> is
* to be included it must be included before this header file.
*/
#ifndef ifa_broadaddr
#define ifa_broadaddr ifa_dstaddr /* broadcast address interface */
#endif
#include <sys/cdefs.h>
__BEGIN_DECLS
extern int getifaddrs(struct ifaddrs **ifap);
extern void freeifaddrs(struct ifaddrs *ifa);
__END_DECLS
#endif

deps/uv/include/uv/version.h

@ -31,8 +31,8 @@
*/
#define UV_VERSION_MAJOR 1
#define UV_VERSION_MINOR 43
#define UV_VERSION_PATCH 0
#define UV_VERSION_MINOR 44
#define UV_VERSION_PATCH 2
#define UV_VERSION_IS_RELEASE 1
#define UV_VERSION_SUFFIX ""

deps/uv/include/uv/win.h

@ -263,21 +263,14 @@ typedef union {
} unused_; /* TODO: retained for ABI compatibility; remove me in v2.x. */
} uv_cond_t;
typedef union {
struct {
unsigned int num_readers_;
CRITICAL_SECTION num_readers_lock_;
HANDLE write_semaphore_;
} state_;
/* TODO: remove me in v2.x. */
struct {
SRWLOCK unused_;
} unused1_;
/* TODO: remove me in v2.x. */
struct {
uv_mutex_t unused1_;
uv_mutex_t unused2_;
} unused2_;
typedef struct {
SRWLOCK read_write_lock_;
/* TODO: retained for ABI compatibility; remove me in v2.x */
#ifdef _WIN64
unsigned char padding_[72];
#else
unsigned char padding_[44];
#endif
} uv_rwlock_t;
typedef struct {
@ -384,6 +377,12 @@ typedef struct {
OVERLAPPED overlapped; \
size_t queued_bytes; \
} io; \
/* in v2, we can move these to the UV_CONNECT_PRIVATE_FIELDS */ \
struct { \
ULONG_PTR result; /* overlapped.Internal is reused to hold the result */\
HANDLE pipeHandle; \
DWORD duplex_flags; \
} connect; \
} u; \
struct uv_req_s* next_req;
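
The uv_rwlock_t rewrite above only changes what sits behind the padding; the portable uv_rwlock_* API is untouched. For reference, a small sketch of that API (the shared counter is illustrative):

    #include <uv.h>

    static uv_rwlock_t lock;
    static long counter;

    static void reader(void* arg) {
      long seen;
      (void) arg;
      uv_rwlock_rdlock(&lock);   /* any number of readers may hold this */
      seen = counter;
      uv_rwlock_rdunlock(&lock);
      (void) seen;
    }

    static void writer(void* arg) {
      (void) arg;
      uv_rwlock_wrlock(&lock);   /* writers get exclusive access */
      counter++;
      uv_rwlock_wrunlock(&lock);
    }

    int main(void) {
      uv_thread_t r;
      uv_thread_t w;

      if (uv_rwlock_init(&lock))
        return 1;

      uv_thread_create(&w, writer, NULL);
      uv_thread_create(&r, reader, NULL);
      uv_thread_join(&w);
      uv_thread_join(&r);

      uv_rwlock_destroy(&lock);
      return 0;
    }

On Windows the lock now maps onto a single SRWLOCK instead of a critical section plus semaphore, which is where the "better performing rw locks on Win32" item in the commit message comes from.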

deps/uv/m4/libuv-check-flags.m4

@ -1,6 +1,7 @@
dnl Macros to check the presence of generic (non-typed) symbols.
dnl Copyright (c) 2006-2008 Diego Pettenò <flameeyes gmail com>
dnl Copyright (c) 2006-2008 xine project
dnl Copyright (c) 2021 libuv project
dnl
dnl This program is free software; you can redistribute it and/or modify
dnl it under the terms of the GNU General Public License as published by
@ -63,7 +64,7 @@ AC_DEFUN([CC_CHECK_CFLAGS], [
])
dnl CC_CHECK_CFLAG_APPEND(FLAG, [action-if-found], [action-if-not-found])
dnl Check for CFLAG and appends them to CFLAGS if supported
dnl Check for CFLAG and appends them to AM_CFLAGS if supported
AC_DEFUN([CC_CHECK_CFLAG_APPEND], [
AC_CACHE_CHECK([if $CC supports $1 flag],
AS_TR_SH([cc_cv_cflags_$1]),
@ -71,7 +72,9 @@ AC_DEFUN([CC_CHECK_CFLAG_APPEND], [
)
AS_IF([eval test x$]AS_TR_SH([cc_cv_cflags_$1])[ = xyes],
[CFLAGS="$CFLAGS $1"; DEBUG_CFLAGS="$DEBUG_CFLAGS $1"; $2], [$3])
[AM_CFLAGS="$AM_CFLAGS $1"; DEBUG_CFLAGS="$DEBUG_CFLAGS $1"; $2], [$3])
AC_SUBST([AM_CFLAGS])
])
dnl CC_CHECK_CFLAGS_APPEND([FLAG1 FLAG2], [action-if-found], [action-if-not])
@ -101,6 +104,20 @@ AC_DEFUN([CC_CHECK_LDFLAGS], [
[$2], [$3])
])
dnl Check if flag is supported by both compiler and linker
dnl If so, append it to AM_CFLAGS
dnl CC_CHECK_FLAG_SUPPORTED_APPEND([FLAG])
AC_DEFUN([CC_CHECK_FLAG_SUPPORTED_APPEND], [
CC_CHECK_CFLAGS([$1],
[CC_CHECK_LDFLAGS([$1],
[AM_CFLAGS="$AM_CFLAGS $1";
DEBUG_CFLAGS="$DEBUG_CFLAGS $1";
AC_SUBST([AM_CFLAGS])
])
])
])
dnl define the LDFLAGS_NOUNDEFINED variable with the correct value for
dnl the current linker to avoid undefined references in a shared object.
AC_DEFUN([CC_NOUNDEFINED], [

deps/uv/src/fs-poll.c

@ -25,7 +25,7 @@
#ifdef _WIN32
#include "win/internal.h"
#include "win/handle-inl.h"
#define uv__make_close_pending(h) uv_want_endgame((h)->loop, (h))
#define uv__make_close_pending(h) uv__want_endgame((h)->loop, (h))
#else
#include "unix/internal.h"
#endif

deps/uv/src/idna.c

@ -21,6 +21,7 @@
#include "idna.h"
#include <assert.h>
#include <string.h>
#include <limits.h> /* UINT_MAX */
static unsigned uv__utf8_decode1_slow(const char** p,
const char* pe,
@ -129,7 +130,7 @@ static int uv__idna_toascii_label(const char* s, const char* se,
while (s < se) {
c = uv__utf8_decode1(&s, se);
if (c == -1u)
if (c == UINT_MAX)
return UV_EINVAL;
if (c < 128)
@ -151,7 +152,7 @@ static int uv__idna_toascii_label(const char* s, const char* se,
s = ss;
while (s < se) {
c = uv__utf8_decode1(&s, se);
assert(c != -1u);
assert(c != UINT_MAX);
if (c > 127)
continue;
@ -182,7 +183,7 @@ static int uv__idna_toascii_label(const char* s, const char* se,
while (s < se) {
c = uv__utf8_decode1(&s, se);
assert(c != -1u);
assert(c != UINT_MAX);
if (c >= n)
if (c < m)
@ -201,7 +202,7 @@ static int uv__idna_toascii_label(const char* s, const char* se,
s = ss;
while (s < se) {
c = uv__utf8_decode1(&s, se);
assert(c != -1u);
assert(c != UINT_MAX);
if (c < n)
if (++delta == 0)
@ -280,7 +281,7 @@ long uv__idna_toascii(const char* s, const char* se, char* d, char* de) {
st = si;
c = uv__utf8_decode1(&si, se);
if (c == -1u)
if (c == UINT_MAX)
return UV_EINVAL;
if (c != '.')

deps/uv/src/strscpy.h

@ -28,7 +28,7 @@
*/
#include "uv.h"
/* Copies up to |n-1| bytes from |d| to |s| and always zero-terminates
/* Copies up to |n-1| bytes from |s| to |d| and always zero-terminates
* the result, except when |n==0|. Returns the number of bytes copied
* or UV_E2BIG if |d| is too small.
*

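In concrete terms (a hypothetical caller inside the libuv tree; uv__strscpy() is the helper this header declares):

    #include "strscpy.h"  /* internal libuv header */

    static void copy_example(void) {
      char buf[8];
      ssize_t n;

      n = uv__strscpy(buf, "hello", sizeof(buf));
      /* n == 5, buf now holds "hello". */

      n = uv__strscpy(buf, "a much longer string", sizeof(buf));
      /* n == UV_E2BIG: buf gets the first 7 bytes plus a '\0' terminator. */
      (void) n;
    }
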
deps/uv/src/strtok.c (new file)

@ -0,0 +1,52 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <stdlib.h>
#include "strtok.h"
char* uv__strtok(char* str, const char* sep, char** itr) {
const char* sep_itr;
char* tmp;
char* start;
if (str == NULL)
start = tmp = *itr;
else
start = tmp = str;
if (tmp == NULL)
return NULL;
while (*tmp != '\0') {
sep_itr = sep;
while (*sep_itr != '\0') {
if (*tmp == *sep_itr) {
*itr = tmp + 1;
*tmp = '\0';
return start;
}
sep_itr++;
}
tmp++;
}
*itr = NULL;
return start;
}
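
Unlike strtok(), all iteration state lives in the caller-supplied `itr` pointer, which is what makes uv__strtok() safe across threads; note that it also does not collapse consecutive separators the way strtok() does. A short usage sketch (the input string is illustrative):

    #include <stdio.h>
    #include "strtok.h"  /* the new internal header */

    int main(void) {
      char buf[] = "PATH=/usr/local/bin:/usr/bin:/bin";
      char* state;
      char* tok;

      /* The first call passes the string, later calls pass NULL; the position
       * is carried in `state` rather than in hidden static storage. */
      for (tok = uv__strtok(buf, "=:", &state);
           tok != NULL;
           tok = uv__strtok(NULL, "=:", &state))
        printf("token: %s\n", tok);

      return 0;
    }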

deps/uv/src/strtok.h (new file)

@ -0,0 +1,27 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_STRTOK_H_
#define UV_STRTOK_H_
char* uv__strtok(char* str, const char* sep, char** itr);
#endif /* UV_STRTOK_H_ */

deps/uv/src/unix/android-ifaddrs.c (deleted)

@ -1,713 +0,0 @@
/*
Copyright (c) 2013, Kenneth MacKay
Copyright (c) 2014, Emergya (Cloud4all, FP7/2007-2013 grant agreement #289016)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "uv/android-ifaddrs.h"
#include "uv-common.h"
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>
#include <net/if_arp.h>
#include <netinet/in.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_packet.h>
typedef struct NetlinkList
{
struct NetlinkList *m_next;
struct nlmsghdr *m_data;
unsigned int m_size;
} NetlinkList;
static int netlink_socket(pid_t *p_pid)
{
struct sockaddr_nl l_addr;
socklen_t l_len;
int l_socket = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if(l_socket < 0)
{
return -1;
}
memset(&l_addr, 0, sizeof(l_addr));
l_addr.nl_family = AF_NETLINK;
if(bind(l_socket, (struct sockaddr *)&l_addr, sizeof(l_addr)) < 0)
{
close(l_socket);
return -1;
}
l_len = sizeof(l_addr);
if(getsockname(l_socket, (struct sockaddr *)&l_addr, &l_len) < 0)
{
close(l_socket);
return -1;
}
*p_pid = l_addr.nl_pid;
return l_socket;
}
static int netlink_send(int p_socket, int p_request)
{
char l_buffer[NLMSG_ALIGN(sizeof(struct nlmsghdr)) + NLMSG_ALIGN(sizeof(struct rtgenmsg))];
struct nlmsghdr *l_hdr;
struct rtgenmsg *l_msg;
struct sockaddr_nl l_addr;
memset(l_buffer, 0, sizeof(l_buffer));
l_hdr = (struct nlmsghdr *)l_buffer;
l_msg = (struct rtgenmsg *)NLMSG_DATA(l_hdr);
l_hdr->nlmsg_len = NLMSG_LENGTH(sizeof(*l_msg));
l_hdr->nlmsg_type = p_request;
l_hdr->nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST;
l_hdr->nlmsg_pid = 0;
l_hdr->nlmsg_seq = p_socket;
l_msg->rtgen_family = AF_UNSPEC;
memset(&l_addr, 0, sizeof(l_addr));
l_addr.nl_family = AF_NETLINK;
return (sendto(p_socket, l_hdr, l_hdr->nlmsg_len, 0, (struct sockaddr *)&l_addr, sizeof(l_addr)));
}
static int netlink_recv(int p_socket, void *p_buffer, size_t p_len)
{
struct sockaddr_nl l_addr;
struct msghdr l_msg;
struct iovec l_iov;
l_iov.iov_base = p_buffer;
l_iov.iov_len = p_len;
for(;;)
{
int l_result;
l_msg.msg_name = (void *)&l_addr;
l_msg.msg_namelen = sizeof(l_addr);
l_msg.msg_iov = &l_iov;
l_msg.msg_iovlen = 1;
l_msg.msg_control = NULL;
l_msg.msg_controllen = 0;
l_msg.msg_flags = 0;
l_result = recvmsg(p_socket, &l_msg, 0);
if(l_result < 0)
{
if(errno == EINTR)
{
continue;
}
return -2;
}
/* Buffer was too small */
if(l_msg.msg_flags & MSG_TRUNC)
{
return -1;
}
return l_result;
}
}
static struct nlmsghdr *getNetlinkResponse(int p_socket, pid_t p_pid, int *p_size, int *p_done)
{
size_t l_size = 4096;
void *l_buffer = NULL;
for(;;)
{
int l_read;
uv__free(l_buffer);
l_buffer = uv__malloc(l_size);
if (l_buffer == NULL)
{
return NULL;
}
l_read = netlink_recv(p_socket, l_buffer, l_size);
*p_size = l_read;
if(l_read == -2)
{
uv__free(l_buffer);
return NULL;
}
if(l_read >= 0)
{
struct nlmsghdr *l_hdr;
for(l_hdr = (struct nlmsghdr *)l_buffer; NLMSG_OK(l_hdr, (unsigned int)l_read); l_hdr = (struct nlmsghdr *)NLMSG_NEXT(l_hdr, l_read))
{
if((pid_t)l_hdr->nlmsg_pid != p_pid || (int)l_hdr->nlmsg_seq != p_socket)
{
continue;
}
if(l_hdr->nlmsg_type == NLMSG_DONE)
{
*p_done = 1;
break;
}
if(l_hdr->nlmsg_type == NLMSG_ERROR)
{
uv__free(l_buffer);
return NULL;
}
}
return l_buffer;
}
l_size *= 2;
}
}
static NetlinkList *newListItem(struct nlmsghdr *p_data, unsigned int p_size)
{
NetlinkList *l_item = uv__malloc(sizeof(NetlinkList));
if (l_item == NULL)
{
return NULL;
}
l_item->m_next = NULL;
l_item->m_data = p_data;
l_item->m_size = p_size;
return l_item;
}
static void freeResultList(NetlinkList *p_list)
{
NetlinkList *l_cur;
while(p_list)
{
l_cur = p_list;
p_list = p_list->m_next;
uv__free(l_cur->m_data);
uv__free(l_cur);
}
}
static NetlinkList *getResultList(int p_socket, int p_request, pid_t p_pid)
{
int l_size;
int l_done;
NetlinkList *l_list;
NetlinkList *l_end;
if(netlink_send(p_socket, p_request) < 0)
{
return NULL;
}
l_list = NULL;
l_end = NULL;
l_done = 0;
while(!l_done)
{
NetlinkList *l_item;
struct nlmsghdr *l_hdr = getNetlinkResponse(p_socket, p_pid, &l_size, &l_done);
/* Error */
if(!l_hdr)
{
freeResultList(l_list);
return NULL;
}
l_item = newListItem(l_hdr, l_size);
if (!l_item)
{
freeResultList(l_list);
return NULL;
}
if(!l_list)
{
l_list = l_item;
}
else
{
l_end->m_next = l_item;
}
l_end = l_item;
}
return l_list;
}
static size_t maxSize(size_t a, size_t b)
{
return (a > b ? a : b);
}
static size_t calcAddrLen(sa_family_t p_family, int p_dataSize)
{
switch(p_family)
{
case AF_INET:
return sizeof(struct sockaddr_in);
case AF_INET6:
return sizeof(struct sockaddr_in6);
case AF_PACKET:
return maxSize(sizeof(struct sockaddr_ll), offsetof(struct sockaddr_ll, sll_addr) + p_dataSize);
default:
return maxSize(sizeof(struct sockaddr), offsetof(struct sockaddr, sa_data) + p_dataSize);
}
}
static void makeSockaddr(sa_family_t p_family, struct sockaddr *p_dest, void *p_data, size_t p_size)
{
switch(p_family)
{
case AF_INET:
memcpy(&((struct sockaddr_in*)p_dest)->sin_addr, p_data, p_size);
break;
case AF_INET6:
memcpy(&((struct sockaddr_in6*)p_dest)->sin6_addr, p_data, p_size);
break;
case AF_PACKET:
memcpy(((struct sockaddr_ll*)p_dest)->sll_addr, p_data, p_size);
((struct sockaddr_ll*)p_dest)->sll_halen = p_size;
break;
default:
memcpy(p_dest->sa_data, p_data, p_size);
break;
}
p_dest->sa_family = p_family;
}
static void addToEnd(struct ifaddrs **p_resultList, struct ifaddrs *p_entry)
{
if(!*p_resultList)
{
*p_resultList = p_entry;
}
else
{
struct ifaddrs *l_cur = *p_resultList;
while(l_cur->ifa_next)
{
l_cur = l_cur->ifa_next;
}
l_cur->ifa_next = p_entry;
}
}
static int interpretLink(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList)
{
struct ifaddrs *l_entry;
char *l_index;
char *l_name;
char *l_addr;
char *l_data;
struct ifinfomsg *l_info = (struct ifinfomsg *)NLMSG_DATA(p_hdr);
size_t l_nameSize = 0;
size_t l_addrSize = 0;
size_t l_dataSize = 0;
size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg));
struct rtattr *l_rta;
for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
{
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
switch(l_rta->rta_type)
{
case IFLA_ADDRESS:
case IFLA_BROADCAST:
l_addrSize += NLMSG_ALIGN(calcAddrLen(AF_PACKET, l_rtaDataSize));
break;
case IFLA_IFNAME:
l_nameSize += NLMSG_ALIGN(l_rtaSize + 1);
break;
case IFLA_STATS:
l_dataSize += NLMSG_ALIGN(l_rtaSize);
break;
default:
break;
}
}
l_entry = uv__malloc(sizeof(struct ifaddrs) + sizeof(int) + l_nameSize + l_addrSize + l_dataSize);
if (l_entry == NULL)
{
return -1;
}
memset(l_entry, 0, sizeof(struct ifaddrs));
l_entry->ifa_name = "";
l_index = ((char *)l_entry) + sizeof(struct ifaddrs);
l_name = l_index + sizeof(int);
l_addr = l_name + l_nameSize;
l_data = l_addr + l_addrSize;
/* Save the interface index so we can look it up when handling the
* addresses.
*/
memcpy(l_index, &l_info->ifi_index, sizeof(int));
l_entry->ifa_flags = l_info->ifi_flags;
l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg));
for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
{
void *l_rtaData = RTA_DATA(l_rta);
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
switch(l_rta->rta_type)
{
case IFLA_ADDRESS:
case IFLA_BROADCAST:
{
size_t l_addrLen = calcAddrLen(AF_PACKET, l_rtaDataSize);
makeSockaddr(AF_PACKET, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize);
((struct sockaddr_ll *)l_addr)->sll_ifindex = l_info->ifi_index;
((struct sockaddr_ll *)l_addr)->sll_hatype = l_info->ifi_type;
if(l_rta->rta_type == IFLA_ADDRESS)
{
l_entry->ifa_addr = (struct sockaddr *)l_addr;
}
else
{
l_entry->ifa_broadaddr = (struct sockaddr *)l_addr;
}
l_addr += NLMSG_ALIGN(l_addrLen);
break;
}
case IFLA_IFNAME:
strncpy(l_name, l_rtaData, l_rtaDataSize);
l_name[l_rtaDataSize] = '\0';
l_entry->ifa_name = l_name;
break;
case IFLA_STATS:
memcpy(l_data, l_rtaData, l_rtaDataSize);
l_entry->ifa_data = l_data;
break;
default:
break;
}
}
addToEnd(p_resultList, l_entry);
return 0;
}
static struct ifaddrs *findInterface(int p_index, struct ifaddrs **p_links, int p_numLinks)
{
int l_num = 0;
struct ifaddrs *l_cur = *p_links;
while(l_cur && l_num < p_numLinks)
{
char *l_indexPtr = ((char *)l_cur) + sizeof(struct ifaddrs);
int l_index;
memcpy(&l_index, l_indexPtr, sizeof(int));
if(l_index == p_index)
{
return l_cur;
}
l_cur = l_cur->ifa_next;
++l_num;
}
return NULL;
}
static int interpretAddr(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList, int p_numLinks)
{
struct ifaddrmsg *l_info = (struct ifaddrmsg *)NLMSG_DATA(p_hdr);
struct ifaddrs *l_interface = findInterface(l_info->ifa_index, p_resultList, p_numLinks);
size_t l_nameSize = 0;
size_t l_addrSize = 0;
int l_addedNetmask = 0;
size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg));
struct rtattr *l_rta;
struct ifaddrs *l_entry;
char *l_name;
char *l_addr;
for(l_rta = IFA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
{
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
if(l_info->ifa_family == AF_PACKET)
{
continue;
}
switch(l_rta->rta_type)
{
case IFA_ADDRESS:
case IFA_LOCAL:
l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
if((l_info->ifa_family == AF_INET || l_info->ifa_family == AF_INET6) && !l_addedNetmask)
{
/* Make room for netmask */
l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
l_addedNetmask = 1;
}
break;
case IFA_BROADCAST:
l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
break;
case IFA_LABEL:
l_nameSize += NLMSG_ALIGN(l_rtaDataSize + 1);
break;
default:
break;
}
}
l_entry = uv__malloc(sizeof(struct ifaddrs) + l_nameSize + l_addrSize);
if (l_entry == NULL)
{
return -1;
}
memset(l_entry, 0, sizeof(struct ifaddrs));
l_entry->ifa_name = (l_interface ? l_interface->ifa_name : "");
l_name = ((char *)l_entry) + sizeof(struct ifaddrs);
l_addr = l_name + l_nameSize;
l_entry->ifa_flags = l_info->ifa_flags;
if(l_interface)
{
l_entry->ifa_flags |= l_interface->ifa_flags;
}
l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg));
for(l_rta = IFA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
{
void *l_rtaData = RTA_DATA(l_rta);
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
switch(l_rta->rta_type)
{
case IFA_ADDRESS:
case IFA_BROADCAST:
case IFA_LOCAL:
{
size_t l_addrLen = calcAddrLen(l_info->ifa_family, l_rtaDataSize);
makeSockaddr(l_info->ifa_family, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize);
if(l_info->ifa_family == AF_INET6)
{
if(IN6_IS_ADDR_LINKLOCAL((struct in6_addr *)l_rtaData) || IN6_IS_ADDR_MC_LINKLOCAL((struct in6_addr *)l_rtaData))
{
((struct sockaddr_in6 *)l_addr)->sin6_scope_id = l_info->ifa_index;
}
}
/* Apparently in a point-to-point network IFA_ADDRESS contains
* the dest address and IFA_LOCAL contains the local address
*/
if(l_rta->rta_type == IFA_ADDRESS)
{
if(l_entry->ifa_addr)
{
l_entry->ifa_dstaddr = (struct sockaddr *)l_addr;
}
else
{
l_entry->ifa_addr = (struct sockaddr *)l_addr;
}
}
else if(l_rta->rta_type == IFA_LOCAL)
{
if(l_entry->ifa_addr)
{
l_entry->ifa_dstaddr = l_entry->ifa_addr;
}
l_entry->ifa_addr = (struct sockaddr *)l_addr;
}
else
{
l_entry->ifa_broadaddr = (struct sockaddr *)l_addr;
}
l_addr += NLMSG_ALIGN(l_addrLen);
break;
}
case IFA_LABEL:
strncpy(l_name, l_rtaData, l_rtaDataSize);
l_name[l_rtaDataSize] = '\0';
l_entry->ifa_name = l_name;
break;
default:
break;
}
}
if(l_entry->ifa_addr && (l_entry->ifa_addr->sa_family == AF_INET || l_entry->ifa_addr->sa_family == AF_INET6))
{
unsigned l_maxPrefix = (l_entry->ifa_addr->sa_family == AF_INET ? 32 : 128);
unsigned l_prefix = (l_info->ifa_prefixlen > l_maxPrefix ? l_maxPrefix : l_info->ifa_prefixlen);
unsigned char l_mask[16] = {0};
unsigned i;
for(i=0; i<(l_prefix/8); ++i)
{
l_mask[i] = 0xff;
}
if(l_prefix % 8)
{
l_mask[i] = 0xff << (8 - (l_prefix % 8));
}
makeSockaddr(l_entry->ifa_addr->sa_family, (struct sockaddr *)l_addr, l_mask, l_maxPrefix / 8);
l_entry->ifa_netmask = (struct sockaddr *)l_addr;
}
addToEnd(p_resultList, l_entry);
return 0;
}
static int interpretLinks(int p_socket, pid_t p_pid, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList)
{
int l_numLinks = 0;
for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next)
{
unsigned int l_nlsize = p_netlinkList->m_size;
struct nlmsghdr *l_hdr;
for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize))
{
if((pid_t)l_hdr->nlmsg_pid != p_pid || (int)l_hdr->nlmsg_seq != p_socket)
{
continue;
}
if(l_hdr->nlmsg_type == NLMSG_DONE)
{
break;
}
if(l_hdr->nlmsg_type == RTM_NEWLINK)
{
if(interpretLink(l_hdr, p_resultList) == -1)
{
return -1;
}
++l_numLinks;
}
}
}
return l_numLinks;
}
static int interpretAddrs(int p_socket, pid_t p_pid, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList, int p_numLinks)
{
for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next)
{
unsigned int l_nlsize = p_netlinkList->m_size;
struct nlmsghdr *l_hdr;
for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize))
{
if((pid_t)l_hdr->nlmsg_pid != p_pid || (int)l_hdr->nlmsg_seq != p_socket)
{
continue;
}
if(l_hdr->nlmsg_type == NLMSG_DONE)
{
break;
}
if(l_hdr->nlmsg_type == RTM_NEWADDR)
{
if (interpretAddr(l_hdr, p_resultList, p_numLinks) == -1)
{
return -1;
}
}
}
}
return 0;
}
int getifaddrs(struct ifaddrs **ifap)
{
int l_socket;
int l_result;
int l_numLinks;
pid_t l_pid;
NetlinkList *l_linkResults;
NetlinkList *l_addrResults;
if(!ifap)
{
return -1;
}
*ifap = NULL;
l_socket = netlink_socket(&l_pid);
if(l_socket < 0)
{
return -1;
}
l_linkResults = getResultList(l_socket, RTM_GETLINK, l_pid);
if(!l_linkResults)
{
close(l_socket);
return -1;
}
l_addrResults = getResultList(l_socket, RTM_GETADDR, l_pid);
if(!l_addrResults)
{
close(l_socket);
freeResultList(l_linkResults);
return -1;
}
l_result = 0;
l_numLinks = interpretLinks(l_socket, l_pid, l_linkResults, ifap);
if(l_numLinks == -1 || interpretAddrs(l_socket, l_pid, l_addrResults, ifap, l_numLinks) == -1)
{
l_result = -1;
}
freeResultList(l_linkResults);
freeResultList(l_addrResults);
close(l_socket);
return l_result;
}
void freeifaddrs(struct ifaddrs *ifa)
{
struct ifaddrs *l_cur;
while(ifa)
{
l_cur = ifa;
ifa = ifa->ifa_next;
uv__free(l_cur);
}
}

deps/uv/src/unix/atomic-ops.h

@ -37,12 +37,11 @@ UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) {
: "memory");
return out;
#elif defined(__MVS__)
unsigned int op4;
if (__plo_CSST(ptr, (unsigned int*) &oldval, newval,
(unsigned int*) ptr, *ptr, &op4))
/* Use hand-rolled assembly because codegen from builtin __plo_CSST results in
* a runtime bug.
*/
__asm(" cs %0,%2,%1 \n " : "+r"(oldval), "+m"(*ptr) : "r"(newval) :);
return oldval;
else
return op4;
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
return atomic_cas_uint((uint_t *)ptr, (uint_t)oldval, (uint_t)newval);
#else
@ -55,7 +54,9 @@ UV_UNUSED(static void cpu_relax(void)) {
__asm__ __volatile__ ("rep; nop" ::: "memory"); /* a.k.a. PAUSE */
#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
__asm__ __volatile__ ("yield" ::: "memory");
#elif defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__)
#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
__asm volatile ("" : : : "memory");
#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
__asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
#endif
}
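
For readers who have not met these helpers: cmpxchgi() is libuv's private compare-and-swap (it returns the value previously stored at `ptr`) and cpu_relax() is a busy-wait hint. A hypothetical spinlock shows how the two are typically combined; the lock type below is illustrative, not part of libuv:

    #include "atomic-ops.h"  /* internal header shown above; libuv tree only */

    struct spinlock { int busy; };

    static void spin_lock(struct spinlock* l) {
      /* The swap took effect only if the old value returned was 0 (free). */
      while (cmpxchgi(&l->busy, 0, 1) != 0)
        cpu_relax();  /* PAUSE/yield hint while waiting */
    }

    static void spin_unlock(struct spinlock* l) {
      /* Release with another CAS so one primitive covers both directions. */
      cmpxchgi(&l->busy, 1, 0);
    }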

deps/uv/src/unix/bsd-ifaddrs.c

@ -27,7 +27,7 @@
#include <ifaddrs.h>
#include <net/if.h>
#if !defined(__CYGWIN__) && !defined(__MSYS__)
#if !defined(__CYGWIN__) && !defined(__MSYS__) && !defined(__GNU__)
#include <net/if_dl.h>
#endif
@ -40,7 +40,7 @@ static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
return 1;
if (ent->ifa_addr == NULL)
return 1;
#if !defined(__CYGWIN__) && !defined(__MSYS__)
#if !defined(__CYGWIN__) && !defined(__MSYS__) && !defined(__GNU__)
/*
* If `exclude_type` is `UV__EXCLUDE_IFPHYS`, return whether `sa_family`
* equals `AF_LINK`. Otherwise, the result depends on the operating
@ -69,7 +69,7 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
struct ifaddrs* addrs;
struct ifaddrs* ent;
uv_interface_address_t* address;
#if !(defined(__CYGWIN__) || defined(__MSYS__))
#if !(defined(__CYGWIN__) || defined(__MSYS__)) && !defined(__GNU__)
int i;
#endif
@ -126,7 +126,7 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
address++;
}
#if !(defined(__CYGWIN__) || defined(__MSYS__))
#if !(defined(__CYGWIN__) || defined(__MSYS__)) && !defined(__GNU__)
/* Fill in physical addresses for each interface */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))

deps/uv/src/unix/bsd-proctitle.c

@ -38,6 +38,7 @@ static void init_process_title_mutex_once(void) {
void uv__process_title_cleanup(void) {
uv_once(&process_title_mutex_once, init_process_title_mutex_once);
uv_mutex_destroy(&process_title_mutex);
}

View file

@ -20,6 +20,7 @@
#include "uv.h"
#include "internal.h"
#include "strtok.h"
#include <stddef.h> /* NULL */
#include <stdio.h> /* printf */
@ -80,10 +81,12 @@ extern char** environ;
#endif
#if defined(__MVS__)
#include <sys/ioctl.h>
# include <sys/ioctl.h>
# include "zos-sys-info.h"
#endif
#if defined(__linux__)
# include <sched.h>
# include <sys/syscall.h>
# define uv__accept4 accept4
#endif
@ -92,13 +95,13 @@ extern char** environ;
# include <sanitizer/linux_syscall_hooks.h>
#endif
static int uv__run_pending(uv_loop_t* loop);
static void uv__run_pending(uv_loop_t* loop);
/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->base) ==
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->base) ==
sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->len) ==
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->len) ==
sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
@ -158,6 +161,15 @@ void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
case UV_FS_EVENT:
uv__fs_event_close((uv_fs_event_t*)handle);
#if defined(__sun) || defined(__MVS__)
/*
* On Solaris, illumos, and z/OS we will not be able to dissociate the
* watcher for an event which is pending delivery, so we cannot always call
* uv__make_close_pending() straight away. The backend will call the
* function once the event has cleared.
*/
return;
#endif
break;
case UV_POLL:
@ -334,33 +346,34 @@ int uv_backend_fd(const uv_loop_t* loop) {
}
int uv_backend_timeout(const uv_loop_t* loop) {
if (loop->stop_flag != 0)
return 0;
if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
return 0;
if (!QUEUE_EMPTY(&loop->idle_handles))
return 0;
if (!QUEUE_EMPTY(&loop->pending_queue))
return 0;
if (loop->closing_handles)
return 0;
return uv__next_timeout(loop);
}
static int uv__loop_alive(const uv_loop_t* loop) {
return uv__has_active_handles(loop) ||
uv__has_active_reqs(loop) ||
!QUEUE_EMPTY(&loop->pending_queue) ||
loop->closing_handles != NULL;
}
static int uv__backend_timeout(const uv_loop_t* loop) {
if (loop->stop_flag == 0 &&
/* uv__loop_alive(loop) && */
(uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
QUEUE_EMPTY(&loop->pending_queue) &&
QUEUE_EMPTY(&loop->idle_handles) &&
loop->closing_handles == NULL)
return uv__next_timeout(loop);
return 0;
}
int uv_backend_timeout(const uv_loop_t* loop) {
if (QUEUE_EMPTY(&loop->watcher_queue))
return uv__backend_timeout(loop);
/* Need to call uv_run to update the backend fd state. */
return 0;
}
int uv_loop_alive(const uv_loop_t* loop) {
return uv__loop_alive(loop);
}
@ -369,7 +382,7 @@ int uv_loop_alive(const uv_loop_t* loop) {
int uv_run(uv_loop_t* loop, uv_run_mode mode) {
int timeout;
int r;
int ran_pending;
int can_sleep;
r = uv__loop_alive(loop);
if (!r)
@ -378,16 +391,25 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
while (r != 0 && loop->stop_flag == 0) {
uv__update_time(loop);
uv__run_timers(loop);
ran_pending = uv__run_pending(loop);
can_sleep =
QUEUE_EMPTY(&loop->pending_queue) && QUEUE_EMPTY(&loop->idle_handles);
uv__run_pending(loop);
uv__run_idle(loop);
uv__run_prepare(loop);
timeout = 0;
if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
timeout = uv_backend_timeout(loop);
if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
timeout = uv__backend_timeout(loop);
uv__io_poll(loop, timeout);
/* Process immediate callbacks (e.g. write_cb) a small fixed number of
* times to avoid loop starvation. */

for (r = 0; r < 8 && !QUEUE_EMPTY(&loop->pending_queue); r++)
uv__run_pending(loop);
/* Run one final update on the provider_idle_time in case uv__io_poll
* returned because the timeout expired, but no events were received. This
* call will be ignored if the provider_entry_time was either never set (if
@ -597,20 +619,6 @@ int uv__nonblock_ioctl(int fd, int set) {
return 0;
}
int uv__cloexec_ioctl(int fd, int set) {
int r;
do
r = ioctl(fd, set ? FIOCLEX : FIONCLEX);
while (r == -1 && errno == EINTR);
if (r)
return UV__ERR(errno);
return 0;
}
#endif
@ -645,25 +653,13 @@ int uv__nonblock_fcntl(int fd, int set) {
}
int uv__cloexec_fcntl(int fd, int set) {
int uv__cloexec(int fd, int set) {
int flags;
int r;
do
r = fcntl(fd, F_GETFD);
while (r == -1 && errno == EINTR);
if (r == -1)
return UV__ERR(errno);
/* Bail out now if already set/clear. */
if (!!(r & FD_CLOEXEC) == !!set)
return 0;
flags = 0;
if (set)
flags = r | FD_CLOEXEC;
else
flags = r & ~FD_CLOEXEC;
flags = FD_CLOEXEC;
do
r = fcntl(fd, F_SETFD, flags);
@ -677,28 +673,23 @@ int uv__cloexec_fcntl(int fd, int set) {
ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
struct cmsghdr* cmsg;
#if defined(__ANDROID__) || \
defined(__DragonFly__) || \
defined(__FreeBSD__) || \
defined(__NetBSD__) || \
defined(__OpenBSD__) || \
defined(__linux__)
ssize_t rc;
int* pfd;
int* end;
#if defined(__linux__)
static int no_msg_cmsg_cloexec;
if (0 == uv__load_relaxed(&no_msg_cmsg_cloexec)) {
rc = recvmsg(fd, msg, flags | 0x40000000); /* MSG_CMSG_CLOEXEC */
if (rc != -1)
return rc;
if (errno != EINVAL)
return UV__ERR(errno);
rc = recvmsg(fd, msg, flags);
rc = recvmsg(fd, msg, flags | MSG_CMSG_CLOEXEC);
if (rc == -1)
return UV__ERR(errno);
uv__store_relaxed(&no_msg_cmsg_cloexec, 1);
} else {
rc = recvmsg(fd, msg, flags);
}
return rc;
#else
struct cmsghdr* cmsg;
int* pfd;
int* end;
ssize_t rc;
rc = recvmsg(fd, msg, flags);
#endif
if (rc == -1)
return UV__ERR(errno);
if (msg->msg_controllen == 0)
@ -711,6 +702,7 @@ ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
pfd += 1)
uv__cloexec(*pfd, 1);
return rc;
#endif
}
@ -803,14 +795,11 @@ int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
}
static int uv__run_pending(uv_loop_t* loop) {
static void uv__run_pending(uv_loop_t* loop) {
QUEUE* q;
QUEUE pq;
uv__io_t* w;
if (QUEUE_EMPTY(&loop->pending_queue))
return 0;
QUEUE_MOVE(&loop->pending_queue, &pq);
while (!QUEUE_EMPTY(&pq)) {
@ -820,8 +809,6 @@ static int uv__run_pending(uv_loop_t* loop) {
w = QUEUE_DATA(q, uv__io_t, pending_queue);
w->cb(loop, w, POLLOUT);
}
return 1;
}
@ -1036,6 +1023,32 @@ int uv__open_cloexec(const char* path, int flags) {
}
int uv__slurp(const char* filename, char* buf, size_t len) {
ssize_t n;
int fd;
assert(len > 0);
fd = uv__open_cloexec(filename, O_RDONLY);
if (fd < 0)
return fd;
do
n = read(fd, buf, len - 1);
while (n == -1 && errno == EINTR);
if (uv__close_nocheckstdio(fd))
abort();
if (n < 0)
return UV__ERR(errno);
buf[n] = '\0';
return 0;
}
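
uv__slurp() moves into core.c here so other ports (the new Hurd code later in this diff, for instance) can reuse it. A hypothetical caller, mirroring how uv_uptime() uses it further down (sketch only, not part of the diff):

#include <stdio.h>

static void print_uptime(void) {
  char buf[128];
  double uptime;

  /* uv__slurp() NUL-terminates buf and retries the read on EINTR. */
  if (uv__slurp("/proc/uptime", buf, sizeof(buf)) == 0 &&
      sscanf(buf, "%lf", &uptime) == 1)
    printf("up for %.0f seconds\n", uptime);
}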
int uv__dup2_cloexec(int oldfd, int newfd) {
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__linux__)
int r;
@ -1160,24 +1173,17 @@ int uv__getpwuid_r(uv_passwd_t* pwd) {
size_t name_size;
size_t homedir_size;
size_t shell_size;
long initsize;
int r;
if (pwd == NULL)
return UV_EINVAL;
initsize = sysconf(_SC_GETPW_R_SIZE_MAX);
if (initsize <= 0)
bufsize = 4096;
else
bufsize = (size_t) initsize;
uid = geteuid();
buf = NULL;
for (;;) {
uv__free(buf);
/* Calling sysconf(_SC_GETPW_R_SIZE_MAX) would get the suggested size, but it
* is frequently 1024 or 4096, so we can just use that directly. The pwent
* will not usually be large. */
for (bufsize = 2000;; bufsize *= 2) {
buf = uv__malloc(bufsize);
if (buf == NULL)
@ -1187,21 +1193,18 @@ int uv__getpwuid_r(uv_passwd_t* pwd) {
r = getpwuid_r(uid, &pw, buf, bufsize, &result);
while (r == EINTR);
if (r != 0 || result == NULL)
uv__free(buf);
if (r != ERANGE)
break;
bufsize *= 2;
}
if (r != 0) {
uv__free(buf);
if (r != 0)
return UV__ERR(r);
}
if (result == NULL) {
uv__free(buf);
if (result == NULL)
return UV_ENOENT;
}
/* Allocate memory for the username, shell, and home directory */
name_size = strlen(pw.pw_name) + 1;
@ -1554,6 +1557,7 @@ int uv__search_path(const char* prog, char* buf, size_t* buflen) {
char* cloned_path;
char* path_env;
char* token;
char* itr;
if (buf == NULL || buflen == NULL || *buflen == 0)
return UV_EINVAL;
@ -1595,7 +1599,7 @@ int uv__search_path(const char* prog, char* buf, size_t* buflen) {
if (cloned_path == NULL)
return UV_ENOMEM;
token = strtok(cloned_path, ":");
token = uv__strtok(cloned_path, ":", &itr);
while (token != NULL) {
snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, prog);
if (realpath(trypath, abspath) == abspath) {
@ -1614,10 +1618,50 @@ int uv__search_path(const char* prog, char* buf, size_t* buflen) {
return 0;
}
}
token = strtok(NULL, ":");
token = uv__strtok(NULL, ":", &itr);
}
uv__free(cloned_path);
/* Out of tokens (path entries), and no match found */
return UV_EINVAL;
}
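
The uv__strtok() swap above addresses the thread-unsafety called out in the changelog: strtok() keeps its position in hidden static state, while the reentrant form threads it through a caller-owned pointer. A small standalone illustration using the standard strtok_r() (a sketch, not the libuv helper itself):

#include <stdio.h>
#include <string.h>

int main(void) {
  char path[] = "/usr/local/bin:/usr/bin:/bin";
  char* state;                               /* caller-owned, no hidden global */
  char* tok;

  for (tok = strtok_r(path, ":", &state);
       tok != NULL;
       tok = strtok_r(NULL, ":", &state))
    printf("PATH entry: %s\n", tok);

  return 0;
}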
unsigned int uv_available_parallelism(void) {
#ifdef __linux__
cpu_set_t set;
long rc;
memset(&set, 0, sizeof(set));
/* sysconf(_SC_NPROCESSORS_ONLN) in musl calls sched_getaffinity() but in
* glibc it's... complicated... so for consistency try sched_getaffinity()
* before falling back to sysconf(_SC_NPROCESSORS_ONLN).
*/
if (0 == sched_getaffinity(0, sizeof(set), &set))
rc = CPU_COUNT(&set);
else
rc = sysconf(_SC_NPROCESSORS_ONLN);
if (rc < 1)
rc = 1;
return (unsigned) rc;
#elif defined(__MVS__)
int rc;
rc = __get_num_online_cpus();
if (rc < 1)
rc = 1;
return (unsigned) rc;
#else /* __linux__ */
long rc;
rc = sysconf(_SC_NPROCESSORS_ONLN);
if (rc < 1)
rc = 1;
return (unsigned) rc;
#endif /* __linux__ */
}
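
uv_available_parallelism() is the new public API from the changelog; it always reports at least 1. A minimal hypothetical consumer that sizes a worker pool with it:

#include <stdio.h>
#include <uv.h>

int main(void) {
  unsigned int workers = uv_available_parallelism();  /* >= 1 by contract */
  printf("spawning %u worker threads\n", workers);
  return 0;
}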

View file

@ -287,3 +287,18 @@ int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
return errno = ENOSYS, -1;
#endif
}
ssize_t
uv__fs_copy_file_range(int fd_in,
off_t* off_in,
int fd_out,
off_t* off_out,
size_t len,
unsigned int flags)
{
#if __FreeBSD__ >= 13 && !defined(__DragonFly__)
return copy_file_range(fd_in, off_in, fd_out, off_out, len, flags);
#else
return errno = ENOSYS, -1;
#endif
}

18
deps/uv/src/unix/fs.c vendored
View file

@ -247,7 +247,8 @@ UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__linux__) \
|| defined(_AIX71) \
|| defined(__HAIKU__)
|| defined(__HAIKU__) \
|| defined(__GNU__)
struct timespec ts[2];
ts[0] = uv__fs_to_timespec(req->atime);
ts[1] = uv__fs_to_timespec(req->mtime);
@ -1074,6 +1075,17 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
*/
#if defined(__FreeBSD__) || defined(__DragonFly__)
#if defined(__FreeBSD__)
off_t off;
off = req->off;
r = uv__fs_copy_file_range(in_fd, &off, out_fd, NULL, req->bufsml[0].len, 0);
if (r >= 0) {
r = off - req->off;
req->off = off;
return r;
}
#endif
len = 0;
r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
#elif defined(__FreeBSD_kernel__)
@ -1168,7 +1180,9 @@ static ssize_t uv__fs_lutime(uv_fs_t* req) {
#if defined(__linux__) || \
defined(_AIX71) || \
defined(__sun) || \
defined(__HAIKU__)
defined(__HAIKU__) || \
defined(__GNU__) || \
defined(__OpenBSD__)
struct timespec ts[2];
ts[0] = uv__fs_to_timespec(req->atime);
ts[1] = uv__fs_to_timespec(req->mtime);

167
deps/uv/src/unix/hurd.c vendored Normal file
View file

@ -0,0 +1,167 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#define _GNU_SOURCE 1
#include "uv.h"
#include "internal.h"
#include <hurd.h>
#include <hurd/process.h>
#include <mach/task_info.h>
#include <mach/vm_statistics.h>
#include <mach/vm_param.h>
#include <inttypes.h>
#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <limits.h>
int uv_exepath(char* buffer, size_t* size) {
kern_return_t err;
/* XXX in current Hurd, strings are char arrays of 1024 elements */
string_t exepath;
ssize_t copied;
if (buffer == NULL || size == NULL || *size == 0)
return UV_EINVAL;
if (*size - 1 > 0) {
/* XXX limited length of buffer in current Hurd, this API will probably
* evolve in the future */
err = proc_get_exe(getproc(), getpid(), exepath);
if (err)
return UV__ERR(err);
}
copied = uv__strscpy(buffer, exepath, *size);
/* do not return error on UV_E2BIG failure */
*size = copied < 0 ? strlen(buffer) : (size_t) copied;
return 0;
}
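
The Hurd port implements the usual uv_exepath() contract: the caller passes a buffer plus an in/out size. A minimal hypothetical caller, valid on any libuv platform:

#include <stdio.h>
#include <uv.h>

int main(void) {
  char path[1024];
  size_t size = sizeof(path);

  if (uv_exepath(path, &size) == 0)
    printf("executable: %s (%zu bytes)\n", path, size);
  return 0;
}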
int uv_resident_set_memory(size_t* rss) {
kern_return_t err;
struct task_basic_info bi;
mach_msg_type_number_t count;
count = TASK_BASIC_INFO_COUNT;
err = task_info(mach_task_self(), TASK_BASIC_INFO,
(task_info_t) &bi, &count);
if (err)
return UV__ERR(err);
*rss = bi.resident_size;
return 0;
}
uint64_t uv_get_free_memory(void) {
kern_return_t err;
struct vm_statistics vmstats;
err = vm_statistics(mach_task_self(), &vmstats);
if (err)
return 0;
return vmstats.free_count * vm_page_size;
}
uint64_t uv_get_total_memory(void) {
kern_return_t err;
host_basic_info_data_t hbi;
mach_msg_type_number_t cnt;
cnt = HOST_BASIC_INFO_COUNT;
err = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t) &hbi, &cnt);
if (err)
return 0;
return hbi.memory_size;
}
int uv_uptime(double* uptime) {
char buf[128];
/* Try /proc/uptime first */
if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf)))
if (1 == sscanf(buf, "%lf", uptime))
return 0;
/* Reimplement here code from procfs to calculate uptime if not mounted? */
return UV__ERR(EIO);
}
void uv_loadavg(double avg[3]) {
char buf[128]; /* Large enough to hold all of /proc/loadavg. */
if (0 == uv__slurp("/proc/loadavg", buf, sizeof(buf)))
if (3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2]))
return;
/* Reimplement here code from procfs to calculate loadavg if not mounted? */
}
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
kern_return_t err;
host_basic_info_data_t hbi;
mach_msg_type_number_t cnt;
/* Get count of cpus */
cnt = HOST_BASIC_INFO_COUNT;
err = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t) &hbi, &cnt);
if (err) {
err = UV__ERR(err);
goto abort;
}
/* XXX not implemented on the Hurd */
*cpu_infos = uv__calloc(hbi.avail_cpus, sizeof(**cpu_infos));
if (*cpu_infos == NULL) {
err = UV_ENOMEM;
goto abort;
}
*count = hbi.avail_cpus;
return 0;
abort:
*cpu_infos = NULL;
*count = 0;
return err;
}
uint64_t uv_get_constrained_memory(void) {
return 0; /* Memory constraints are unknown. */
}

View file

@ -145,7 +145,8 @@ typedef struct uv__stream_queued_fds_s uv__stream_queued_fds_t;
/* loop flags */
enum {
UV_LOOP_BLOCK_SIGPROF = 1
UV_LOOP_BLOCK_SIGPROF = 0x1,
UV_LOOP_REAP_CHILDREN = 0x2
};
/* flags of excluding ifaddr */
@ -174,11 +175,9 @@ struct uv__stream_queued_fds_s {
defined(__linux__) || \
defined(__OpenBSD__) || \
defined(__NetBSD__)
#define uv__cloexec uv__cloexec_ioctl
#define uv__nonblock uv__nonblock_ioctl
#define UV__NONBLOCK_IS_IOCTL 1
#else
#define uv__cloexec uv__cloexec_fcntl
#define uv__nonblock uv__nonblock_fcntl
#define UV__NONBLOCK_IS_IOCTL 0
#endif
@ -196,8 +195,7 @@ struct uv__stream_queued_fds_s {
#endif
/* core */
int uv__cloexec_ioctl(int fd, int set);
int uv__cloexec_fcntl(int fd, int set);
int uv__cloexec(int fd, int set);
int uv__nonblock_ioctl(int fd, int set);
int uv__nonblock_fcntl(int fd, int set);
int uv__close(int fd); /* preserves errno */
@ -241,14 +239,15 @@ void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events);
int uv__accept(int sockfd);
int uv__dup2_cloexec(int oldfd, int newfd);
int uv__open_cloexec(const char* path, int flags);
int uv__slurp(const char* filename, char* buf, size_t len);
/* tcp */
int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb);
int uv__tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb);
int uv__tcp_nodelay(int fd, int on);
int uv__tcp_keepalive(int fd, int on, unsigned int delay);
/* pipe */
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
int uv__pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
/* signal */
void uv__signal_close(uv_signal_t* handle);
@ -278,10 +277,10 @@ void uv__tcp_close(uv_tcp_t* handle);
size_t uv__thread_stack_size(void);
void uv__udp_close(uv_udp_t* handle);
void uv__udp_finish_close(uv_udp_t* handle);
uv_handle_type uv__handle_type(int fd);
FILE* uv__open_file(const char* path);
int uv__getpwuid_r(uv_passwd_t* pwd);
int uv__search_path(const char* prog, char* buf, size_t* buflen);
void uv__wait_children(uv_loop_t* loop);
/* random */
int uv__random_devurandom(void* buf, size_t buflen);
@ -356,5 +355,15 @@ size_t strnlen(const char* s, size_t maxlen);
#endif
#endif
#if defined(__FreeBSD__)
ssize_t
uv__fs_copy_file_range(int fd_in,
off_t* off_in,
int fd_out,
off_t* off_out,
size_t len,
unsigned int flags);
#endif
#endif /* UV_UNIX_INTERNAL_H_ */

View file

@ -117,6 +117,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
unsigned int revents;
QUEUE* q;
uv__io_t* w;
uv_process_t* process;
sigset_t* pset;
sigset_t set;
uint64_t base;
@ -285,6 +286,21 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
for (i = 0; i < nfds; i++) {
ev = events + i;
fd = ev->ident;
/* Handle kevent NOTE_EXIT results */
if (ev->filter == EVFILT_PROC) {
QUEUE_FOREACH(q, &loop->process_handles) {
process = QUEUE_DATA(q, uv_process_t, queue);
if (process->pid == fd) {
process->flags |= UV_HANDLE_REAP;
loop->flags |= UV_LOOP_REAP_CHILDREN;
break;
}
}
nevents++;
continue;
}
/* Skip invalidated events, see uv__platform_invalidate_fd */
if (fd == -1)
continue;
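
On the kqueue platforms this hunk targets, child exits are now observed as EVFILT_PROC/NOTE_EXIT events rather than SIGCHLD. A standalone sketch of that mechanism (hypothetical, assumes a valid child pid on a BSD or macOS host; wait_for_exit is an illustrative name):

#include <sys/types.h>
#include <sys/event.h>
#include <stdio.h>
#include <unistd.h>

static void wait_for_exit(pid_t pid) {
  int kq;
  struct kevent ev;

  kq = kqueue();
  EV_SET(&ev, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT, 0, 0);
  if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
    perror("kevent");              /* e.g. ESRCH if the child already exited */
  else if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1 && (ev.fflags & NOTE_EXIT))
    printf("pid %d exited with status %ld\n", (int) pid, (long) ev.data);
  close(kq);
}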
@ -377,6 +393,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
nevents++;
}
if (loop->flags & UV_LOOP_REAP_CHILDREN) {
loop->flags &= ~UV_LOOP_REAP_CHILDREN;
uv__wait_children(loop);
}
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
@ -435,7 +456,7 @@ void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
/* Invalidate events with same file descriptor */
for (i = 0; i < nfds; i++)
if ((int) events[i].ident == fd)
if ((int) events[i].ident == fd && events[i].filter != EVFILT_PROC)
events[i].ident = -1;
}

View file

@ -45,6 +45,10 @@
#define HAVE_IFADDRS_H 1
# if defined(__ANDROID_API__) && __ANDROID_API__ < 24
# undef HAVE_IFADDRS_H
#endif
#ifdef __UCLIBC__
# if __UCLIBC_MAJOR__ < 0 && __UCLIBC_MINOR__ < 9 && __UCLIBC_SUBLEVEL__ < 32
# undef HAVE_IFADDRS_H
@ -52,11 +56,7 @@
#endif
#ifdef HAVE_IFADDRS_H
# if defined(__ANDROID__)
# include "uv/android-ifaddrs.h"
# else
# include <ifaddrs.h>
# endif
# include <sys/socket.h>
# include <net/ethernet.h>
# include <netpacket/packet.h>
@ -211,31 +211,6 @@ err:
return UV_EINVAL;
}
static int uv__slurp(const char* filename, char* buf, size_t len) {
ssize_t n;
int fd;
assert(len > 0);
fd = uv__open_cloexec(filename, O_RDONLY);
if (fd < 0)
return fd;
do
n = read(fd, buf, len - 1);
while (n == -1 && errno == EINTR);
if (uv__close_nocheckstdio(fd))
abort();
if (n < 0)
return UV__ERR(errno);
buf[n] = '\0';
return 0;
}
int uv_uptime(double* uptime) {
static volatile int no_clock_boottime;
char buf[128];
@ -641,6 +616,7 @@ static uint64_t read_cpufreq(unsigned int cpunum) {
}
#ifdef HAVE_IFADDRS_H
static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
return 1;
@ -654,6 +630,7 @@ static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
return exclude_type;
return !exclude_type;
}
#endif
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
#ifndef HAVE_IFADDRS_H

View file

@ -284,6 +284,8 @@ int epoll_wait(uv__os390_epoll* lst, struct epoll_event* events,
nmsgsfds_t size;
struct pollfd* pfds;
int pollret;
int pollfdret;
int pollmsgret;
int reventcount;
int nevents;
struct pollfd msg_fd;
@ -304,24 +306,24 @@ int epoll_wait(uv__os390_epoll* lst, struct epoll_event* events,
return -1;
}
if (lst->size > 0)
assert(lst->size > 0);
_SET_FDS_MSGS(size, 1, lst->size - 1);
else
_SET_FDS_MSGS(size, 0, 0);
pfds = lst->items;
pollret = poll(pfds, size, timeout);
if (pollret <= 0)
return pollret;
assert(lst->size > 0);
pollret = _NFDS(pollret) + _NMSGS(pollret);
pollfdret = _NFDS(pollret);
pollmsgret = _NMSGS(pollret);
reventcount = 0;
nevents = 0;
msg_fd = pfds[lst->size - 1];
msg_fd = pfds[lst->size - 1]; /* message queue is always last entry */
maxevents = maxevents - pollmsgret; /* allow spot for message queue */
for (i = 0;
i < lst->size && i < maxevents && reventcount < pollret; ++i) {
i < lst->size - 1 &&
nevents < maxevents &&
reventcount < pollfdret; ++i) {
struct epoll_event ev;
struct pollfd* pfd;
@ -332,18 +334,18 @@ int epoll_wait(uv__os390_epoll* lst, struct epoll_event* events,
ev.fd = pfd->fd;
ev.events = pfd->revents;
ev.is_msg = 0;
if (pfd->revents & POLLIN && pfd->revents & POLLOUT)
reventcount += 2;
else if (pfd->revents & (POLLIN | POLLOUT))
++reventcount;
pfd->revents = 0;
reventcount++;
events[nevents++] = ev;
}
if (msg_fd.revents != 0 && msg_fd.fd != -1)
if (i == lst->size)
events[nevents - 1].is_msg = 1;
if (pollmsgret > 0 && msg_fd.revents != 0 && msg_fd.fd != -1) {
struct epoll_event ev;
ev.fd = msg_fd.fd;
ev.events = msg_fd.revents;
ev.is_msg = 1;
events[nevents++] = ev;
}
return nevents;
}

View file

@ -278,7 +278,9 @@ static int uv__interface_addresses_v6(uv_interface_address_t** addresses,
__net_ifconf6header_t ifc;
__net_ifconf6entry_t* ifr;
__net_ifconf6entry_t* p;
__net_ifconf6entry_t flg;
unsigned int i;
int count_names;
unsigned char netmask[16] = {0};
*count = 0;
/* Assume maximum buffer size allowable */
@ -287,24 +289,33 @@ static int uv__interface_addresses_v6(uv_interface_address_t** addresses,
if (0 > (sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP)))
return UV__ERR(errno);
ifc.__nif6h_version = 1;
ifc.__nif6h_buflen = maxsize;
ifc.__nif6h_buffer = uv__calloc(1, maxsize);;
ifc.__nif6h_buffer = uv__calloc(1, maxsize);
if (ioctl(sockfd, SIOCGIFCONF6, &ifc) == -1) {
if (ifc.__nif6h_buffer == NULL) {
uv__close(sockfd);
return UV__ERR(errno);
return UV_ENOMEM;
}
ifc.__nif6h_version = 1;
ifc.__nif6h_buflen = maxsize;
if (ioctl(sockfd, SIOCGIFCONF6, &ifc) == -1) {
/* This will error on a system that does not support IPv6. However, we want
* to treat this as there being 0 interfaces so we can continue to get IPv4
* interfaces in uv_interface_addresses(). So return 0 instead of the error.
*/
uv__free(ifc.__nif6h_buffer);
uv__close(sockfd);
errno = 0;
return 0;
}
*count = 0;
ifr = (__net_ifconf6entry_t*)(ifc.__nif6h_buffer);
while ((char*)ifr < (char*)ifc.__nif6h_buffer + ifc.__nif6h_buflen) {
p = ifr;
ifr = (__net_ifconf6entry_t*)((char*)ifr + ifc.__nif6h_entrylen);
if (!(p->__nif6e_addr.sin6_family == AF_INET6 ||
p->__nif6e_addr.sin6_family == AF_INET))
if (!(p->__nif6e_addr.sin6_family == AF_INET6))
continue;
if (!(p->__nif6e_flags & _NIF6E_FLAGS_ON_LINK_ACTIVE))
@ -313,21 +324,28 @@ static int uv__interface_addresses_v6(uv_interface_address_t** addresses,
++(*count);
}
if ((*count) == 0) {
uv__free(ifc.__nif6h_buffer);
uv__close(sockfd);
return 0;
}
/* Alloc the return interface structs */
*addresses = uv__malloc(*count * sizeof(uv_interface_address_t));
*addresses = uv__calloc(1, *count * sizeof(uv_interface_address_t));
if (!(*addresses)) {
uv__free(ifc.__nif6h_buffer);
uv__close(sockfd);
return UV_ENOMEM;
}
address = *addresses;
count_names = 0;
ifr = (__net_ifconf6entry_t*)(ifc.__nif6h_buffer);
while ((char*)ifr < (char*)ifc.__nif6h_buffer + ifc.__nif6h_buflen) {
p = ifr;
ifr = (__net_ifconf6entry_t*)((char*)ifr + ifc.__nif6h_entrylen);
if (!(p->__nif6e_addr.sin6_family == AF_INET6 ||
p->__nif6e_addr.sin6_family == AF_INET))
if (!(p->__nif6e_addr.sin6_family == AF_INET6))
continue;
if (!(p->__nif6e_flags & _NIF6E_FLAGS_ON_LINK_ACTIVE))
@ -335,20 +353,41 @@ static int uv__interface_addresses_v6(uv_interface_address_t** addresses,
/* All conditions above must match count loop */
address->name = uv__strdup(p->__nif6e_name);
i = 0;
/* Ignore EBCDIC space (0x40) padding in name */
while (i < ARRAY_SIZE(p->__nif6e_name) &&
p->__nif6e_name[i] != 0x40 &&
p->__nif6e_name[i] != 0)
++i;
address->name = uv__malloc(i + 1);
if (address->name == NULL) {
uv_free_interface_addresses(*addresses, count_names);
uv__free(ifc.__nif6h_buffer);
uv__close(sockfd);
return UV_ENOMEM;
}
memcpy(address->name, p->__nif6e_name, i);
address->name[i] = '\0';
__e2a_s(address->name);
count_names++;
if (p->__nif6e_addr.sin6_family == AF_INET6)
address->address.address6 = *((struct sockaddr_in6*) &p->__nif6e_addr);
else
address->address.address4 = *((struct sockaddr_in*) &p->__nif6e_addr);
/* TODO: Retrieve netmask using SIOCGIFNETMASK ioctl */
for (i = 0; i < (p->__nif6e_prefixlen / 8); i++)
netmask[i] = 0xFF;
address->is_internal = flg.__nif6e_flags & _NIF6E_FLAGS_LOOPBACK ? 1 : 0;
memset(address->phys_addr, 0, sizeof(address->phys_addr));
if (p->__nif6e_prefixlen % 8)
netmask[i] = 0xFF << (8 - (p->__nif6e_prefixlen % 8));
address->netmask.netmask6.sin6_len = p->__nif6e_prefixlen;
memcpy(&(address->netmask.netmask6.sin6_addr), netmask, 16);
address->netmask.netmask6.sin6_family = AF_INET6;
address->is_internal = p->__nif6e_flags & _NIF6E_FLAGS_LOOPBACK ? 1 : 0;
address++;
}
uv__free(ifc.__nif6h_buffer);
uv__close(sockfd);
return 0;
}
@ -362,14 +401,18 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
struct ifreq flg;
struct ifreq* ifr;
struct ifreq* p;
uv_interface_address_t* addresses_v6;
int count_v6;
unsigned int i;
int rc;
int count_names;
*count = 0;
*addresses = NULL;
/* get the ipv6 addresses first */
uv_interface_address_t* addresses_v6;
uv__interface_addresses_v6(&addresses_v6, &count_v6);
if ((rc = uv__interface_addresses_v6(&addresses_v6, &count_v6)) != 0)
return rc;
/* now get the ipv4 addresses */
@ -377,12 +420,27 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
maxsize = 16384;
sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
if (0 > sockfd)
if (0 > sockfd) {
if (count_v6)
uv_free_interface_addresses(addresses_v6, count_v6);
return UV__ERR(errno);
}
ifc.ifc_req = uv__calloc(1, maxsize);
if (ifc.ifc_req == NULL) {
if (count_v6)
uv_free_interface_addresses(addresses_v6, count_v6);
uv__close(sockfd);
return UV_ENOMEM;
}
ifc.ifc_len = maxsize;
if (ioctl(sockfd, SIOCGIFCONF, &ifc) == -1) {
if (count_v6)
uv_free_interface_addresses(addresses_v6, count_v6);
uv__free(ifc.ifc_req);
uv__close(sockfd);
return UV__ERR(errno);
}
@ -403,6 +461,9 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
if (count_v6)
uv_free_interface_addresses(addresses_v6, count_v6);
uv__free(ifc.ifc_req);
uv__close(sockfd);
return UV__ERR(errno);
}
@ -413,27 +474,35 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
(*count)++;
}
if (*count == 0) {
if (*count == 0 && count_v6 == 0) {
uv__free(ifc.ifc_req);
uv__close(sockfd);
return 0;
}
/* Alloc the return interface structs */
*addresses = uv__malloc((*count + count_v6) *
*addresses = uv__calloc(1, (*count + count_v6) *
sizeof(uv_interface_address_t));
if (!(*addresses)) {
if (count_v6)
uv_free_interface_addresses(addresses_v6, count_v6);
uv__free(ifc.ifc_req);
uv__close(sockfd);
return UV_ENOMEM;
}
address = *addresses;
/* copy over the ipv6 addresses */
/* copy over the ipv6 addresses if any are found */
if (count_v6) {
memcpy(address, addresses_v6, count_v6 * sizeof(uv_interface_address_t));
address += count_v6;
*count += count_v6;
/* free ipv6 addresses, but keep address names */
uv__free(addresses_v6);
}
count_names = *count;
ifr = ifc.ifc_req;
while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
p = ifr;
@ -446,6 +515,8 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
uv_free_interface_addresses(*addresses, count_names);
uv__free(ifc.ifc_req);
uv__close(sockfd);
return UV_ENOSYS;
}
@ -455,22 +526,43 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
/* All conditions above must match count loop */
address->name = uv__strdup(p->ifr_name);
i = 0;
/* Ignore EBCDIC space (0x40) padding in name */
while (i < ARRAY_SIZE(p->ifr_name) &&
p->ifr_name[i] != 0x40 &&
p->ifr_name[i] != 0)
++i;
address->name = uv__malloc(i + 1);
if (address->name == NULL) {
uv_free_interface_addresses(*addresses, count_names);
uv__free(ifc.ifc_req);
uv__close(sockfd);
return UV_ENOMEM;
}
memcpy(address->name, p->ifr_name, i);
address->name[i] = '\0';
__e2a_s(address->name);
count_names++;
if (p->ifr_addr.sa_family == AF_INET6) {
address->address.address6 = *((struct sockaddr_in6*) &p->ifr_addr);
} else {
address->address.address4 = *((struct sockaddr_in*) &p->ifr_addr);
if (ioctl(sockfd, SIOCGIFNETMASK, p) == -1) {
uv_free_interface_addresses(*addresses, count_names);
uv__free(ifc.ifc_req);
uv__close(sockfd);
return UV__ERR(errno);
}
address->netmask.netmask4 = *((struct sockaddr_in*) &p->ifr_addr);
address->netmask.netmask4.sin_family = AF_INET;
address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0;
memset(address->phys_addr, 0, sizeof(address->phys_addr));
address++;
}
#undef ADDR_SIZE
#undef MAX
uv__free(ifc.ifc_req);
uv__close(sockfd);
return 0;
}
@ -529,27 +621,17 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) {
}
void uv__fs_event_close(uv_fs_event_t* handle) {
uv_fs_event_stop(handle);
}
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
return 0;
}
int uv_fs_event_start(uv_fs_event_t* handle, uv_fs_event_cb cb,
const char* filename, unsigned int flags) {
static int os390_regfileint(uv_fs_event_t* handle, char* path) {
uv__os390_epoll* ep;
_RFIS reg_struct;
char* path;
int rc;
if (uv__is_active(handle))
return UV_EINVAL;
ep = handle->loop->ep;
assert(ep->msg_queue != -1);
@ -558,17 +640,10 @@ int uv_fs_event_start(uv_fs_event_t* handle, uv_fs_event_cb cb,
reg_struct.__rfis_type = 1;
memcpy(reg_struct.__rfis_utok, &handle, sizeof(handle));
path = uv__strdup(filename);
if (path == NULL)
return UV_ENOMEM;
rc = __w_pioctl(path, _IOCC_REGFILEINT, sizeof(reg_struct), &reg_struct);
if (rc != 0)
return UV__ERR(errno);
uv__handle_start(handle);
handle->path = path;
handle->cb = cb;
memcpy(handle->rfis_rftok, reg_struct.__rfis_rftok,
sizeof(handle->rfis_rftok));
@ -576,7 +651,33 @@ int uv_fs_event_start(uv_fs_event_t* handle, uv_fs_event_cb cb,
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
int uv_fs_event_start(uv_fs_event_t* handle, uv_fs_event_cb cb,
const char* filename, unsigned int flags) {
char* path;
int rc;
if (uv__is_active(handle))
return UV_EINVAL;
path = uv__strdup(filename);
if (path == NULL)
return UV_ENOMEM;
rc = os390_regfileint(handle, path);
if (rc != 0) {
uv__free(path);
return rc;
}
uv__handle_start(handle);
handle->path = path;
handle->cb = cb;
return 0;
}
int uv__fs_event_stop(uv_fs_event_t* handle) {
uv__os390_epoll* ep;
_RFIS reg_struct;
int rc;
@ -602,12 +703,40 @@ int uv_fs_event_stop(uv_fs_event_t* handle) {
if (rc != 0 && errno != EALREADY && errno != ENOENT)
abort();
if (handle->path != NULL) {
uv__free(handle->path);
handle->path = NULL;
}
if (rc != 0 && errno == EALREADY)
return -1;
uv__handle_stop(handle);
return 0;
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
uv__fs_event_stop(handle);
return 0;
}
void uv__fs_event_close(uv_fs_event_t* handle) {
/*
* If we were unable to unregister file interest here, then it is most likely
* that there is a pending queued change notification. When this happens, we
* don't want to complete the close as it will free the underlying memory for
* the handle, causing a use-after-free problem when the event is processed.
* We defer the final cleanup until after the event is consumed in
* os390_message_queue_handler().
*/
if (uv__fs_event_stop(handle) == 0)
uv__make_close_pending((uv_handle_t*) handle);
}
static int os390_message_queue_handler(uv__os390_epoll* ep) {
uv_fs_event_t* handle;
int msglen;
@ -628,7 +757,15 @@ static int os390_message_queue_handler(uv__os390_epoll* ep) {
events = 0;
if (msg.__rfim_event == _RFIM_ATTR || msg.__rfim_event == _RFIM_WRITE)
events = UV_CHANGE;
else if (msg.__rfim_event == _RFIM_RENAME)
else if (msg.__rfim_event == _RFIM_RENAME || msg.__rfim_event == _RFIM_UNLINK)
events = UV_RENAME;
else if (msg.__rfim_event == 156)
/* TODO(gabylb): zos - this event should not happen, need to investigate.
*
* This event seems to occur when the watched file is [re]moved, or an
* editor (like vim) renames then creates the file on save (for vim, that's
* when backupcopy=no|auto).
*/
events = UV_RENAME;
else
/* Some event that we are not interested in. */
@ -639,6 +776,26 @@ static int os390_message_queue_handler(uv__os390_epoll* ep) {
*/
__a2e_l(msg.__rfim_utok, sizeof(msg.__rfim_utok));
handle = *(uv_fs_event_t**)(msg.__rfim_utok);
assert(handle != NULL);
assert((handle->flags & UV_HANDLE_CLOSED) == 0);
if (uv__is_closing(handle)) {
uv__handle_stop(handle);
uv__make_close_pending((uv_handle_t*) handle);
return 0;
} else if (handle->path == NULL) {
/* _RFIS_UNREG returned EALREADY. */
uv__handle_stop(handle);
return 0;
}
/* The file is implicitly unregistered when the change notification is
* sent, only one notification is sent per registration. So we need to
* re-register interest in a file after each change notification we
* receive.
*/
assert(handle->path != NULL);
os390_regfileint(handle, handle->path);
handle->cb(handle, uv__basename_r(handle->path), events, 0);
return 1;
}
@ -650,6 +807,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
struct epoll_event* pe;
struct epoll_event e;
uv__os390_epoll* ep;
int have_signals;
int real_timeout;
QUEUE* q;
uv__io_t* w;
@ -712,6 +870,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
count = 48; /* Benchmarks suggest this gives the best throughput. */
real_timeout = timeout;
int nevents = 0;
have_signals = 0;
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
@ -796,6 +955,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
ep = loop->ep;
if (pe->is_msg) {
os390_message_queue_handler(ep);
nevents++;
continue;
}
@ -825,19 +985,35 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
pe->events |= w->pevents & (POLLIN | POLLOUT);
if (pe->events != 0) {
/* Run signal watchers last. This also affects child process watchers
* because those are implemented in terms of signal watchers.
*/
if (w == &loop->signal_io_watcher) {
have_signals = 1;
} else {
uv__metrics_update_idle_time(loop);
w->cb(loop, w, pe->events);
}
nevents++;
}
}
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (have_signals != 0) {
uv__metrics_update_idle_time(loop);
loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
}
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
if (have_signals != 0)
return; /* Event loop should cycle now so don't poll again. */
if (nevents != 0) {
if (nfds == ARRAY_SIZE(events) && --count != 0) {
/* Poll for more events but don't block this time. */
@ -872,6 +1048,5 @@ int uv__io_fork(uv_loop_t* loop) {
*/
loop->ep = NULL;
uv__platform_loop_delete(loop);
return uv__platform_loop_init(loop);
}

View file

@ -51,7 +51,9 @@ int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
/* Already bound? */
if (uv__stream_fd(handle) >= 0)
return UV_EINVAL;
if (uv__is_closing(handle)) {
return UV_EINVAL;
}
/* Make a copy of the file name, it outlives this function's scope. */
pipe_fname = uv__strdup(name);
if (pipe_fname == NULL)
@ -91,7 +93,7 @@ err_socket:
}
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
int uv__pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
if (uv__stream_fd(handle) == -1)
return UV_EINVAL;
@ -319,7 +321,7 @@ uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle) {
if (handle->accepted_fd == -1)
return UV_UNKNOWN_HANDLE;
else
return uv__handle_type(handle->accepted_fd);
return uv_guess_handle(handle->accepted_fd);
}

View file

@ -27,6 +27,7 @@
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
@ -34,9 +35,22 @@
#include <fcntl.h>
#include <poll.h>
#if defined(__APPLE__) && !TARGET_OS_IPHONE
#if defined(__APPLE__)
# include <spawn.h>
# include <paths.h>
# include <sys/kauth.h>
# include <sys/types.h>
# include <sys/sysctl.h>
# include <dlfcn.h>
# include <crt_externs.h>
# include <xlocale.h>
# define environ (*_NSGetEnviron())
/* macOS 10.14 and earlier do not define this constant */
# ifndef POSIX_SPAWN_SETSID
# define POSIX_SPAWN_SETSID 1024
# endif
#else
extern char **environ;
#endif
@ -49,22 +63,36 @@ extern char **environ;
# include "zos-base.h"
#endif
#if defined(__APPLE__) || \
defined(__DragonFly__) || \
defined(__FreeBSD__) || \
defined(__NetBSD__) || \
defined(__OpenBSD__)
#include <sys/event.h>
#else
#define UV_USE_SIGCHLD
#endif
#ifdef UV_USE_SIGCHLD
static void uv__chld(uv_signal_t* handle, int signum) {
assert(signum == SIGCHLD);
uv__wait_children(handle->loop);
}
#endif
void uv__wait_children(uv_loop_t* loop) {
uv_process_t* process;
uv_loop_t* loop;
int exit_status;
int term_signal;
int status;
int options;
pid_t pid;
QUEUE pending;
QUEUE* q;
QUEUE* h;
assert(signum == SIGCHLD);
QUEUE_INIT(&pending);
loop = handle->loop;
h = &loop->process_handles;
q = QUEUE_HEAD(h);
@ -72,19 +100,33 @@ static void uv__chld(uv_signal_t* handle, int signum) {
process = QUEUE_DATA(q, uv_process_t, queue);
q = QUEUE_NEXT(q);
#ifndef UV_USE_SIGCHLD
if ((process->flags & UV_HANDLE_REAP) == 0)
continue;
options = 0;
process->flags &= ~UV_HANDLE_REAP;
#else
options = WNOHANG;
#endif
do
pid = waitpid(process->pid, &status, WNOHANG);
pid = waitpid(process->pid, &status, options);
while (pid == -1 && errno == EINTR);
if (pid == 0)
#ifdef UV_USE_SIGCHLD
if (pid == 0) /* Not yet exited */
continue;
#endif
if (pid == -1) {
if (errno != ECHILD)
abort();
/* The child died, and we missed it. This probably means someone else
* stole the waitpid from us. Handle this by not handling it at all. */
continue;
}
assert(pid == process->pid);
process->status = status;
QUEUE_REMOVE(&process->queue);
QUEUE_INSERT_TAIL(&pending, &process->queue);
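
The body of the reap loop is the familiar non-blocking waitpid idiom, retried on EINTR. In isolation (a sketch, not libuv code; try_reap is an illustrative name):

#include <sys/types.h>
#include <sys/wait.h>
#include <errno.h>

/* Returns 1 if `pid` has exited (status filled in), 0 if it is still running
 * or was already reaped elsewhere, -1 on any other error. */
static int try_reap(pid_t pid, int* status) {
  pid_t r;

  do
    r = waitpid(pid, status, WNOHANG);
  while (r == -1 && errno == EINTR);

  if (r == (pid_t) 0)
    return 0;                         /* child exists but has not exited yet */
  if (r == (pid_t) -1)
    return errno == ECHILD ? 0 : -1;  /* ECHILD: someone else reaped it */
  return 1;                           /* r == pid, *status is valid */
}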
@ -195,16 +237,14 @@ static void uv__write_int(int fd, int val) {
n = write(fd, &val, sizeof(val));
while (n == -1 && errno == EINTR);
if (n == -1 && errno == EPIPE)
return; /* parent process has quit */
assert(n == sizeof(val));
/* The write might have failed (e.g. if the parent process has died),
* but we have nothing left but to _exit ourself now too. */
_exit(127);
}
static void uv__write_errno(int error_fd) {
uv__write_int(error_fd, UV__ERR(errno));
_exit(127);
}
@ -254,22 +294,31 @@ static void uv__process_child_init(const uv_process_options_t* options,
use_fd = pipes[fd][1];
if (use_fd < 0 || use_fd >= fd)
continue;
#ifdef F_DUPFD_CLOEXEC /* POSIX 2008 */
pipes[fd][1] = fcntl(use_fd, F_DUPFD_CLOEXEC, stdio_count);
#else
pipes[fd][1] = fcntl(use_fd, F_DUPFD, stdio_count);
#endif
if (pipes[fd][1] == -1)
uv__write_errno(error_fd);
#ifndef F_DUPFD_CLOEXEC /* POSIX 2008 */
n = uv__cloexec(pipes[fd][1], 1);
if (n)
uv__write_int(error_fd, n);
#endif
}
for (fd = 0; fd < stdio_count; fd++) {
close_fd = pipes[fd][0];
close_fd = -1;
use_fd = pipes[fd][1];
if (use_fd < 0) {
if (fd >= 3)
continue;
else {
/* redirect stdin, stdout and stderr to /dev/null even if UV_IGNORE is
* set
*/
/* Redirect stdin, stdout and stderr to /dev/null even if UV_IGNORE is
* set. */
uv__close_nocheckstdio(fd); /* Free up fd, if it happens to be open. */
use_fd = open("/dev/null", fd == 0 ? O_RDONLY : O_RDWR);
close_fd = use_fd;
@ -278,28 +327,27 @@ static void uv__process_child_init(const uv_process_options_t* options,
}
}
if (fd == use_fd)
uv__cloexec_fcntl(use_fd, 0);
else
if (fd == use_fd) {
if (close_fd == -1) {
n = uv__cloexec(use_fd, 0);
if (n)
uv__write_int(error_fd, n);
}
}
else {
fd = dup2(use_fd, fd);
}
if (fd == -1)
uv__write_errno(error_fd);
if (fd <= 2)
if (fd <= 2 && close_fd == -1)
uv__nonblock_fcntl(fd, 0);
if (close_fd >= stdio_count)
uv__close(close_fd);
}
for (fd = 0; fd < stdio_count; fd++) {
use_fd = pipes[fd][1];
if (use_fd >= stdio_count)
uv__close(use_fd);
}
if (options->cwd != NULL && chdir(options->cwd))
uv__write_errno(error_fd);
@ -320,9 +368,8 @@ static void uv__process_child_init(const uv_process_options_t* options,
if ((options->flags & UV_PROCESS_SETUID) && setuid(options->uid))
uv__write_errno(error_fd);
if (options->env != NULL) {
if (options->env != NULL)
environ = options->env;
}
/* Reset signal mask just before exec. */
sigemptyset(&signewset);
@ -336,11 +383,555 @@ static void uv__process_child_init(const uv_process_options_t* options,
#endif
uv__write_errno(error_fd);
abort();
}
#endif
#if defined(__APPLE__)
typedef struct uv__posix_spawn_fncs_tag {
struct {
int (*addchdir_np)(const posix_spawn_file_actions_t *, const char *);
} file_actions;
} uv__posix_spawn_fncs_t;
static uv_once_t posix_spawn_init_once = UV_ONCE_INIT;
static uv__posix_spawn_fncs_t posix_spawn_fncs;
static int posix_spawn_can_use_setsid;
static void uv__spawn_init_posix_spawn_fncs(void) {
/* Try to locate all non-portable functions at runtime */
posix_spawn_fncs.file_actions.addchdir_np =
dlsym(RTLD_DEFAULT, "posix_spawn_file_actions_addchdir_np");
}
static void uv__spawn_init_can_use_setsid(void) {
int which[] = {CTL_KERN, KERN_OSRELEASE};
unsigned major;
unsigned minor;
unsigned patch;
char buf[256];
size_t len;
len = sizeof(buf);
if (sysctl(which, ARRAY_SIZE(which), buf, &len, NULL, 0))
return;
/* NULL specifies to use LC_C_LOCALE */
if (3 != sscanf_l(buf, NULL, "%u.%u.%u", &major, &minor, &patch))
return;
posix_spawn_can_use_setsid = (major >= 19); /* macOS Catalina */
}
static void uv__spawn_init_posix_spawn(void) {
/* Init handles to all potentially non-defined functions */
uv__spawn_init_posix_spawn_fncs();
/* Init feature detection for POSIX_SPAWN_SETSID flag */
uv__spawn_init_can_use_setsid();
}
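
dlsym(RTLD_DEFAULT, ...) is the runtime probe used above because posix_spawn_file_actions_addchdir_np() only exists on newer macOS releases. The same pattern in isolation (a sketch, assuming a macOS toolchain where <dlfcn.h> exposes RTLD_DEFAULT; the printed message is illustrative only):

#include <dlfcn.h>
#include <spawn.h>
#include <stdio.h>

int main(void) {
  int (*addchdir_np)(posix_spawn_file_actions_t*, const char*);

  /* NULL means the symbol is absent on this OS release, so the caller must
   * keep a fallback path (here: the classic fork/exec flow). */
  addchdir_np = (int (*)(posix_spawn_file_actions_t*, const char*))
      dlsym(RTLD_DEFAULT, "posix_spawn_file_actions_addchdir_np");
  printf("addchdir_np is %s\n", addchdir_np != NULL ? "available" : "missing");
  return 0;
}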
static int uv__spawn_set_posix_spawn_attrs(
posix_spawnattr_t* attrs,
const uv__posix_spawn_fncs_t* posix_spawn_fncs,
const uv_process_options_t* options) {
int err;
unsigned int flags;
sigset_t signal_set;
err = posix_spawnattr_init(attrs);
if (err != 0) {
/* If initialization fails, no need to de-init, just return */
return err;
}
if (options->flags & (UV_PROCESS_SETUID | UV_PROCESS_SETGID)) {
/* kauth_cred_issuser currently requires exactly uid == 0 for these
* posixspawn_attrs (set_groups_np, setuid_np, setgid_np), which deviates
* from the normal specification of setuid (which also uses euid), and they
* are also undocumented syscalls, so we do not use them. */
err = ENOSYS;
goto error;
}
/* Set flags for spawn behavior
* 1) POSIX_SPAWN_CLOEXEC_DEFAULT: (Apple Extension) All descriptors in the
* parent will be treated as if they had been created with O_CLOEXEC. The
* only fds that will be passed on to the child are those manipulated by
* the file actions
* 2) POSIX_SPAWN_SETSIGDEF: Signals mentioned in spawn-sigdefault in the
* spawn attributes will be reset to behave as their default
* 3) POSIX_SPAWN_SETSIGMASK: Signal mask will be set to the value of
* spawn-sigmask in attributes
* 4) POSIX_SPAWN_SETSID: Make the process a new session leader if a detached
* session was requested. */
flags = POSIX_SPAWN_CLOEXEC_DEFAULT |
POSIX_SPAWN_SETSIGDEF |
POSIX_SPAWN_SETSIGMASK;
if (options->flags & UV_PROCESS_DETACHED) {
/* If running on a version of macOS where this flag is not supported,
* revert back to the fork/exec flow. Otherwise posix_spawn will
* silently ignore the flag. */
if (!posix_spawn_can_use_setsid) {
err = ENOSYS;
goto error;
}
flags |= POSIX_SPAWN_SETSID;
}
err = posix_spawnattr_setflags(attrs, flags);
if (err != 0)
goto error;
/* Reset all signal the child to their default behavior */
sigfillset(&signal_set);
err = posix_spawnattr_setsigdefault(attrs, &signal_set);
if (err != 0)
goto error;
/* Reset the signal mask for all signals */
sigemptyset(&signal_set);
err = posix_spawnattr_setsigmask(attrs, &signal_set);
if (err != 0)
goto error;
return err;
error:
(void) posix_spawnattr_destroy(attrs);
return err;
}
static int uv__spawn_set_posix_spawn_file_actions(
posix_spawn_file_actions_t* actions,
const uv__posix_spawn_fncs_t* posix_spawn_fncs,
const uv_process_options_t* options,
int stdio_count,
int (*pipes)[2]) {
int fd;
int fd2;
int use_fd;
int err;
err = posix_spawn_file_actions_init(actions);
if (err != 0) {
/* If initialization fails, no need to de-init, just return */
return err;
}
/* Set the current working directory if requested */
if (options->cwd != NULL) {
if (posix_spawn_fncs->file_actions.addchdir_np == NULL) {
err = ENOSYS;
goto error;
}
err = posix_spawn_fncs->file_actions.addchdir_np(actions, options->cwd);
if (err != 0)
goto error;
}
/* Do not return ENOSYS after this point, as we may mutate pipes. */
/* First duplicate low numbered fds, since it's not safe to duplicate them,
* they could get replaced. Example: swapping stdout and stderr; without
* this fd 2 (stderr) would be duplicated into fd 1, thus making both
* stdout and stderr go to the same fd, which was not the intention. */
for (fd = 0; fd < stdio_count; fd++) {
use_fd = pipes[fd][1];
if (use_fd < 0 || use_fd >= fd)
continue;
use_fd = stdio_count;
for (fd2 = 0; fd2 < stdio_count; fd2++) {
/* If we were not setting POSIX_SPAWN_CLOEXEC_DEFAULT, we would need to
* also consider whether fcntl(fd, F_GETFD) returned without the
* FD_CLOEXEC flag set. */
if (pipes[fd2][1] == use_fd) {
use_fd++;
fd2 = 0;
}
}
err = posix_spawn_file_actions_adddup2(
actions,
pipes[fd][1],
use_fd);
assert(err != ENOSYS);
if (err != 0)
goto error;
pipes[fd][1] = use_fd;
}
/* Second, move the descriptors into their respective places */
for (fd = 0; fd < stdio_count; fd++) {
use_fd = pipes[fd][1];
if (use_fd < 0) {
if (fd >= 3)
continue;
else {
/* If ignored, redirect to (or from) /dev/null. */
err = posix_spawn_file_actions_addopen(
actions,
fd,
"/dev/null",
fd == 0 ? O_RDONLY : O_RDWR,
0);
assert(err != ENOSYS);
if (err != 0)
goto error;
continue;
}
}
if (fd == use_fd)
err = posix_spawn_file_actions_addinherit_np(actions, fd);
else
err = posix_spawn_file_actions_adddup2(actions, use_fd, fd);
assert(err != ENOSYS);
if (err != 0)
goto error;
/* Make sure the fd is marked as non-blocking (state shared between child
* and parent). */
uv__nonblock_fcntl(use_fd, 0);
}
/* Finally, close all the superfluous descriptors */
for (fd = 0; fd < stdio_count; fd++) {
use_fd = pipes[fd][1];
if (use_fd < stdio_count)
continue;
/* Check if we already closed this. */
for (fd2 = 0; fd2 < fd; fd2++) {
if (pipes[fd2][1] == use_fd)
break;
}
if (fd2 < fd)
continue;
err = posix_spawn_file_actions_addclose(actions, use_fd);
assert(err != ENOSYS);
if (err != 0)
goto error;
}
return 0;
error:
(void) posix_spawn_file_actions_destroy(actions);
return err;
}
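
The "move low fds out of the way first" pass above exists because of a classic dup2 ordering hazard: when swapping two stdio slots, a naive sequence overwrites one of them before it has been copied. A tiny standalone illustration (hypothetical, not libuv code):

#include <unistd.h>

/* Swap stdout (1) and stderr (2) without losing either descriptor. */
static void swap_stdout_stderr(void) {
  int spare = dup(1);   /* park old stdout on a spare fd first */

  dup2(2, 1);           /* fd 1 now refers to old stderr */
  dup2(spare, 2);       /* fd 2 now refers to old stdout */
  close(spare);
  /* A naive dup2(2, 1); dup2(1, 2); would leave both fds on old stderr. */
}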
char* uv__spawn_find_path_in_env(char** env) {
char** env_iterator;
const char path_var[] = "PATH=";
/* Look for an environment variable called PATH in the
* provided env array, and return its value if found */
for (env_iterator = env; *env_iterator != NULL; env_iterator++) {
if (strncmp(*env_iterator, path_var, sizeof(path_var) - 1) == 0) {
/* Found "PATH=" at the beginning of the string */
return *env_iterator + sizeof(path_var) - 1;
}
}
return NULL;
}
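
A hypothetical caller of the helper above (sketch only; the environment strings are made up):

#include <stddef.h>

static const char* lookup_path(void) {
  static char* env[] = { "HOME=/root", "PATH=/usr/local/bin:/usr/bin", NULL };
  char* path = uv__spawn_find_path_in_env(env);

  /* path points at "/usr/local/bin:/usr/bin"; a NULL return means there is
   * no "PATH=" entry, and the spawn code below falls back to _PATH_DEFPATH. */
  return path;
}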
static int uv__spawn_resolve_and_spawn(const uv_process_options_t* options,
posix_spawnattr_t* attrs,
posix_spawn_file_actions_t* actions,
pid_t* pid) {
const char *p;
const char *z;
const char *path;
size_t l;
size_t k;
int err;
int seen_eacces;
path = NULL;
err = -1;
seen_eacces = 0;
/* Short circuit for erroneous case */
if (options->file == NULL)
return ENOENT;
/* The environment for the child process is that of the parent unless overridden
* by options->env */
char** env = environ;
if (options->env != NULL)
env = options->env;
/* If options->file contains a slash, posix_spawn/posix_spawnp should behave
* the same, and do not involve PATH resolution at all. The libc
* `posix_spawnp` provided by Apple is buggy (since 10.15), so we now emulate it
* here, per https://github.com/libuv/libuv/pull/3583. */
if (strchr(options->file, '/') != NULL) {
do
err = posix_spawn(pid, options->file, actions, attrs, options->args, env);
while (err == EINTR);
return err;
}
/* Look for the definition of PATH in the provided env */
path = uv__spawn_find_path_in_env(env);
/* The following resolution logic (execvpe emulation) is copied from
* https://git.musl-libc.org/cgit/musl/tree/src/process/execvp.c
* and adapted to work for our specific usage */
/* If no path was provided in env, use the default value
* to look for the executable */
if (path == NULL)
path = _PATH_DEFPATH;
k = strnlen(options->file, NAME_MAX + 1);
if (k > NAME_MAX)
return ENAMETOOLONG;
l = strnlen(path, PATH_MAX - 1) + 1;
for (p = path;; p = z) {
/* Compose the new process file from the entry in the PATH
* environment variable and the actual file name */
char b[PATH_MAX + NAME_MAX];
z = strchr(p, ':');
if (!z)
z = p + strlen(p);
if ((size_t)(z - p) >= l) {
if (!*z++)
break;
continue;
}
memcpy(b, p, z - p);
b[z - p] = '/';
memcpy(b + (z - p) + (z > p), options->file, k + 1);
/* Try to spawn the new process file. If it fails with ENOENT, the
* new process file is not in this PATH entry, continue with the next
* PATH entry. */
do
err = posix_spawn(pid, b, actions, attrs, options->args, env);
while (err == EINTR);
switch (err) {
case EACCES:
seen_eacces = 1;
break; /* continue search */
case ENOENT:
case ENOTDIR:
break; /* continue search */
default:
return err;
}
if (!*z++)
break;
}
if (seen_eacces)
return EACCES;
return err;
}
static int uv__spawn_and_init_child_posix_spawn(
const uv_process_options_t* options,
int stdio_count,
int (*pipes)[2],
pid_t* pid,
const uv__posix_spawn_fncs_t* posix_spawn_fncs) {
int err;
posix_spawnattr_t attrs;
posix_spawn_file_actions_t actions;
err = uv__spawn_set_posix_spawn_attrs(&attrs, posix_spawn_fncs, options);
if (err != 0)
goto error;
/* This may mutate pipes. */
err = uv__spawn_set_posix_spawn_file_actions(&actions,
posix_spawn_fncs,
options,
stdio_count,
pipes);
if (err != 0) {
(void) posix_spawnattr_destroy(&attrs);
goto error;
}
/* Try to spawn options->file resolving in the provided environment
* if any */
err = uv__spawn_resolve_and_spawn(options, &attrs, &actions, pid);
assert(err != ENOSYS);
/* Destroy the actions/attributes */
(void) posix_spawn_file_actions_destroy(&actions);
(void) posix_spawnattr_destroy(&attrs);
error:
/* In an error situation, the attributes and file actions are
* already destroyed, only the happy path requires cleanup */
return UV__ERR(err);
}
#endif
static int uv__spawn_and_init_child_fork(const uv_process_options_t* options,
int stdio_count,
int (*pipes)[2],
int error_fd,
pid_t* pid) {
sigset_t signewset;
sigset_t sigoldset;
/* Start the child with most signals blocked, to avoid any issues before we
* can reset them, but allow program failures to exit (and not hang). */
sigfillset(&signewset);
sigdelset(&signewset, SIGKILL);
sigdelset(&signewset, SIGSTOP);
sigdelset(&signewset, SIGTRAP);
sigdelset(&signewset, SIGSEGV);
sigdelset(&signewset, SIGBUS);
sigdelset(&signewset, SIGILL);
sigdelset(&signewset, SIGSYS);
sigdelset(&signewset, SIGABRT);
if (pthread_sigmask(SIG_BLOCK, &signewset, &sigoldset) != 0)
abort();
*pid = fork();
if (*pid == 0) {
/* Fork succeeded, in the child process */
uv__process_child_init(options, stdio_count, pipes, error_fd);
abort();
}
if (pthread_sigmask(SIG_SETMASK, &sigoldset, NULL) != 0)
abort();
if (*pid == -1)
/* Failed to fork */
return UV__ERR(errno);
/* Fork succeeded, in the parent process */
return 0;
}
static int uv__spawn_and_init_child(
uv_loop_t* loop,
const uv_process_options_t* options,
int stdio_count,
int (*pipes)[2],
pid_t* pid) {
int signal_pipe[2] = { -1, -1 };
int status;
int err;
int exec_errorno;
ssize_t r;
#if defined(__APPLE__)
uv_once(&posix_spawn_init_once, uv__spawn_init_posix_spawn);
/* Special child process spawn case for macOS Big Sur (11.0) onwards
*
* Big Sur introduced a significant performance degradation on a call to
* fork/exec when the process has many pages mmaped in with MAP_JIT, like, say
* a javascript interpreter. Electron-based applications, for example,
* are impacted; though the magnitude of the impact depends on how much the
* app relies on subprocesses.
*
* On macOS, though, posix_spawn is implemented in a way that does not
* exhibit the problem. This block implements the forking and preparation
* logic with posix_spawn and its related primitives. It also takes advantage of
* the macOS extension POSIX_SPAWN_CLOEXEC_DEFAULT that makes it impossible to
* leak descriptors to the child process. */
err = uv__spawn_and_init_child_posix_spawn(options,
stdio_count,
pipes,
pid,
&posix_spawn_fncs);
/* The posix_spawn flow will return UV_ENOSYS if any of the posix_spawn_x_np
* non-standard functions is both _needed_ and _undefined_. In those cases,
* default back to the fork/execve strategy. For all other errors, just fail. */
if (err != UV_ENOSYS)
return err;
#endif
/* This pipe is used by the parent to wait until
* the child has called `execve()`. We need this
* to avoid the following race condition:
*
* if ((pid = fork()) > 0) {
* kill(pid, SIGTERM);
* }
* else if (pid == 0) {
* execve("/bin/cat", argp, envp);
* }
*
* The parent sends a signal immediately after forking.
* Since the child may not have called `execve()` yet,
* there is no telling what process receives the signal,
* our fork or /bin/cat.
*
* To avoid ambiguity, we create a pipe with both ends
* marked close-on-exec. Then, after the call to `fork()`,
* the parent polls the read end until it EOFs or errors with EPIPE.
*/
err = uv__make_pipe(signal_pipe, 0);
if (err)
return err;
/* Acquire write lock to prevent opening new fds in worker threads */
uv_rwlock_wrlock(&loop->cloexec_lock);
err = uv__spawn_and_init_child_fork(options, stdio_count, pipes, signal_pipe[1], pid);
/* Release lock in parent process */
uv_rwlock_wrunlock(&loop->cloexec_lock);
uv__close(signal_pipe[1]);
if (err == 0) {
do
r = read(signal_pipe[0], &exec_errorno, sizeof(exec_errorno));
while (r == -1 && errno == EINTR);
if (r == 0)
; /* okay, EOF */
else if (r == sizeof(exec_errorno)) {
do
err = waitpid(*pid, &status, 0); /* okay, read errorno */
while (err == -1 && errno == EINTR);
assert(err == *pid);
err = exec_errorno;
} else if (r == -1 && errno == EPIPE) {
/* Something unknown happened to our child before spawn */
do
err = waitpid(*pid, &status, 0); /* okay, got EPIPE */
while (err == -1 && errno == EINTR);
assert(err == *pid);
err = UV_EPIPE;
} else
abort();
}
uv__close_nocheckstdio(signal_pipe[0]);
return err;
}
int uv_spawn(uv_loop_t* loop,
uv_process_t* process,
const uv_process_options_t* options) {
@ -348,18 +939,13 @@ int uv_spawn(uv_loop_t* loop,
/* fork is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED. */
return UV_ENOSYS;
#else
sigset_t signewset;
sigset_t sigoldset;
int signal_pipe[2] = { -1, -1 };
int pipes_storage[8][2];
int (*pipes)[2];
int stdio_count;
ssize_t r;
pid_t pid;
int err;
int exec_errorno;
int i;
int status;
assert(options->file != NULL);
assert(!(options->flags & ~(UV_PROCESS_DETACHED |
@ -372,6 +958,7 @@ int uv_spawn(uv_loop_t* loop,
uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS);
QUEUE_INIT(&process->queue);
process->status = 0;
stdio_count = options->stdio_count;
if (stdio_count < 3)
@ -396,91 +983,42 @@ int uv_spawn(uv_loop_t* loop,
goto error;
}
/* This pipe is used by the parent to wait until
* the child has called `execve()`. We need this
* to avoid the following race condition:
*
* if ((pid = fork()) > 0) {
* kill(pid, SIGTERM);
* }
* else if (pid == 0) {
* execve("/bin/cat", argp, envp);
* }
*
* The parent sends a signal immediately after forking.
* Since the child may not have called `execve()` yet,
* there is no telling what process receives the signal,
* our fork or /bin/cat.
*
* To avoid ambiguity, we create a pipe with both ends
* marked close-on-exec. Then, after the call to `fork()`,
* the parent polls the read end until it EOFs or errors with EPIPE.
*/
err = uv__make_pipe(signal_pipe, 0);
if (err)
goto error;
#ifdef UV_USE_SIGCHLD
uv_signal_start(&loop->child_watcher, uv__chld, SIGCHLD);
#endif
/* Acquire write lock to prevent opening new fds in worker threads */
uv_rwlock_wrlock(&loop->cloexec_lock);
/* Spawn the child */
exec_errorno = uv__spawn_and_init_child(loop, options, stdio_count, pipes, &pid);
/* Start the child with most signals blocked, to avoid any issues before we
* can reset them, but allow program failures to exit (and not hang). */
sigfillset(&signewset);
sigdelset(&signewset, SIGKILL);
sigdelset(&signewset, SIGSTOP);
sigdelset(&signewset, SIGTRAP);
sigdelset(&signewset, SIGSEGV);
sigdelset(&signewset, SIGBUS);
sigdelset(&signewset, SIGILL);
sigdelset(&signewset, SIGSYS);
sigdelset(&signewset, SIGABRT);
if (pthread_sigmask(SIG_BLOCK, &signewset, &sigoldset) != 0)
abort();
pid = fork();
if (pid == -1)
err = UV__ERR(errno);
if (pid == 0)
uv__process_child_init(options, stdio_count, pipes, signal_pipe[1]);
if (pthread_sigmask(SIG_SETMASK, &sigoldset, NULL) != 0)
abort();
/* Release lock in parent process */
uv_rwlock_wrunlock(&loop->cloexec_lock);
uv__close(signal_pipe[1]);
if (pid == -1) {
uv__close(signal_pipe[0]);
#if 0
/* This runs into a nodejs issue (it expects initialized streams, even if the
* exec failed).
* See https://github.com/libuv/libuv/pull/3107#issuecomment-782482608 */
if (exec_errorno != 0)
goto error;
}
#endif
process->status = 0;
exec_errorno = 0;
do
r = read(signal_pipe[0], &exec_errorno, sizeof(exec_errorno));
while (r == -1 && errno == EINTR);
if (r == 0)
; /* okay, EOF */
else if (r == sizeof(exec_errorno)) {
do
err = waitpid(pid, &status, 0); /* okay, read errorno */
while (err == -1 && errno == EINTR);
assert(err == pid);
} else if (r == -1 && errno == EPIPE) {
do
err = waitpid(pid, &status, 0); /* okay, got EPIPE */
while (err == -1 && errno == EINTR);
assert(err == pid);
} else
/* Activate this handle if exec() happened successfully, even if we later
* fail to open a stdio handle. This ensures we can eventually reap the child
* with waitpid. */
if (exec_errorno == 0) {
#ifndef UV_USE_SIGCHLD
struct kevent event;
EV_SET(&event, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT, 0, 0);
if (kevent(loop->backend_fd, &event, 1, NULL, 0, NULL)) {
if (errno != ESRCH)
abort();
/* Process already exited. Call waitpid on the next loop iteration. */
process->flags |= UV_HANDLE_REAP;
loop->flags |= UV_LOOP_REAP_CHILDREN;
}
#endif
uv__close_nocheckstdio(signal_pipe[0]);
process->pid = pid;
process->exit_cb = options->exit_cb;
QUEUE_INSERT_TAIL(&loop->process_handles, &process->queue);
uv__handle_start(process);
}
for (i = 0; i < options->stdio_count; i++) {
err = uv__process_open_stream(options->stdio + i, pipes[i]);
@ -493,15 +1031,6 @@ int uv_spawn(uv_loop_t* loop,
goto error;
}
/* Only activate this handle if exec() happened successfully */
if (exec_errorno == 0) {
QUEUE_INSERT_TAIL(&loop->process_handles, &process->queue);
uv__handle_start(process);
}
process->pid = pid;
process->exit_cb = options->exit_cb;
if (pipes != pipes_storage)
uv__free(pipes);
@ -534,9 +1063,16 @@ int uv_process_kill(uv_process_t* process, int signum) {
int uv_kill(int pid, int signum) {
if (kill(pid, signum))
if (kill(pid, signum)) {
#if defined(__MVS__)
/* EPERM is returned if the process is a zombie. */
siginfo_t infop;
if (errno == EPERM &&
waitid(P_PID, pid, &infop, WNOHANG | WNOWAIT | WEXITED) == 0)
return 0;
#endif
return UV__ERR(errno);
else
} else
return 0;
}
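A hedged usage sketch of the public uv_spawn() API whose Unix implementation the hunks above rework; the "sleep" command and the callback name are illustrative, not part of the change.

#include <stdio.h>
#include <string.h>
#include <uv.h>

static void on_child_exit(uv_process_t* req, int64_t exit_status, int term_signal) {
  printf("child exited: status %lld, signal %d\n",
         (long long) exit_status, term_signal);
  uv_close((uv_handle_t*) req, NULL);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_process_t child;
  uv_process_options_t options;
  char* args[] = { "sleep", "1", NULL };
  int r;

  memset(&options, 0, sizeof(options));  /* unused fields must be zeroed */
  options.exit_cb = on_child_exit;
  options.file = "sleep";
  options.args = args;

  r = uv_spawn(loop, &child, &options);
  if (r != 0) {
    fprintf(stderr, "uv_spawn: %s\n", uv_strerror(r));
    return 1;
  }
  return uv_run(loop, UV_RUN_DEFAULT);
}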
@ -66,6 +66,7 @@ static void uv__read(uv_stream_t* stream);
static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events);
static void uv__write_callbacks(uv_stream_t* stream);
static size_t uv__write_req_size(uv_write_t* req);
static void uv__drain(uv_stream_t* stream);
void uv__stream_init(uv_loop_t* loop,
@ -453,17 +454,7 @@ void uv__stream_destroy(uv_stream_t* stream) {
uv__stream_flush_write_queue(stream, UV_ECANCELED);
uv__write_callbacks(stream);
if (stream->shutdown_req) {
/* The ECANCELED error code is a lie, the shutdown(2) syscall is a
* fait accompli at this point. Maybe we should revisit this in v0.11.
* A possible reason for leaving it unchanged is that it informs the
* callee that the handle has been destroyed.
*/
uv__req_unregister(stream->loop, stream->shutdown_req);
stream->shutdown_req->cb(stream->shutdown_req, UV_ECANCELED);
stream->shutdown_req = NULL;
}
uv__drain(stream);
assert(stream->write_queue_size == 0);
}
@ -641,14 +632,16 @@ done:
int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
int err;
if (uv__is_closing(stream)) {
return UV_EINVAL;
}
switch (stream->type) {
case UV_TCP:
err = uv_tcp_listen((uv_tcp_t*)stream, backlog, cb);
err = uv__tcp_listen((uv_tcp_t*)stream, backlog, cb);
break;
case UV_NAMED_PIPE:
err = uv_pipe_listen((uv_pipe_t*)stream, backlog, cb);
err = uv__pipe_listen((uv_pipe_t*)stream, backlog, cb);
break;
default:
@ -667,25 +660,30 @@ static void uv__drain(uv_stream_t* stream) {
int err;
assert(QUEUE_EMPTY(&stream->write_queue));
if (!(stream->flags & UV_HANDLE_CLOSING)) {
uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
uv__stream_osx_interrupt_select(stream);
}
/* Shutdown? */
if ((stream->flags & UV_HANDLE_SHUTTING) &&
!(stream->flags & UV_HANDLE_CLOSING) &&
!(stream->flags & UV_HANDLE_SHUT)) {
assert(stream->shutdown_req);
if (!(stream->flags & UV_HANDLE_SHUTTING))
return;
req = stream->shutdown_req;
assert(req);
if ((stream->flags & UV_HANDLE_CLOSING) ||
!(stream->flags & UV_HANDLE_SHUT)) {
stream->shutdown_req = NULL;
stream->flags &= ~UV_HANDLE_SHUTTING;
uv__req_unregister(stream->loop, req);
err = 0;
if (shutdown(uv__stream_fd(stream), SHUT_WR))
if (stream->flags & UV_HANDLE_CLOSING)
/* The user destroyed the stream before we got to do the shutdown. */
err = UV_ECANCELED;
else if (shutdown(uv__stream_fd(stream), SHUT_WR))
err = UV__ERR(errno);
if (err == 0)
else /* Success. */
stream->flags |= UV_HANDLE_SHUT;
if (req->cb != NULL)
@ -926,7 +924,6 @@ static void uv__write(uv_stream_t* stream) {
}
req->error = n;
// XXX(jwn): this must call uv__stream_flush_write_queue(stream, n) here, since we won't generate any more events
uv__write_req_finish(req);
uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
uv__stream_osx_interrupt_select(stream);
@ -964,49 +961,6 @@ static void uv__write_callbacks(uv_stream_t* stream) {
}
uv_handle_type uv__handle_type(int fd) {
struct sockaddr_storage ss;
socklen_t sslen;
socklen_t len;
int type;
memset(&ss, 0, sizeof(ss));
sslen = sizeof(ss);
if (getsockname(fd, (struct sockaddr*)&ss, &sslen))
return UV_UNKNOWN_HANDLE;
len = sizeof type;
if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &len))
return UV_UNKNOWN_HANDLE;
if (type == SOCK_STREAM) {
#if defined(_AIX) || defined(__DragonFly__)
/* on AIX/DragonFly the getsockname call returns an empty sa structure
* for sockets of type AF_UNIX. For all other types it will
* return a properly filled in structure.
*/
if (sslen == 0)
return UV_NAMED_PIPE;
#endif
switch (ss.ss_family) {
case AF_UNIX:
return UV_NAMED_PIPE;
case AF_INET:
case AF_INET6:
return UV_TCP;
}
}
if (type == SOCK_DGRAM &&
(ss.ss_family == AF_INET || ss.ss_family == AF_INET6))
return UV_UDP;
return UV_UNKNOWN_HANDLE;
}
static void uv__stream_eof(uv_stream_t* stream, const uv_buf_t* buf) {
stream->flags |= UV_HANDLE_READ_EOF;
stream->flags &= ~UV_HANDLE_READING;
@ -1278,7 +1232,8 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
assert(uv__stream_fd(stream) >= 0);
/* Initialize request */
/* Initialize request. The `shutdown(2)` call will always be deferred until
* `uv__drain`, just before the callback is run. */
uv__req_init(stream->loop, req, UV_SHUTDOWN);
req->handle = stream;
req->cb = cb;
@ -1286,8 +1241,8 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
stream->flags |= UV_HANDLE_SHUTTING;
stream->flags &= ~UV_HANDLE_WRITABLE;
uv__io_start(stream->loop, &stream->io_watcher, POLLOUT);
uv__stream_osx_interrupt_select(stream);
if (QUEUE_EMPTY(&stream->write_queue))
uv__io_feed(stream->loop, &stream->io_watcher);
return 0;
}
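A plain-socket sketch, for illustration, of the half-close that the deferred uv__drain() above ultimately performs with shutdown(SHUT_WR): pending data is still delivered to the peer, which then sees EOF. The socketpair and the names used here are illustrative, not libuv internals.

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void) {
  int sv[2];
  char buf[16];
  ssize_t n;

  if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
    return 1;

  if (write(sv[0], "bye", 3) != 3)
    return 1;
  shutdown(sv[0], SHUT_WR);                   /* half-close: no more writes */

  while ((n = read(sv[1], buf, sizeof(buf))) > 0)
    printf("read %zd byte(s)\n", n);          /* queued data is still delivered */
  printf("EOF after half-close: n=%zd\n", n); /* n == 0 */

  close(sv[0]);
  close(sv[1]);
  return 0;
}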
@ -154,7 +154,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
sigset_t set;
uint64_t base;
uint64_t diff;
uint64_t idle_poll;
unsigned int nfds;
unsigned int i;
int saved_errno;
@ -424,7 +423,7 @@ void uv_loadavg(double avg[3]) {
#if defined(PORT_SOURCE_FILE)
static int uv__fs_event_rearm(uv_fs_event_t *handle) {
if (handle->fd == -1)
if (handle->fd == PORT_DELETED)
return UV_EBADF;
if (port_associate(handle->loop->fs_fd,
@ -475,6 +474,12 @@ static void uv__fs_event_read(uv_loop_t* loop,
handle = (uv_fs_event_t*) pe.portev_user;
assert((r == 0) && "unexpected port_get() error");
if (uv__is_closing(handle)) {
uv__handle_stop(handle);
uv__make_close_pending((uv_handle_t*) handle);
break;
}
events = 0;
if (pe.portev_events & (FILE_ATTRIB | FILE_MODIFIED))
events |= UV_CHANGE;
@ -542,12 +547,14 @@ int uv_fs_event_start(uv_fs_event_t* handle,
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
static int uv__fs_event_stop(uv_fs_event_t* handle) {
int ret = 0;
if (!uv__is_active(handle))
return 0;
if (handle->fd == PORT_FIRED || handle->fd == PORT_LOADED) {
port_dissociate(handle->loop->fs_fd,
if (handle->fd == PORT_LOADED) {
ret = port_dissociate(handle->loop->fs_fd,
PORT_SOURCE_FILE,
(uintptr_t) &handle->fo);
}
@ -556,13 +563,28 @@ int uv_fs_event_stop(uv_fs_event_t* handle) {
uv__free(handle->path);
handle->path = NULL;
handle->fo.fo_name = NULL;
if (ret == 0)
uv__handle_stop(handle);
return ret;
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
(void) uv__fs_event_stop(handle);
return 0;
}
void uv__fs_event_close(uv_fs_event_t* handle) {
uv_fs_event_stop(handle);
/*
* If we were unable to dissociate the port here, then it is most likely
* that there is a pending queued event. When this happens, we don't want
* to complete the close as it will free the underlying memory for the
* handle, causing a use-after-free problem when the event is processed.
* We defer the final cleanup until after the event is consumed in
* uv__fs_event_read().
*/
if (uv__fs_event_stop(handle) == 0)
uv__make_close_pending((uv_handle_t*) handle);
}
#else /* !defined(PORT_SOURCE_FILE) */
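For context, a hedged usage sketch of the public uv_fs_event API whose Solaris event-port backend is patched above; the watched path "." and the callback name are illustrative.

#include <stdio.h>
#include <uv.h>

static void on_change(uv_fs_event_t* handle,
                      const char* filename,
                      int events,
                      int status) {
  if (status < 0) {
    fprintf(stderr, "fs event error: %s\n", uv_strerror(status));
    return;
  }
  printf("%s:%s%s\n",
         filename != NULL ? filename : "(unknown)",
         (events & UV_CHANGE) ? " changed" : "",
         (events & UV_RENAME) ? " renamed" : "");
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_fs_event_t watcher;

  uv_fs_event_init(loop, &watcher);
  uv_fs_event_start(&watcher, on_change, ".", 0);
  return uv_run(loop, UV_RUN_DEFAULT);
}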
@ -184,14 +184,15 @@ int uv__tcp_bind(uv_tcp_t* tcp,
#endif
errno = 0;
if (bind(tcp->io_watcher.fd, addr, addrlen) && errno != EADDRINUSE) {
err = bind(tcp->io_watcher.fd, addr, addrlen);
if (err == -1 && errno != EADDRINUSE) {
if (errno == EAFNOSUPPORT)
/* OSX, other BSDs and SunOS fail with EAFNOSUPPORT when binding a
* socket created with AF_INET to an AF_INET6 address or vice versa. */
return UV_EINVAL;
return UV__ERR(errno);
}
tcp->delayed_error = UV__ERR(errno);
tcp->delayed_error = (err == -1) ? UV__ERR(errno) : 0;
tcp->flags |= UV_HANDLE_BOUND;
if (addr->sa_family == AF_INET6)
@ -320,15 +321,23 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
return UV_EINVAL;
fd = uv__stream_fd(handle);
if (0 != setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l)))
if (0 != setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l))) {
if (errno == EINVAL) {
/* Open Group Specifications Issue 7, 2018 edition states that
* EINVAL may mean the socket has been shut down already.
* Behavior observed on Solaris, illumos and macOS. */
errno = 0;
} else {
return UV__ERR(errno);
}
}
uv_close((uv_handle_t*) handle, close_cb);
return 0;
}
int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
int uv__tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
static int single_accept_cached = -1;
unsigned long flags;
int single_accept;
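A plain-socket sketch of the mechanism uv_tcp_close_reset() relies on in the hunk above: enabling SO_LINGER with a zero timeout so that closing the socket sends RST instead of FIN. The helper name is illustrative and error handling is minimal.

#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int close_with_reset(int fd) {
  struct linger l;

  memset(&l, 0, sizeof(l));
  l.l_onoff = 1;   /* linger enabled...                            */
  l.l_linger = 0;  /* ...with a zero timeout, so close() sends RST */

  /* The patch above additionally tolerates EINVAL here, which some systems
   * return when the socket has already been shut down. */
  if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l)))
    return -1;

  return close(fd);
}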
@ -162,25 +162,7 @@ void uv_barrier_destroy(uv_barrier_t* barrier) {
#endif
/* On MacOS, threads other than the main thread are created with a reduced
* stack size by default. Adjust to RLIMIT_STACK aligned to the page size.
*
* On Linux, threads created by musl have a much smaller stack than threads
* created by glibc (80 vs. 2048 or 4096 kB.) Follow glibc for consistency.
*/
size_t uv__thread_stack_size(void) {
#if defined(__APPLE__) || defined(__linux__)
struct rlimit lim;
/* getrlimit() can fail on some aarch64 systems due to a glibc bug where
* the system call wrapper invokes the wrong system call. Don't treat
* that as fatal, just use the default stack size instead.
*/
if (0 == getrlimit(RLIMIT_STACK, &lim) && lim.rlim_cur != RLIM_INFINITY) {
/* pthread_attr_setstacksize() expects page-aligned values. */
lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize();
/* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
* too small to safely receive signals on.
*
* Musl's PTHREAD_STACK_MIN + MINSIGSTKSZ == 8192 on arm64 (which has
@ -191,12 +173,22 @@ size_t uv__thread_stack_size(void) {
* is between 28 and 133 KB when compiling against glibc, depending
* on the architecture.
*/
if (lim.rlim_cur >= 8192)
if (lim.rlim_cur >= PTHREAD_STACK_MIN)
return lim.rlim_cur;
}
#endif
static size_t uv__min_stack_size(void) {
static const size_t min = 8192;
#ifdef PTHREAD_STACK_MIN /* Not defined on NetBSD. */
if (min < (size_t) PTHREAD_STACK_MIN)
return PTHREAD_STACK_MIN;
#endif /* PTHREAD_STACK_MIN */
return min;
}
/* On Linux, threads created by musl have a much smaller stack than threads
* created by glibc (80 vs. 2048 or 4096 kB.) Follow glibc for consistency.
*/
static size_t uv__default_stack_size(void) {
#if !defined(__linux__)
return 0;
#elif defined(__PPC__) || defined(__ppc__) || defined(__powerpc__)
@ -207,6 +199,34 @@ size_t uv__thread_stack_size(void) {
}
/* On MacOS, threads other than the main thread are created with a reduced
* stack size by default. Adjust to RLIMIT_STACK aligned to the page size.
*/
size_t uv__thread_stack_size(void) {
#if defined(__APPLE__) || defined(__linux__)
struct rlimit lim;
/* getrlimit() can fail on some aarch64 systems due to a glibc bug where
* the system call wrapper invokes the wrong system call. Don't treat
* that as fatal, just use the default stack size instead.
*/
if (getrlimit(RLIMIT_STACK, &lim))
return uv__default_stack_size();
if (lim.rlim_cur == RLIM_INFINITY)
return uv__default_stack_size();
/* pthread_attr_setstacksize() expects page-aligned values. */
lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize();
if (lim.rlim_cur >= (rlim_t) uv__min_stack_size())
return lim.rlim_cur;
#endif
return uv__default_stack_size();
}
int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
uv_thread_options_t params;
params.flags = UV_THREAD_NO_FLAGS;
@ -222,6 +242,7 @@ int uv_thread_create_ex(uv_thread_t* tid,
pthread_attr_t attr_storage;
size_t pagesize;
size_t stack_size;
size_t min_stack_size;
/* Used to squelch a -Wcast-function-type warning. */
union {
@ -239,10 +260,9 @@ int uv_thread_create_ex(uv_thread_t* tid,
pagesize = (size_t)getpagesize();
/* Round up to the nearest page boundary. */
stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);
#ifdef PTHREAD_STACK_MIN
if (stack_size < PTHREAD_STACK_MIN)
stack_size = PTHREAD_STACK_MIN;
#endif
min_stack_size = uv__min_stack_size();
if (stack_size < min_stack_size)
stack_size = min_stack_size;
}
if (stack_size > 0) {
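A hedged sketch of the stack-size clamping logic introduced above: round the requested size up to a page boundary and never go below a minimum before passing it to pthread_attr_setstacksize(). The helper names are illustrative, not libuv API.

#include <limits.h>
#include <pthread.h>
#include <unistd.h>

static size_t clamp_stack_size(size_t requested) {
  size_t pagesize = (size_t) sysconf(_SC_PAGESIZE);
  size_t min_size = 8192;              /* same floor the patch uses */

#ifdef PTHREAD_STACK_MIN               /* not defined on every platform */
  if (min_size < (size_t) PTHREAD_STACK_MIN)
    min_size = (size_t) PTHREAD_STACK_MIN;
#endif

  /* Round up to the nearest page boundary. */
  requested = (requested + pagesize - 1) & ~(pagesize - 1);

  if (requested < min_size)
    requested = min_size;
  return requested;
}

int spawn_with_stack(pthread_t* tid, void* (*fn)(void*), void* arg, size_t size) {
  pthread_attr_t attr;
  int rc;

  if (pthread_attr_init(&attr))
    return -1;
  pthread_attr_setstacksize(&attr, clamp_stack_size(size));
  rc = pthread_create(tid, &attr, fn, arg);
  pthread_attr_destroy(&attr);
  return rc;
}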
@ -66,6 +66,19 @@ static int orig_termios_fd = -1;
static struct termios orig_termios;
static uv_spinlock_t termios_spinlock = UV_SPINLOCK_INITIALIZER;
int uv__tcsetattr(int fd, int how, const struct termios *term) {
int rc;
do
rc = tcsetattr(fd, how, term);
while (rc == -1 && errno == EINTR);
if (rc == -1)
return UV__ERR(errno);
return 0;
}
static int uv__tty_is_slave(const int fd) {
int result;
#if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
@ -268,13 +281,18 @@ static void uv__tty_make_raw(struct termios* tio) {
int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
struct termios tmp;
int fd;
int rc;
if (tty->mode == (int) mode)
return 0;
fd = uv__stream_fd(tty);
if (tty->mode == UV_TTY_MODE_NORMAL && mode != UV_TTY_MODE_NORMAL) {
if (tcgetattr(fd, &tty->orig_termios))
do
rc = tcgetattr(fd, &tty->orig_termios);
while (rc == -1 && errno == EINTR);
if (rc == -1)
return UV__ERR(errno);
/* This is used for uv_tty_reset_mode() */
@ -304,11 +322,11 @@ int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
}
/* Apply changes after draining */
if (tcsetattr(fd, TCSADRAIN, &tmp))
return UV__ERR(errno);
rc = uv__tcsetattr(fd, TCSADRAIN, &tmp);
if (rc == 0)
tty->mode = mode;
return 0;
return rc;
}
@ -331,7 +349,7 @@ int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) {
uv_handle_type uv_guess_handle(uv_file file) {
struct sockaddr sa;
struct sockaddr_storage ss;
struct stat s;
socklen_t len;
int type;
@ -342,8 +360,24 @@ uv_handle_type uv_guess_handle(uv_file file) {
if (isatty(file))
return UV_TTY;
if (fstat(file, &s))
if (fstat(file, &s)) {
#if defined(__PASE__)
/* On IBM i, receiving RST from TCP instead of FIN immediately puts the fd
* into an error state. fstat will return EINVAL, and getsockname will also
* return EINVAL, even if sockaddr_storage is valid. (If file does not refer
* to a socket, ENOTSOCK is returned instead.)
* In such cases we still permit the user to open the connection as uv_tcp,
* so that they are immediately notified of the error in their read callback
* and can close this fd.
*/
len = sizeof(ss);
if (getsockname(file, (struct sockaddr*) &ss, &len)) {
if (errno == EINVAL)
return UV_TCP;
}
#endif
return UV_UNKNOWN_HANDLE;
}
if (S_ISREG(s.st_mode))
return UV_FILE;
@ -357,16 +391,29 @@ uv_handle_type uv_guess_handle(uv_file file) {
if (!S_ISSOCK(s.st_mode))
return UV_UNKNOWN_HANDLE;
len = sizeof(ss);
if (getsockname(file, (struct sockaddr*) &ss, &len)) {
#if defined(_AIX)
/* On AIX, receiving RST from TCP instead of FIN immediately puts the fd
* into an error state. In that case getsockname will return EINVAL, even if
* sockaddr_storage is valid.
* In such cases we still permit the user to open the connection as uv_tcp,
* so that they are immediately notified of the error in their read callback
* and can close this fd.
*/
if (errno == EINVAL) {
return UV_TCP;
}
#endif
return UV_UNKNOWN_HANDLE;
}
len = sizeof(type);
if (getsockopt(file, SOL_SOCKET, SO_TYPE, &type, &len))
return UV_UNKNOWN_HANDLE;
len = sizeof(sa);
if (getsockname(file, &sa, &len))
return UV_UNKNOWN_HANDLE;
if (type == SOCK_DGRAM)
if (sa.sa_family == AF_INET || sa.sa_family == AF_INET6)
if (ss.ss_family == AF_INET || ss.ss_family == AF_INET6)
return UV_UDP;
if (type == SOCK_STREAM) {
@ -379,9 +426,9 @@ uv_handle_type uv_guess_handle(uv_file file) {
return UV_NAMED_PIPE;
#endif /* defined(_AIX) || defined(__DragonFly__) */
if (sa.sa_family == AF_INET || sa.sa_family == AF_INET6)
if (ss.ss_family == AF_INET || ss.ss_family == AF_INET6)
return UV_TCP;
if (sa.sa_family == AF_UNIX)
if (ss.ss_family == AF_UNIX)
return UV_NAMED_PIPE;
}
@ -403,8 +450,7 @@ int uv_tty_reset_mode(void) {
err = 0;
if (orig_termios_fd != -1)
if (tcsetattr(orig_termios_fd, TCSANOW, &orig_termios))
err = UV__ERR(errno);
err = uv__tcsetattr(orig_termios_fd, TCSANOW, &orig_termios);
uv_spinlock_unlock(&termios_spinlock);
errno = saved_errno;
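A short usage sketch of the public uv_guess_handle() routine patched above, classifying the standard descriptors; the output naturally depends on how the program is run.

#include <stdio.h>
#include <uv.h>

int main(void) {
  int fd;

  for (fd = 0; fd <= 2; fd++) {
    uv_handle_type t = uv_guess_handle(fd);
    const char* name = uv_handle_type_name(t);
    printf("fd %d -> %s\n", fd, name != NULL ? name : "unknown");
  }
  return 0;
}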
@ -201,6 +201,7 @@ static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
for (k = 0; k < chunks; ++k) {
iov[k].iov_base = buf->base + k * UV__UDP_DGRAM_MAXSIZE;
iov[k].iov_len = UV__UDP_DGRAM_MAXSIZE;
memset(&msgs[k].msg_hdr, 0, sizeof(msgs[k].msg_hdr));
msgs[k].msg_hdr.msg_iov = iov + k;
msgs[k].msg_hdr.msg_iovlen = 1;
msgs[k].msg_hdr.msg_name = peers + k;
@ -494,7 +495,7 @@ static int uv__set_reuse(int fd) {
if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
return UV__ERR(errno);
}
#elif defined(SO_REUSEPORT) && !defined(__linux__)
#elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__)
if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
return UV__ERR(errno);
#else
@ -703,7 +704,16 @@ int uv__udp_disconnect(uv_udp_t* handle) {
do {
errno = 0;
#ifdef __PASE__
/* On IBMi a connectionless transport socket can be disconnected by
* either setting the addr parameter to NULL or setting the
* addr_length parameter to zero, and issuing another connect().
* https://www.ibm.com/docs/en/i/7.4?topic=ssw_ibm_i_74/apis/connec.htm
*/
r = connect(handle->io_watcher.fd, (struct sockaddr*) NULL, 0);
#else
r = connect(handle->io_watcher.fd, (struct sockaddr*) &addr, sizeof(addr));
#endif
} while (r == -1 && errno == EINTR);
if (r == -1) {
@ -927,7 +937,8 @@ static int uv__udp_set_membership6(uv_udp_t* handle,
!defined(__NetBSD__) && \
!defined(__ANDROID__) && \
!defined(__DragonFly__) && \
!defined(__QNX__)
!defined(__QNX__) && \
!defined(__GNU__)
static int uv__udp_set_source_membership4(uv_udp_t* handle,
const struct sockaddr_in* multicast_addr,
const char* interface_addr,
@ -1119,7 +1130,8 @@ int uv_udp_set_source_membership(uv_udp_t* handle,
!defined(__NetBSD__) && \
!defined(__ANDROID__) && \
!defined(__DragonFly__) && \
!defined(__QNX__)
!defined(__QNX__) && \
!defined(__GNU__)
int err;
union uv__sockaddr mcast_addr;
union uv__sockaddr src_addr;
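A plain-socket sketch of the UDP "disconnect" the hunk above performs: a connected datagram socket is dissociated by connect()ing it to an address whose sa_family is AF_UNSPEC (or, on IBM i, a NULL address). The helper name is illustrative.

#include <string.h>
#include <sys/socket.h>

int udp_disconnect(int fd) {
  struct sockaddr addr;

  memset(&addr, 0, sizeof(addr));
  addr.sa_family = AF_UNSPEC;   /* "no address" dissolves the association */

  return connect(fd, &addr, sizeof(addr));
}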
@ -295,7 +295,9 @@ int uv_tcp_bind(uv_tcp_t* handle,
if (handle->type != UV_TCP)
return UV_EINVAL;
if (uv__is_closing(handle)) {
return UV_EINVAL;
}
if (addr->sa_family == AF_INET)
addrlen = sizeof(struct sockaddr_in);
else if (addr->sa_family == AF_INET6)
@ -130,7 +130,10 @@ enum {
UV_SIGNAL_ONE_SHOT = 0x02000000,
/* Only used by uv_poll_t handles. */
UV_HANDLE_POLL_SLOW = 0x01000000
UV_HANDLE_POLL_SLOW = 0x01000000,
/* Only used by uv_process_t handles. */
UV_HANDLE_REAP = 0x10000000
};
int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap);
@ -28,7 +28,7 @@
#include "req-inl.h"
void uv_async_endgame(uv_loop_t* loop, uv_async_t* handle) {
void uv__async_endgame(uv_loop_t* loop, uv_async_t* handle) {
if (handle->flags & UV_HANDLE_CLOSING &&
!handle->async_sent) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
@ -54,9 +54,9 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
}
void uv_async_close(uv_loop_t* loop, uv_async_t* handle) {
void uv__async_close(uv_loop_t* loop, uv_async_t* handle) {
if (!((uv_async_t*)handle)->async_sent) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
}
uv__handle_closing(handle);
@ -83,7 +83,7 @@ int uv_async_send(uv_async_t* handle) {
}
void uv_process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
void uv__process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
uv_req_t* req) {
assert(handle->type == UV_ASYNC);
assert(req->type == UV_WAKEUP);
@ -91,7 +91,7 @@ void uv_process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
handle->async_sent = 0;
if (handle->flags & UV_HANDLE_CLOSING) {
uv_want_endgame(loop, (uv_handle_t*)handle);
uv__want_endgame(loop, (uv_handle_t*)handle);
} else if (handle->async_cb != NULL) {
handle->async_cb(handle);
}
@ -84,10 +84,12 @@ static int uv__loops_capacity;
#define UV__LOOPS_CHUNK_SIZE 8
static uv_mutex_t uv__loops_lock;
static void uv__loops_init(void) {
uv_mutex_init(&uv__loops_lock);
}
static int uv__loops_add(uv_loop_t* loop) {
uv_loop_t** new_loops;
int new_capacity, i;
@ -115,6 +117,7 @@ failed_loops_realloc:
return ERROR_OUTOFMEMORY;
}
static void uv__loops_remove(uv_loop_t* loop) {
int loop_index;
int smaller_capacity;
@ -173,7 +176,7 @@ void uv__wake_all_loops(void) {
uv_mutex_unlock(&uv__loops_lock);
}
static void uv_init(void) {
static void uv__init(void) {
/* Tell Windows that we will handle critical errors. */
SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX |
SEM_NOOPENFILEERRORBOX);
@ -199,19 +202,19 @@ static void uv_init(void) {
/* Fetch winapi function pointers. This must be done first because other
* initialization code might need these function pointers to be loaded.
*/
uv_winapi_init();
uv__winapi_init();
/* Initialize winsock */
uv_winsock_init();
uv__winsock_init();
/* Initialize FS */
uv_fs_init();
uv__fs_init();
/* Initialize signal stuff */
uv_signals_init();
uv__signals_init();
/* Initialize console */
uv_console_init();
uv__console_init();
/* Initialize utilities */
uv__util_init();
@ -327,7 +330,7 @@ void uv_update_time(uv_loop_t* loop) {
void uv__once_init(void) {
uv_once(&uv_init_guard_, uv_init);
uv_once(&uv_init_guard_, uv__init);
}
@ -395,23 +398,28 @@ int uv_loop_fork(uv_loop_t* loop) {
}
static int uv__loop_alive(const uv_loop_t* loop) {
return uv__has_active_handles(loop) ||
uv__has_active_reqs(loop) ||
loop->pending_reqs_tail != NULL ||
loop->endgame_handles != NULL;
}
int uv_loop_alive(const uv_loop_t* loop) {
return uv__loop_alive(loop);
}
int uv_backend_timeout(const uv_loop_t* loop) {
if (loop->stop_flag != 0)
return 0;
if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
return 0;
if (loop->pending_reqs_tail)
return 0;
if (loop->endgame_handles)
return 0;
if (loop->idle_handles)
return 0;
if (loop->stop_flag == 0 &&
/* uv__loop_alive(loop) && */
(uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
loop->pending_reqs_tail == NULL &&
loop->idle_handles == NULL &&
loop->endgame_handles == NULL)
return uv__next_timeout(loop);
return 0;
}
@ -462,8 +470,8 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
if (overlapped) {
/* Package was dequeued */
req = uv_overlapped_to_req(overlapped);
uv_insert_pending_req(loop, req);
req = uv__overlapped_to_req(overlapped);
uv__insert_pending_req(loop, req);
/* Some time might have passed waiting for I/O,
* so update the loop time here.
@ -547,8 +555,8 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
* meant only to wake us up.
*/
if (overlappeds[i].lpOverlapped) {
req = uv_overlapped_to_req(overlappeds[i].lpOverlapped);
uv_insert_pending_req(loop, req);
req = uv__overlapped_to_req(overlappeds[i].lpOverlapped);
uv__insert_pending_req(loop, req);
}
}
@ -581,22 +589,10 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
}
static int uv__loop_alive(const uv_loop_t* loop) {
return uv__has_active_handles(loop) ||
uv__has_active_reqs(loop) ||
loop->endgame_handles != NULL;
}
int uv_loop_alive(const uv_loop_t* loop) {
return uv__loop_alive(loop);
}
int uv_run(uv_loop_t *loop, uv_run_mode mode) {
DWORD timeout;
int r;
int ran_pending;
int can_sleep;
r = uv__loop_alive(loop);
if (!r)
@ -606,12 +602,14 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
uv_update_time(loop);
uv__run_timers(loop);
ran_pending = uv_process_reqs(loop);
uv_idle_invoke(loop);
uv_prepare_invoke(loop);
can_sleep = loop->pending_reqs_tail == NULL && loop->idle_handles == NULL;
uv__process_reqs(loop);
uv__idle_invoke(loop);
uv__prepare_invoke(loop);
timeout = 0;
if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
timeout = uv_backend_timeout(loop);
if (pGetQueuedCompletionStatusEx)
@ -619,6 +617,11 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
else
uv__poll_wine(loop, timeout);
/* Process immediate callbacks (e.g. write_cb) a small fixed number of
* times to avoid loop starvation. */
for (r = 0; r < 8 && loop->pending_reqs_tail != NULL; r++)
uv__process_reqs(loop);
/* Run one final update on the provider_idle_time in case uv__poll*
* returned because the timeout expired, but no events were received. This
* call will be ignored if the provider_entry_time was either never set (if
@ -626,8 +629,8 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
*/
uv__metrics_update_idle_time(loop);
uv_check_invoke(loop);
uv_process_endgames(loop);
uv__check_invoke(loop);
uv__process_endgames(loop);
if (mode == UV_RUN_ONCE) {
/* UV_RUN_ONCE implies forward progress: at least one callback must have
@ -638,6 +641,7 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
* UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
* the check.
*/
uv_update_time(loop);
uv__run_timers(loop);
}
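A hedged sketch of driving a loop with the semantics the core.c hunks above adjust: uv_backend_timeout() reports how long the loop may sleep, and uv_run(UV_RUN_ONCE) returns zero once no more callbacks are expected. Purely illustrative; with no handles registered it exits immediately.

#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_loop_t* loop = uv_default_loop();

  while (uv_loop_alive(loop)) {
    printf("loop may sleep for up to %d ms\n", uv_backend_timeout(loop));
    if (uv_run(loop, UV_RUN_ONCE) == 0)
      break;                           /* no more callbacks are expected */
  }
  uv_loop_close(loop);
  return 0;
}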
@ -33,7 +33,7 @@
const unsigned int uv_directory_watcher_buffer_size = 4096;
static void uv_fs_event_queue_readdirchanges(uv_loop_t* loop,
static void uv__fs_event_queue_readdirchanges(uv_loop_t* loop,
uv_fs_event_t* handle) {
assert(handle->dir_handle != INVALID_HANDLE_VALUE);
assert(!handle->req_pending);
@ -57,13 +57,13 @@ static void uv_fs_event_queue_readdirchanges(uv_loop_t* loop,
NULL)) {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(&handle->req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)&handle->req);
uv__insert_pending_req(loop, (uv_req_t*)&handle->req);
}
handle->req_pending = 1;
}
static void uv_relative_path(const WCHAR* filename,
static void uv__relative_path(const WCHAR* filename,
const WCHAR* dir,
WCHAR** relpath) {
size_t relpathlen;
@ -80,7 +80,7 @@ static void uv_relative_path(const WCHAR* filename,
(*relpath)[relpathlen] = L'\0';
}
static int uv_split_path(const WCHAR* filename, WCHAR** dir,
static int uv__split_path(const WCHAR* filename, WCHAR** dir,
WCHAR** file) {
size_t len, i;
DWORD dir_len;
@ -255,12 +255,12 @@ int uv_fs_event_start(uv_fs_event_t* handle,
short_path_done:
short_path = short_path_buffer;
if (uv_split_path(pathw, &dir, &handle->filew) != 0) {
if (uv__split_path(pathw, &dir, &handle->filew) != 0) {
last_error = GetLastError();
goto error;
}
if (uv_split_path(short_path, NULL, &handle->short_filew) != 0) {
if (uv__split_path(short_path, NULL, &handle->short_filew) != 0) {
last_error = GetLastError();
goto error;
}
@ -423,7 +423,7 @@ static int file_info_cmp(WCHAR* str, WCHAR* file_name, size_t file_name_len) {
}
void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
void uv__process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
uv_fs_event_t* handle) {
FILE_NOTIFY_INFORMATION* file_info;
int err, sizew, size;
@ -442,7 +442,7 @@ void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
*/
if (!uv__is_active(handle)) {
if (handle->flags & UV_HANDLE_CLOSING) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
}
return;
}
@ -515,7 +515,7 @@ void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
if (long_filenamew) {
/* Get the file name out of the long path. */
uv_relative_path(long_filenamew,
uv__relative_path(long_filenamew,
handle->dirw,
&filenamew);
uv__free(long_filenamew);
@ -575,26 +575,26 @@ void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
}
if (handle->flags & UV_HANDLE_CLOSING) {
uv_want_endgame(loop, (uv_handle_t*)handle);
uv__want_endgame(loop, (uv_handle_t*)handle);
} else if (uv__is_active(handle)) {
uv_fs_event_queue_readdirchanges(loop, handle);
uv__fs_event_queue_readdirchanges(loop, handle);
}
}
void uv_fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle) {
void uv__fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle) {
uv_fs_event_stop(handle);
uv__handle_closing(handle);
if (!handle->req_pending) {
uv_want_endgame(loop, (uv_handle_t*)handle);
uv__want_endgame(loop, (uv_handle_t*)handle);
}
}
void uv_fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle) {
void uv__fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle) {
if ((handle->flags & UV_HANDLE_CLOSING) && !handle->req_pending) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
deps/uv/src/win/fs.c vendored
@ -46,7 +46,7 @@
do { \
if (req == NULL) \
return UV_EINVAL; \
uv_fs_req_init(loop, req, subtype, cb); \
uv__fs_req_init(loop, req, subtype, cb); \
} \
while (0)
@ -132,7 +132,7 @@ static int uv__file_symlink_usermode_flag = SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGE
static DWORD uv__allocation_granularity;
void uv_fs_init(void) {
void uv__fs_init(void) {
SYSTEM_INFO system_info;
GetSystemInfo(&system_info);
@ -241,7 +241,7 @@ INLINE static int fs__capture_path(uv_fs_t* req, const char* path,
INLINE static void uv_fs_req_init(uv_loop_t* loop, uv_fs_t* req,
INLINE static void uv__fs_req_init(uv_loop_t* loop, uv_fs_t* req,
uv_fs_type fs_type, const uv_fs_cb cb) {
uv__once_init();
UV_REQ_INIT(req, UV_FS);
@ -912,12 +912,11 @@ void fs__read(uv_fs_t* req) {
SET_REQ_RESULT(req, bytes);
} else {
error = GetLastError();
if (error == ERROR_ACCESS_DENIED) {
error = ERROR_INVALID_FLAGS;
}
if (error == ERROR_HANDLE_EOF) {
if (error == ERROR_HANDLE_EOF || error == ERROR_BROKEN_PIPE) {
SET_REQ_RESULT(req, bytes);
} else {
SET_REQ_WIN32_ERROR(req, error);
@ -1881,8 +1880,9 @@ INLINE static DWORD fs__stat_impl_from_path(WCHAR* path,
NULL);
if (handle == INVALID_HANDLE_VALUE)
ret = GetLastError();
else if (fs__stat_handle(handle, statbuf, do_lstat) != 0)
return GetLastError();
if (fs__stat_handle(handle, statbuf, do_lstat) != 0)
ret = GetLastError();
else
ret = 0;
@ -2300,13 +2300,13 @@ INLINE static DWORD fs__utime_impl_from_path(WCHAR* path,
flags,
NULL);
if (handle == INVALID_HANDLE_VALUE) {
if (handle == INVALID_HANDLE_VALUE)
return GetLastError();
if (fs__utime_handle(handle, atime, mtime) != 0)
ret = GetLastError();
} else if (fs__utime_handle(handle, atime, mtime) != 0) {
ret = GetLastError();
} else {
else
ret = 0;
}
CloseHandle(handle);
return ret;
@ -55,7 +55,7 @@
\
if (handle->flags & UV_HANDLE_CLOSING && \
handle->reqs_pending == 0) { \
uv_want_endgame(loop, (uv_handle_t*)handle); \
uv__want_endgame(loop, (uv_handle_t*)handle); \
} \
} while (0)
@ -85,7 +85,7 @@
} while (0)
INLINE static void uv_want_endgame(uv_loop_t* loop, uv_handle_t* handle) {
INLINE static void uv__want_endgame(uv_loop_t* loop, uv_handle_t* handle) {
if (!(handle->flags & UV_HANDLE_ENDGAME_QUEUED)) {
handle->flags |= UV_HANDLE_ENDGAME_QUEUED;
@ -95,7 +95,7 @@ INLINE static void uv_want_endgame(uv_loop_t* loop, uv_handle_t* handle) {
}
INLINE static void uv_process_endgames(uv_loop_t* loop) {
INLINE static void uv__process_endgames(uv_loop_t* loop) {
uv_handle_t* handle;
while (loop->endgame_handles) {
@ -106,23 +106,23 @@ INLINE static void uv_process_endgames(uv_loop_t* loop) {
switch (handle->type) {
case UV_TCP:
uv_tcp_endgame(loop, (uv_tcp_t*) handle);
uv__tcp_endgame(loop, (uv_tcp_t*) handle);
break;
case UV_NAMED_PIPE:
uv_pipe_endgame(loop, (uv_pipe_t*) handle);
uv__pipe_endgame(loop, (uv_pipe_t*) handle);
break;
case UV_TTY:
uv_tty_endgame(loop, (uv_tty_t*) handle);
uv__tty_endgame(loop, (uv_tty_t*) handle);
break;
case UV_UDP:
uv_udp_endgame(loop, (uv_udp_t*) handle);
uv__udp_endgame(loop, (uv_udp_t*) handle);
break;
case UV_POLL:
uv_poll_endgame(loop, (uv_poll_t*) handle);
uv__poll_endgame(loop, (uv_poll_t*) handle);
break;
case UV_TIMER:
@ -133,23 +133,23 @@ INLINE static void uv_process_endgames(uv_loop_t* loop) {
case UV_PREPARE:
case UV_CHECK:
case UV_IDLE:
uv_loop_watcher_endgame(loop, handle);
uv__loop_watcher_endgame(loop, handle);
break;
case UV_ASYNC:
uv_async_endgame(loop, (uv_async_t*) handle);
uv__async_endgame(loop, (uv_async_t*) handle);
break;
case UV_SIGNAL:
uv_signal_endgame(loop, (uv_signal_t*) handle);
uv__signal_endgame(loop, (uv_signal_t*) handle);
break;
case UV_PROCESS:
uv_process_endgame(loop, (uv_process_t*) handle);
uv__process_endgame(loop, (uv_process_t*) handle);
break;
case UV_FS_EVENT:
uv_fs_event_endgame(loop, (uv_fs_event_t*) handle);
uv__fs_event_endgame(loop, (uv_fs_event_t*) handle);
break;
case UV_FS_POLL:
@ -77,63 +77,63 @@ void uv_close(uv_handle_t* handle, uv_close_cb cb) {
/* Handle-specific close actions */
switch (handle->type) {
case UV_TCP:
uv_tcp_close(loop, (uv_tcp_t*)handle);
uv__tcp_close(loop, (uv_tcp_t*)handle);
return;
case UV_NAMED_PIPE:
uv_pipe_close(loop, (uv_pipe_t*) handle);
uv__pipe_close(loop, (uv_pipe_t*) handle);
return;
case UV_TTY:
uv_tty_close((uv_tty_t*) handle);
uv__tty_close((uv_tty_t*) handle);
return;
case UV_UDP:
uv_udp_close(loop, (uv_udp_t*) handle);
uv__udp_close(loop, (uv_udp_t*) handle);
return;
case UV_POLL:
uv_poll_close(loop, (uv_poll_t*) handle);
uv__poll_close(loop, (uv_poll_t*) handle);
return;
case UV_TIMER:
uv_timer_stop((uv_timer_t*)handle);
uv__handle_closing(handle);
uv_want_endgame(loop, handle);
uv__want_endgame(loop, handle);
return;
case UV_PREPARE:
uv_prepare_stop((uv_prepare_t*)handle);
uv__handle_closing(handle);
uv_want_endgame(loop, handle);
uv__want_endgame(loop, handle);
return;
case UV_CHECK:
uv_check_stop((uv_check_t*)handle);
uv__handle_closing(handle);
uv_want_endgame(loop, handle);
uv__want_endgame(loop, handle);
return;
case UV_IDLE:
uv_idle_stop((uv_idle_t*)handle);
uv__handle_closing(handle);
uv_want_endgame(loop, handle);
uv__want_endgame(loop, handle);
return;
case UV_ASYNC:
uv_async_close(loop, (uv_async_t*) handle);
uv__async_close(loop, (uv_async_t*) handle);
return;
case UV_SIGNAL:
uv_signal_close(loop, (uv_signal_t*) handle);
uv__signal_close(loop, (uv_signal_t*) handle);
return;
case UV_PROCESS:
uv_process_close(loop, (uv_process_t*) handle);
uv__process_close(loop, (uv_process_t*) handle);
return;
case UV_FS_EVENT:
uv_fs_event_close(loop, (uv_fs_event_t*) handle);
uv__fs_event_close(loop, (uv_fs_event_t*) handle);
return;
case UV_FS_POLL:
@ -72,25 +72,28 @@ typedef struct {
uint32_t delayed_error;
} uv__ipc_socket_xfer_info_t;
int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb);
int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client);
int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
int uv__tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb);
int uv__tcp_accept(uv_tcp_t* server, uv_tcp_t* client);
int uv__tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb);
int uv_tcp_write(uv_loop_t* loop, uv_write_t* req, uv_tcp_t* handle,
int uv__tcp_write(uv_loop_t* loop, uv_write_t* req, uv_tcp_t* handle,
const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb);
int uv__tcp_try_write(uv_tcp_t* handle, const uv_buf_t bufs[],
unsigned int nbufs);
void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle, uv_req_t* req);
void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
void uv__process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle, uv_req_t* req);
void uv__process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_write_t* req);
void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
void uv__process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_req_t* req);
void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
void uv__process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_connect_t* req);
void uv__process_tcp_shutdown_req(uv_loop_t* loop,
uv_tcp_t* stream,
uv_shutdown_t* req);
void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp);
void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle);
void uv__tcp_close(uv_loop_t* loop, uv_tcp_t* tcp);
void uv__tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle);
int uv__tcp_xfer_export(uv_tcp_t* handle,
int pid,
@ -104,12 +107,12 @@ int uv__tcp_xfer_import(uv_tcp_t* tcp,
/*
* UDP
*/
void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle, uv_req_t* req);
void uv_process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
void uv__process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle, uv_req_t* req);
void uv__process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
uv_udp_send_t* req);
void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle);
void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle);
void uv__udp_close(uv_loop_t* loop, uv_udp_t* handle);
void uv__udp_endgame(uv_loop_t* loop, uv_udp_t* handle);
/*
@ -118,9 +121,9 @@ void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle);
int uv__create_stdio_pipe_pair(uv_loop_t* loop,
uv_pipe_t* parent_pipe, HANDLE* child_pipe_ptr, unsigned int flags);
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client);
int uv_pipe_read_start(uv_pipe_t* handle, uv_alloc_cb alloc_cb,
int uv__pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
int uv__pipe_accept(uv_pipe_t* server, uv_stream_t* client);
int uv__pipe_read_start(uv_pipe_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb);
void uv__pipe_read_stop(uv_pipe_t* handle);
int uv__pipe_write(uv_loop_t* loop,
@ -130,75 +133,77 @@ int uv__pipe_write(uv_loop_t* loop,
size_t nbufs,
uv_stream_t* send_handle,
uv_write_cb cb);
void uv__pipe_shutdown(uv_loop_t* loop, uv_pipe_t* handle, uv_shutdown_t* req);
void uv_process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_req_t* req);
void uv_process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_write_t* req);
void uv_process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_req_t* raw_req);
void uv_process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_connect_t* req);
void uv_process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_shutdown_t* req);
void uv_pipe_close(uv_loop_t* loop, uv_pipe_t* handle);
void uv_pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle);
void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle);
void uv__pipe_close(uv_loop_t* loop, uv_pipe_t* handle);
void uv__pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle);
/*
* TTY
*/
void uv_console_init(void);
void uv__console_init(void);
int uv_tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb,
int uv__tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb);
int uv_tty_read_stop(uv_tty_t* handle);
int uv_tty_write(uv_loop_t* loop, uv_write_t* req, uv_tty_t* handle,
int uv__tty_read_stop(uv_tty_t* handle);
int uv__tty_write(uv_loop_t* loop, uv_write_t* req, uv_tty_t* handle,
const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb);
int uv__tty_try_write(uv_tty_t* handle, const uv_buf_t bufs[],
unsigned int nbufs);
void uv_tty_close(uv_tty_t* handle);
void uv__tty_close(uv_tty_t* handle);
void uv_process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle,
void uv__process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle,
uv_req_t* req);
void uv_process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
void uv__process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
uv_write_t* req);
/*
* uv_process_tty_accept_req() is a stub to keep DELEGATE_STREAM_REQ working
* uv__process_tty_accept_req() is a stub to keep DELEGATE_STREAM_REQ working
* TODO: find a way to remove it
*/
void uv_process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
void uv__process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
uv_req_t* raw_req);
/*
* uv_process_tty_connect_req() is a stub to keep DELEGATE_STREAM_REQ working
* uv__process_tty_connect_req() is a stub to keep DELEGATE_STREAM_REQ working
* TODO: find a way to remove it
*/
void uv_process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
void uv__process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
uv_connect_t* req);
void uv_tty_endgame(uv_loop_t* loop, uv_tty_t* handle);
void uv__process_tty_shutdown_req(uv_loop_t* loop,
uv_tty_t* stream,
uv_shutdown_t* req);
void uv__tty_endgame(uv_loop_t* loop, uv_tty_t* handle);
/*
* Poll watchers
*/
void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
void uv__process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
uv_req_t* req);
int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle);
void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle);
int uv__poll_close(uv_loop_t* loop, uv_poll_t* handle);
void uv__poll_endgame(uv_loop_t* loop, uv_poll_t* handle);
/*
* Loop watchers
*/
void uv_loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle);
void uv__loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle);
void uv_prepare_invoke(uv_loop_t* loop);
void uv_check_invoke(uv_loop_t* loop);
void uv_idle_invoke(uv_loop_t* loop);
void uv__prepare_invoke(uv_loop_t* loop);
void uv__check_invoke(uv_loop_t* loop);
void uv__idle_invoke(uv_loop_t* loop);
void uv__once_init(void);
@ -206,53 +211,47 @@ void uv__once_init(void);
/*
* Async watcher
*/
void uv_async_close(uv_loop_t* loop, uv_async_t* handle);
void uv_async_endgame(uv_loop_t* loop, uv_async_t* handle);
void uv__async_close(uv_loop_t* loop, uv_async_t* handle);
void uv__async_endgame(uv_loop_t* loop, uv_async_t* handle);
void uv_process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
void uv__process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
uv_req_t* req);
/*
* Signal watcher
*/
void uv_signals_init(void);
void uv__signals_init(void);
int uv__signal_dispatch(int signum);
void uv_signal_close(uv_loop_t* loop, uv_signal_t* handle);
void uv_signal_endgame(uv_loop_t* loop, uv_signal_t* handle);
void uv__signal_close(uv_loop_t* loop, uv_signal_t* handle);
void uv__signal_endgame(uv_loop_t* loop, uv_signal_t* handle);
void uv_process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
void uv__process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
uv_req_t* req);
/*
* Spawn
*/
void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle);
void uv_process_close(uv_loop_t* loop, uv_process_t* handle);
void uv_process_endgame(uv_loop_t* loop, uv_process_t* handle);
/*
* Error
*/
int uv_translate_sys_error(int sys_errno);
void uv__process_proc_exit(uv_loop_t* loop, uv_process_t* handle);
void uv__process_close(uv_loop_t* loop, uv_process_t* handle);
void uv__process_endgame(uv_loop_t* loop, uv_process_t* handle);
/*
* FS
*/
void uv_fs_init(void);
void uv__fs_init(void);
/*
* FS Event
*/
void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
void uv__process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
uv_fs_event_t* handle);
void uv_fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle);
void uv_fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle);
void uv__fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle);
void uv__fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle);
/*
@ -299,28 +298,28 @@ HANDLE uv__stdio_handle(BYTE* buffer, int fd);
/*
* Winapi and ntapi utility functions
*/
void uv_winapi_init(void);
void uv__winapi_init(void);
/*
* Winsock utility functions
*/
void uv_winsock_init(void);
void uv__winsock_init(void);
int uv_ntstatus_to_winsock_error(NTSTATUS status);
int uv__ntstatus_to_winsock_error(NTSTATUS status);
BOOL uv_get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX* target);
BOOL uv_get_connectex_function(SOCKET socket, LPFN_CONNECTEX* target);
BOOL uv__get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX* target);
BOOL uv__get_connectex_function(SOCKET socket, LPFN_CONNECTEX* target);
int WSAAPI uv_wsarecv_workaround(SOCKET socket, WSABUF* buffers,
int WSAAPI uv__wsarecv_workaround(SOCKET socket, WSABUF* buffers,
DWORD buffer_count, DWORD* bytes, DWORD* flags, WSAOVERLAPPED *overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
int WSAAPI uv_wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
int WSAAPI uv__wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
DWORD buffer_count, DWORD* bytes, DWORD* flags, struct sockaddr* addr,
int* addr_len, WSAOVERLAPPED *overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
int WSAAPI uv_msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in,
int WSAAPI uv__msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in,
AFD_POLL_INFO* info_out, OVERLAPPED* overlapped);
/* Whether there are any non-IFS LSPs stacked on TCP */
@ -26,7 +26,7 @@
#include "handle-inl.h"
void uv_loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle) {
void uv__loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle) {
if (handle->flags & UV_HANDLE_CLOSING) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
handle->flags |= UV_HANDLE_CLOSED;
@ -104,7 +104,7 @@ void uv_loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle) {
} \
\
\
void uv_##name##_invoke(uv_loop_t* loop) { \
void uv__##name##_invoke(uv_loop_t* loop) { \
uv_##name##_t* handle; \
\
(loop)->next_##name##_handle = (loop)->name##_handles; \
deps/uv/src/win/pipe.c vendored
@ -98,13 +98,13 @@ static void eof_timer_destroy(uv_pipe_t* pipe);
static void eof_timer_close_cb(uv_handle_t* handle);
static void uv_unique_pipe_name(char* ptr, char* name, size_t size) {
static void uv__unique_pipe_name(char* ptr, char* name, size_t size) {
snprintf(name, size, "\\\\?\\pipe\\uv\\%p-%lu", ptr, GetCurrentProcessId());
}
int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
uv_stream_init(loop, (uv_stream_t*)handle, UV_NAMED_PIPE);
uv__stream_init(loop, (uv_stream_t*)handle, UV_NAMED_PIPE);
handle->reqs_pending = 0;
handle->handle = INVALID_HANDLE_VALUE;
@ -120,15 +120,11 @@ int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
}
static void uv_pipe_connection_init(uv_pipe_t* handle) {
uv_connection_init((uv_stream_t*) handle);
static void uv__pipe_connection_init(uv_pipe_t* handle) {
assert(!(handle->flags & UV_HANDLE_PIPESERVER));
uv__connection_init((uv_stream_t*) handle);
handle->read_req.data = handle;
handle->pipe.conn.eof_timer = NULL;
assert(!(handle->flags & UV_HANDLE_PIPESERVER));
if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) {
handle->pipe.conn.readfile_thread_handle = NULL;
InitializeCriticalSection(&handle->pipe.conn.readfile_thread_lock);
}
}
@ -209,7 +205,7 @@ static int uv__pipe_server(
int err;
for (;;) {
uv_unique_pipe_name(random, name, nameSize);
uv__unique_pipe_name(random, name, nameSize);
pipeHandle = CreateNamedPipeA(name,
access | FILE_FLAG_FIRST_PIPE_INSTANCE,
@ -393,6 +389,8 @@ int uv__create_stdio_pipe_pair(uv_loop_t* loop,
unsigned int client_flags;
int err;
uv__pipe_connection_init(parent_pipe);
server_pipe = INVALID_HANDLE_VALUE;
client_pipe = INVALID_HANDLE_VALUE;
@ -427,7 +425,6 @@ int uv__create_stdio_pipe_pair(uv_loop_t* loop,
goto error;
}
uv_pipe_connection_init(parent_pipe);
parent_pipe->handle = server_pipe;
*child_pipe_ptr = client_pipe;
@ -450,7 +447,7 @@ int uv__create_stdio_pipe_pair(uv_loop_t* loop,
}
static int uv_set_pipe_handle(uv_loop_t* loop,
static int uv__set_pipe_handle(uv_loop_t* loop,
uv_pipe_t* handle,
HANDLE pipeHandle,
int fd,
@ -462,7 +459,9 @@ static int uv_set_pipe_handle(uv_loop_t* loop,
DWORD current_mode = 0;
DWORD err = 0;
if (handle->flags & UV_HANDLE_PIPESERVER)
assert(handle->flags & UV_HANDLE_CONNECTION);
assert(!(handle->flags & UV_HANDLE_PIPESERVER));
if (handle->flags & UV_HANDLE_CLOSING)
return UV_EINVAL;
if (handle->handle != INVALID_HANDLE_VALUE)
return UV_EBUSY;
@ -478,18 +477,17 @@ static int uv_set_pipe_handle(uv_loop_t* loop,
*/
if (!GetNamedPipeHandleState(pipeHandle, &current_mode, NULL, NULL,
NULL, NULL, 0)) {
return -1;
return uv_translate_sys_error(GetLastError());
} else if (current_mode & PIPE_NOWAIT) {
SetLastError(ERROR_ACCESS_DENIED);
return -1;
return UV_EACCES;
}
} else {
/* If this returns ERROR_INVALID_PARAMETER we probably opened
* something that is not a pipe. */
if (err == ERROR_INVALID_PARAMETER) {
SetLastError(WSAENOTSOCK);
return UV_ENOTSOCK;
}
return -1;
return uv_translate_sys_error(err);
}
}
@ -500,13 +498,15 @@ static int uv_set_pipe_handle(uv_loop_t* loop,
sizeof(mode_info),
FileModeInformation);
if (nt_status != STATUS_SUCCESS) {
return -1;
return uv_translate_sys_error(err);
}
if (mode_info.Mode & FILE_SYNCHRONOUS_IO_ALERT ||
mode_info.Mode & FILE_SYNCHRONOUS_IO_NONALERT) {
/* Non-overlapped pipe. */
handle->flags |= UV_HANDLE_NON_OVERLAPPED_PIPE;
handle->pipe.conn.readfile_thread_handle = NULL;
InitializeCriticalSection(&handle->pipe.conn.readfile_thread_lock);
} else {
/* Overlapped pipe. Try to associate with IOCP. */
if (CreateIoCompletionPort(pipeHandle,
@ -578,32 +578,19 @@ static DWORD WINAPI pipe_shutdown_thread_proc(void* parameter) {
}
void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
int err;
void uv__pipe_shutdown(uv_loop_t* loop, uv_pipe_t* handle, uv_shutdown_t *req) {
DWORD result;
uv_shutdown_t* req;
NTSTATUS nt_status;
IO_STATUS_BLOCK io_status;
FILE_PIPE_LOCAL_INFORMATION pipe_info;
uv__ipc_xfer_queue_item_t* xfer_queue_item;
if ((handle->flags & UV_HANDLE_CONNECTION) &&
handle->stream.conn.shutdown_req != NULL &&
handle->stream.conn.write_reqs_pending == 0) {
req = handle->stream.conn.shutdown_req;
/* Clear the shutdown_req field so we don't go here again. */
handle->stream.conn.shutdown_req = NULL;
assert(handle->flags & UV_HANDLE_CONNECTION);
assert(req != NULL);
assert(handle->stream.conn.write_reqs_pending == 0);
SET_REQ_SUCCESS(req);
if (handle->flags & UV_HANDLE_CLOSING) {
UNREGISTER_HANDLE_REQ(loop, handle, req);
/* Already closing. Cancel the shutdown. */
if (req->cb) {
req->cb(req, UV_ECANCELED);
}
DECREASE_PENDING_REQ_COUNT(handle);
uv__insert_pending_req(loop, (uv_req_t*) req);
return;
}
@ -615,22 +602,16 @@ void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
FilePipeLocalInformation);
if (nt_status != STATUS_SUCCESS) {
/* Failure */
UNREGISTER_HANDLE_REQ(loop, handle, req);
handle->flags |= UV_HANDLE_WRITABLE; /* Questionable */
if (req->cb) {
err = pRtlNtStatusToDosError(nt_status);
req->cb(req, uv_translate_sys_error(err));
}
DECREASE_PENDING_REQ_COUNT(handle);
SET_REQ_ERROR(req, pRtlNtStatusToDosError(nt_status));
handle->flags |= UV_HANDLE_WRITABLE; /* Questionable. */
uv__insert_pending_req(loop, (uv_req_t*) req);
return;
}
if (pipe_info.OutboundQuota == pipe_info.WriteQuotaAvailable) {
/* Short-circuit, no need to call FlushFileBuffers. */
uv_insert_pending_req(loop, (uv_req_t*) req);
/* Short-circuit, no need to call FlushFileBuffers:
* all writes have been read. */
uv__insert_pending_req(loop, (uv_req_t*) req);
return;
}
@ -638,26 +619,20 @@ void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
result = QueueUserWorkItem(pipe_shutdown_thread_proc,
req,
WT_EXECUTELONGFUNCTION);
if (result) {
return;
} else {
/* Failure. */
UNREGISTER_HANDLE_REQ(loop, handle, req);
handle->flags |= UV_HANDLE_WRITABLE; /* Questionable */
if (req->cb) {
err = GetLastError();
req->cb(req, uv_translate_sys_error(err));
}
DECREASE_PENDING_REQ_COUNT(handle);
if (!result) {
SET_REQ_ERROR(req, GetLastError());
handle->flags |= UV_HANDLE_WRITABLE; /* Questionable. */
uv__insert_pending_req(loop, (uv_req_t*) req);
return;
}
}
}
if (handle->flags & UV_HANDLE_CLOSING &&
handle->reqs_pending == 0) {
void uv__pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
uv__ipc_xfer_queue_item_t* xfer_queue_item;
assert(handle->reqs_pending == 0);
assert(handle->flags & UV_HANDLE_CLOSING);
assert(!(handle->flags & UV_HANDLE_CLOSED));
if (handle->flags & UV_HANDLE_CONNECTION) {
@ -706,7 +681,6 @@ void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
}
uv__handle_close(handle);
}
}
@ -731,7 +705,9 @@ int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
if (!name) {
return UV_EINVAL;
}
if (uv__is_closing(handle)) {
return UV_EINVAL;
}
if (!(handle->flags & UV_HANDLE_PIPESERVER)) {
handle->pipe.serv.pending_instances = default_pending_pipe_instances;
}
@ -815,7 +791,7 @@ static DWORD WINAPI pipe_connect_thread_proc(void* parameter) {
assert(loop);
/* We're here because CreateFile on a pipe returned ERROR_PIPE_BUSY. We wait
* for the pipe to become available with WaitNamedPipe. */
* up to 30 seconds for the pipe to become available with WaitNamedPipe. */
while (WaitNamedPipeW(handle->name, 30000)) {
/* The pipe is now available, try to connect. */
pipeHandle = open_named_pipe(handle->name, &duplex_flags);
@ -825,9 +801,10 @@ static DWORD WINAPI pipe_connect_thread_proc(void* parameter) {
SwitchToThread();
}
if (pipeHandle != INVALID_HANDLE_VALUE &&
!uv_set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags)) {
if (pipeHandle != INVALID_HANDLE_VALUE) {
SET_REQ_SUCCESS(req);
req->u.connect.pipeHandle = pipeHandle;
req->u.connect.duplex_flags = duplex_flags;
} else {
SET_REQ_ERROR(req, GetLastError());
}
@ -849,6 +826,18 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
UV_REQ_INIT(req, UV_CONNECT);
req->handle = (uv_stream_t*) handle;
req->cb = cb;
req->u.connect.pipeHandle = INVALID_HANDLE_VALUE;
req->u.connect.duplex_flags = 0;
if (handle->flags & UV_HANDLE_PIPESERVER) {
err = ERROR_INVALID_PARAMETER;
goto error;
}
if (handle->flags & UV_HANDLE_CONNECTION) {
err = ERROR_PIPE_BUSY;
goto error;
}
uv__pipe_connection_init(handle);
/* Convert name to UTF16. */
nameSize = MultiByteToWideChar(CP_UTF8, 0, name, -1, NULL, 0) * sizeof(WCHAR);
@ -888,19 +877,10 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
goto error;
}
assert(pipeHandle != INVALID_HANDLE_VALUE);
if (uv_set_pipe_handle(loop,
(uv_pipe_t*) req->handle,
pipeHandle,
-1,
duplex_flags)) {
err = GetLastError();
goto error;
}
req->u.connect.pipeHandle = pipeHandle;
req->u.connect.duplex_flags = duplex_flags;
SET_REQ_SUCCESS(req);
uv_insert_pending_req(loop, (uv_req_t*) req);
uv__insert_pending_req(loop, (uv_req_t*) req);
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
return;
@ -916,7 +896,7 @@ error:
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, err);
uv_insert_pending_req(loop, (uv_req_t*) req);
uv__insert_pending_req(loop, (uv_req_t*) req);
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
return;
@ -937,7 +917,7 @@ void uv__pipe_interrupt_read(uv_pipe_t* handle) {
/* Cancel asynchronous read. */
r = CancelIoEx(handle->handle, &handle->read_req.u.io.overlapped);
assert(r || GetLastError() == ERROR_NOT_FOUND);
(void) r;
} else {
/* Cancel synchronous read (which is happening in the thread pool). */
HANDLE thread;
@ -973,17 +953,30 @@ void uv__pipe_interrupt_read(uv_pipe_t* handle) {
void uv__pipe_read_stop(uv_pipe_t* handle) {
handle->flags &= ~UV_HANDLE_READING;
DECREASE_ACTIVE_COUNT(handle->loop, handle);
uv__pipe_interrupt_read(handle);
}
/* Cleans up uv_pipe_t (server or connection) and all resources associated with
* it. */
void uv_pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle) {
void uv__pipe_close(uv_loop_t* loop, uv_pipe_t* handle) {
int i;
HANDLE pipeHandle;
if (handle->flags & UV_HANDLE_READING) {
handle->flags &= ~UV_HANDLE_READING;
DECREASE_ACTIVE_COUNT(loop, handle);
}
if (handle->flags & UV_HANDLE_LISTENING) {
handle->flags &= ~UV_HANDLE_LISTENING;
DECREASE_ACTIVE_COUNT(loop, handle);
}
handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
uv__handle_closing(handle);
uv__pipe_interrupt_read(handle);
if (handle->name) {
@ -1003,45 +996,27 @@ void uv_pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle) {
}
if (handle->flags & UV_HANDLE_CONNECTION) {
handle->flags &= ~UV_HANDLE_WRITABLE;
eof_timer_destroy(handle);
}
if ((handle->flags & UV_HANDLE_CONNECTION)
&& handle->handle != INVALID_HANDLE_VALUE)
&& handle->handle != INVALID_HANDLE_VALUE) {
/* This will eventually destroy the write queue for us too. */
close_pipe(handle);
}
if (handle->reqs_pending == 0)
uv__want_endgame(loop, (uv_handle_t*) handle);
}
void uv_pipe_close(uv_loop_t* loop, uv_pipe_t* handle) {
if (handle->flags & UV_HANDLE_READING) {
handle->flags &= ~UV_HANDLE_READING;
DECREASE_ACTIVE_COUNT(loop, handle);
}
if (handle->flags & UV_HANDLE_LISTENING) {
handle->flags &= ~UV_HANDLE_LISTENING;
DECREASE_ACTIVE_COUNT(loop, handle);
}
uv_pipe_cleanup(loop, handle);
if (handle->reqs_pending == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
}
handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
uv__handle_closing(handle);
}
static void uv_pipe_queue_accept(uv_loop_t* loop, uv_pipe_t* handle,
static void uv__pipe_queue_accept(uv_loop_t* loop, uv_pipe_t* handle,
uv_pipe_accept_t* req, BOOL firstInstance) {
assert(handle->flags & UV_HANDLE_LISTENING);
if (!firstInstance && !pipe_alloc_accept(loop, handle, req, FALSE)) {
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*) req);
uv__insert_pending_req(loop, (uv_req_t*) req);
handle->reqs_pending++;
return;
}
@ -1061,7 +1036,7 @@ static void uv_pipe_queue_accept(uv_loop_t* loop, uv_pipe_t* handle,
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, GetLastError());
}
uv_insert_pending_req(loop, (uv_req_t*) req);
uv__insert_pending_req(loop, (uv_req_t*) req);
handle->reqs_pending++;
return;
}
@ -1071,7 +1046,7 @@ static void uv_pipe_queue_accept(uv_loop_t* loop, uv_pipe_t* handle,
}
int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
int uv__pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
uv_loop_t* loop = server->loop;
uv_pipe_t* pipe_client;
uv_pipe_accept_t* req;
@ -1099,6 +1074,7 @@ int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
} else {
pipe_client = (uv_pipe_t*) client;
uv__pipe_connection_init(pipe_client);
/* Find a connection instance that has been connected, but not yet
* accepted. */
@ -1110,7 +1086,6 @@ int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
}
/* Initialize the client handle and copy the pipeHandle to the client */
uv_pipe_connection_init(pipe_client);
pipe_client->handle = req->pipeHandle;
pipe_client->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
@ -1121,7 +1096,7 @@ int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
server->handle = INVALID_HANDLE_VALUE;
if (!(server->flags & UV_HANDLE_CLOSING)) {
uv_pipe_queue_accept(loop, server, req, FALSE);
uv__pipe_queue_accept(loop, server, req, FALSE);
}
}
@ -1130,7 +1105,7 @@ int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
/* Starts listening for connections for the given pipe. */
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
int uv__pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
uv_loop_t* loop = handle->loop;
int i;
@ -1162,7 +1137,7 @@ int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
assert(handle->pipe.serv.accept_reqs[0].pipeHandle != INVALID_HANDLE_VALUE);
for (i = 0; i < handle->pipe.serv.pending_instances; i++) {
uv_pipe_queue_accept(loop, handle, &handle->pipe.serv.accept_reqs[i], i == 0);
uv__pipe_queue_accept(loop, handle, &handle->pipe.serv.accept_reqs[i], i == 0);
}
return 0;
@ -1306,7 +1281,7 @@ static void CALLBACK post_completion_write_wait(void* context, BOOLEAN timed_out
}
static void uv_pipe_queue_read(uv_loop_t* loop, uv_pipe_t* handle) {
static void uv__pipe_queue_read(uv_loop_t* loop, uv_pipe_t* handle) {
uv_read_t* req;
int result;
@ -1365,13 +1340,13 @@ static void uv_pipe_queue_read(uv_loop_t* loop, uv_pipe_t* handle) {
return;
error:
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
handle->flags |= UV_HANDLE_READ_PENDING;
handle->reqs_pending++;
}
int uv_pipe_read_start(uv_pipe_t* handle,
int uv__pipe_read_start(uv_pipe_t* handle,
uv_alloc_cb alloc_cb,
uv_read_cb read_cb) {
uv_loop_t* loop = handle->loop;
@ -1391,14 +1366,14 @@ int uv_pipe_read_start(uv_pipe_t* handle,
uv_fatal_error(GetLastError(), "CreateEvent");
}
}
uv_pipe_queue_read(loop, handle);
uv__pipe_queue_read(loop, handle);
}
return 0;
}
static void uv_insert_non_overlapped_write_req(uv_pipe_t* handle,
static void uv__insert_non_overlapped_write_req(uv_pipe_t* handle,
uv_write_t* req) {
req->next_req = NULL;
if (handle->pipe.conn.non_overlapped_writes_tail) {
@ -1434,7 +1409,7 @@ static uv_write_t* uv_remove_non_overlapped_write_req(uv_pipe_t* handle) {
}
static void uv_queue_non_overlapped_write(uv_pipe_t* handle) {
static void uv__queue_non_overlapped_write(uv_pipe_t* handle) {
uv_write_t* req = uv_remove_non_overlapped_write_req(handle);
if (req) {
if (!QueueUserWorkItem(&uv_pipe_writefile_thread_proc,
@ -1575,9 +1550,9 @@ static int uv__pipe_write_data(uv_loop_t* loop,
return 0;
} else if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) {
req->write_buffer = write_buf;
uv_insert_non_overlapped_write_req(handle, req);
uv__insert_non_overlapped_write_req(handle, req);
if (handle->stream.conn.write_reqs_pending == 0) {
uv_queue_non_overlapped_write(handle);
uv__queue_non_overlapped_write(handle);
}
/* Request queued by the kernel. */
@ -1790,7 +1765,7 @@ int uv__pipe_write(uv_loop_t* loop,
}
static void uv_pipe_read_eof(uv_loop_t* loop, uv_pipe_t* handle,
static void uv__pipe_read_eof(uv_loop_t* loop, uv_pipe_t* handle,
uv_buf_t buf) {
/* If there is an eof timer running, we don't need it any more, so discard
* it. */
@ -1802,7 +1777,7 @@ static void uv_pipe_read_eof(uv_loop_t* loop, uv_pipe_t* handle,
}
static void uv_pipe_read_error(uv_loop_t* loop, uv_pipe_t* handle, int error,
static void uv__pipe_read_error(uv_loop_t* loop, uv_pipe_t* handle, int error,
uv_buf_t buf) {
/* If there is an eof timer running, we don't need it any more, so discard
* it. */
@ -1814,12 +1789,12 @@ static void uv_pipe_read_error(uv_loop_t* loop, uv_pipe_t* handle, int error,
}
static void uv_pipe_read_error_or_eof(uv_loop_t* loop, uv_pipe_t* handle,
static void uv__pipe_read_error_or_eof(uv_loop_t* loop, uv_pipe_t* handle,
int error, uv_buf_t buf) {
if (error == ERROR_BROKEN_PIPE) {
uv_pipe_read_eof(loop, handle, buf);
uv__pipe_read_eof(loop, handle, buf);
} else {
uv_pipe_read_error(loop, handle, error, buf);
uv__pipe_read_error(loop, handle, error, buf);
}
}
@ -1890,7 +1865,7 @@ static DWORD uv__pipe_read_data(uv_loop_t* loop,
/* Read into the user buffer. */
if (!ReadFile(handle->handle, buf.base, max_bytes, &bytes_read, NULL)) {
uv_pipe_read_error_or_eof(loop, handle, GetLastError(), buf);
uv__pipe_read_error_or_eof(loop, handle, GetLastError(), buf);
return 0; /* Break out of read loop. */
}
@ -1977,12 +1952,12 @@ invalid:
err = WSAECONNABORTED; /* Maps to UV_ECONNABORTED. */
error:
uv_pipe_read_error_or_eof(loop, handle, err, uv_null_buf_);
uv__pipe_read_error_or_eof(loop, handle, err, uv_null_buf_);
return 0; /* Break out of read loop. */
}
void uv_process_pipe_read_req(uv_loop_t* loop,
void uv__process_pipe_read_req(uv_loop_t* loop,
uv_pipe_t* handle,
uv_req_t* req) {
assert(handle->type == UV_NAMED_PIPE);
@ -2005,7 +1980,7 @@ void uv_process_pipe_read_req(uv_loop_t* loop,
* indicate an ERROR_OPERATION_ABORTED error. This error isn't relevant to
* the user; we'll start a new zero-read at the end of this function. */
if (err != ERROR_OPERATION_ABORTED)
uv_pipe_read_error_or_eof(loop, handle, err, uv_null_buf_);
uv__pipe_read_error_or_eof(loop, handle, err, uv_null_buf_);
} else {
/* The zero-read completed without error, indicating there is data
@ -2015,7 +1990,7 @@ void uv_process_pipe_read_req(uv_loop_t* loop,
/* Get the number of bytes available. */
avail = 0;
if (!PeekNamedPipe(handle->handle, NULL, 0, NULL, &avail, NULL))
uv_pipe_read_error_or_eof(loop, handle, GetLastError(), uv_null_buf_);
uv__pipe_read_error_or_eof(loop, handle, GetLastError(), uv_null_buf_);
/* Read until we've either read all the bytes available, or the 'reading'
* flag is cleared. */
@ -2044,12 +2019,12 @@ void uv_process_pipe_read_req(uv_loop_t* loop,
/* Start another zero-read request if necessary. */
if ((handle->flags & UV_HANDLE_READING) &&
!(handle->flags & UV_HANDLE_READ_PENDING)) {
uv_pipe_queue_read(loop, handle);
uv__pipe_queue_read(loop, handle);
}
}
void uv_process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_write_t* req) {
int err;
@ -2091,26 +2066,25 @@ void uv_process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE &&
handle->pipe.conn.non_overlapped_writes_tail) {
assert(handle->stream.conn.write_reqs_pending > 0);
uv_queue_non_overlapped_write(handle);
uv__queue_non_overlapped_write(handle);
}
if (handle->stream.conn.shutdown_req != NULL &&
handle->stream.conn.write_reqs_pending == 0) {
uv_want_endgame(loop, (uv_handle_t*)handle);
}
if (handle->stream.conn.write_reqs_pending == 0)
if (handle->flags & UV_HANDLE_SHUTTING)
uv__pipe_shutdown(loop, handle, handle->stream.conn.shutdown_req);
DECREASE_PENDING_REQ_COUNT(handle);
}
void uv_process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_req_t* raw_req) {
uv_pipe_accept_t* req = (uv_pipe_accept_t*) raw_req;
assert(handle->type == UV_NAMED_PIPE);
if (handle->flags & UV_HANDLE_CLOSING) {
/* The req->pipeHandle should be freed already in uv_pipe_cleanup(). */
/* The req->pipeHandle should be freed already in uv__pipe_close(). */
assert(req->pipeHandle == INVALID_HANDLE_VALUE);
DECREASE_PENDING_REQ_COUNT(handle);
return;
@ -2130,7 +2104,7 @@ void uv_process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
req->pipeHandle = INVALID_HANDLE_VALUE;
}
if (!(handle->flags & UV_HANDLE_CLOSING)) {
uv_pipe_queue_accept(loop, handle, req, FALSE);
uv__pipe_queue_accept(loop, handle, req, FALSE);
}
}
@ -2138,40 +2112,59 @@ void uv_process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
}
void uv_process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_connect_t* req) {
HANDLE pipeHandle;
DWORD duplex_flags;
int err;
assert(handle->type == UV_NAMED_PIPE);
UNREGISTER_HANDLE_REQ(loop, handle, req);
if (req->cb) {
err = 0;
if (REQ_SUCCESS(req)) {
uv_pipe_connection_init(handle);
pipeHandle = req->u.connect.pipeHandle;
duplex_flags = req->u.connect.duplex_flags;
err = uv__set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags);
if (err)
CloseHandle(pipeHandle);
} else {
err = GET_REQ_ERROR(req);
}
req->cb(req, uv_translate_sys_error(err));
err = uv_translate_sys_error(GET_REQ_ERROR(req));
}
if (req->cb)
req->cb(req, err);
DECREASE_PENDING_REQ_COUNT(handle);
}
void uv_process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_shutdown_t* req) {
int err;
assert(handle->type == UV_NAMED_PIPE);
/* Clear the shutdown_req field so we don't go here again. */
handle->stream.conn.shutdown_req = NULL;
handle->flags &= ~UV_HANDLE_SHUTTING;
UNREGISTER_HANDLE_REQ(loop, handle, req);
if (handle->flags & UV_HANDLE_CLOSING) {
/* Already closing. Cancel the shutdown. */
err = UV_ECANCELED;
} else if (!REQ_SUCCESS(req)) {
/* An error occurred in trying to shutdown gracefully. */
err = uv_translate_sys_error(GET_REQ_ERROR(req));
} else {
if (handle->flags & UV_HANDLE_READABLE) {
/* Initialize and optionally start the eof timer. Only do this if the pipe
* is readable and we haven't seen EOF come in ourselves. */
eof_timer_init(handle);
/* If reading start the timer right now. Otherwise uv_pipe_queue_read will
/* If reading start the timer right now. Otherwise uv__pipe_queue_read will
* start it. */
if (handle->flags & UV_HANDLE_READ_PENDING) {
eof_timer_start(handle);
@ -2182,11 +2175,12 @@ void uv_process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
* know that we're done writing. */
close_pipe(handle);
}
if (req->cb) {
req->cb(req, 0);
err = 0;
}
if (req->cb)
req->cb(req, err);
DECREASE_PENDING_REQ_COUNT(handle);
}
@ -2201,6 +2195,7 @@ static void eof_timer_init(uv_pipe_t* pipe) {
r = uv_timer_init(pipe->loop, pipe->pipe.conn.eof_timer);
assert(r == 0); /* timers can't fail */
(void) r;
pipe->pipe.conn.eof_timer->data = pipe;
uv_unref((uv_handle_t*) pipe->pipe.conn.eof_timer);
}
@ -2231,9 +2226,9 @@ static void eof_timer_cb(uv_timer_t* timer) {
assert(pipe->type == UV_NAMED_PIPE);
/* This should always be true, since we start the timer only in
* uv_pipe_queue_read after successfully calling ReadFile, or in
* uv_process_pipe_shutdown_req if a read is pending, and we always
* immediately stop the timer in uv_process_pipe_read_req. */
* uv__pipe_queue_read after successfully calling ReadFile, or in
* uv__process_pipe_shutdown_req if a read is pending, and we always
* immediately stop the timer in uv__process_pipe_read_req. */
assert(pipe->flags & UV_HANDLE_READ_PENDING);
/* If there are many packets coming off the iocp then the timer callback may
@ -2254,7 +2249,7 @@ static void eof_timer_cb(uv_timer_t* timer) {
/* Report the eof and update flags. This will get reported even if the user
* stopped reading in the meantime. TODO: is that okay? */
uv_pipe_read_eof(loop, pipe, uv_null_buf_);
uv__pipe_read_eof(loop, pipe, uv_null_buf_);
}
@ -2280,10 +2275,16 @@ int uv_pipe_open(uv_pipe_t* pipe, uv_file file) {
IO_STATUS_BLOCK io_status;
FILE_ACCESS_INFORMATION access;
DWORD duplex_flags = 0;
int err;
if (os_handle == INVALID_HANDLE_VALUE)
return UV_EBADF;
if (pipe->flags & UV_HANDLE_PIPESERVER)
return UV_EINVAL;
if (pipe->flags & UV_HANDLE_CONNECTION)
return UV_EBUSY;
uv__pipe_connection_init(pipe);
uv__once_init();
/* In order to avoid closing a stdio file descriptor 0-2, duplicate the
* underlying OS handle and forget about the original fd.
@ -2300,6 +2301,7 @@ int uv_pipe_open(uv_pipe_t* pipe, uv_file file) {
FALSE,
DUPLICATE_SAME_ACCESS))
return uv_translate_sys_error(GetLastError());
assert(os_handle != INVALID_HANDLE_VALUE);
file = -1;
}
@ -2327,17 +2329,17 @@ int uv_pipe_open(uv_pipe_t* pipe, uv_file file) {
if (access.AccessFlags & FILE_READ_DATA)
duplex_flags |= UV_HANDLE_READABLE;
if (os_handle == INVALID_HANDLE_VALUE ||
uv_set_pipe_handle(pipe->loop,
err = uv__set_pipe_handle(pipe->loop,
pipe,
os_handle,
file,
duplex_flags) == -1) {
return UV_EINVAL;
duplex_flags);
if (err) {
if (file == -1)
CloseHandle(os_handle);
return err;
}
uv_pipe_connection_init(pipe);
if (pipe->ipc) {
assert(!(pipe->flags & UV_HANDLE_NON_OVERLAPPED_PIPE));
pipe->pipe.conn.ipc_remote_pid = uv_os_getppid();
@ -2361,6 +2363,51 @@ static int uv__pipe_getname(const uv_pipe_t* handle, char* buffer, size_t* size)
uv__once_init();
name_info = NULL;
if (handle->name != NULL) {
/* The user might try to query the name before we are connected,
* and this is just easier to return the cached value if we have it. */
name_buf = handle->name;
name_len = wcslen(name_buf);
/* check how much space we need */
addrlen = WideCharToMultiByte(CP_UTF8,
0,
name_buf,
name_len,
NULL,
0,
NULL,
NULL);
if (!addrlen) {
*size = 0;
err = uv_translate_sys_error(GetLastError());
return err;
} else if (addrlen >= *size) {
*size = addrlen + 1;
err = UV_ENOBUFS;
goto error;
}
addrlen = WideCharToMultiByte(CP_UTF8,
0,
name_buf,
name_len,
buffer,
addrlen,
NULL,
NULL);
if (!addrlen) {
*size = 0;
err = uv_translate_sys_error(GetLastError());
return err;
}
*size = addrlen;
buffer[addrlen] = '\0';
return 0;
}
if (handle->handle == INVALID_HANDLE_VALUE) {
*size = 0;
return UV_EINVAL;
@ -2498,6 +2545,11 @@ int uv_pipe_getpeername(const uv_pipe_t* handle, char* buffer, size_t* size) {
if (handle->handle != INVALID_HANDLE_VALUE)
return uv__pipe_getname(handle, buffer, size);
if (handle->flags & UV_HANDLE_CONNECTION) {
if (handle->name != NULL)
return uv__pipe_getname(handle, buffer, size);
}
return UV_EBADF;
}
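
For orientation, a minimal client-side sketch of the public uv_pipe API whose Windows internals (uv_pipe_connect, uv__process_pipe_connect_req, uv__pipe_close, uv__pipe_getname) are reworked above. The pipe name, callbacks and error handling are illustrative assumptions, not part of the patch.

    #include <uv.h>

    /* Hypothetical pipe name; a server must already be listening on it. */
    #define DEMO_PIPE_NAME "\\\\.\\pipe\\uv-demo"

    static void on_pipe_close(uv_handle_t* handle) {
      (void) handle;
    }

    static void on_pipe_connect(uv_connect_t* req, int status) {
      /* status is 0 on success or a UV_* error, e.g. when no server exists. */
      uv_close((uv_handle_t*) req->handle, on_pipe_close);
    }

    int main(void) {
      uv_loop_t* loop = uv_default_loop();
      uv_pipe_t client;
      uv_connect_t connect_req;

      if (uv_pipe_init(loop, &client, 0) != 0)
        return 1;
      uv_pipe_connect(&connect_req, &client, DEMO_PIPE_NAME, on_pipe_connect);
      return uv_run(loop, UV_RUN_DEFAULT);
    }
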


@ -122,14 +122,14 @@ static void uv__fast_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
memset(&req->u.io.overlapped, 0, sizeof req->u.io.overlapped);
result = uv_msafd_poll((SOCKET) handle->peer_socket,
result = uv__msafd_poll((SOCKET) handle->peer_socket,
afd_poll_info,
afd_poll_info,
&req->u.io.overlapped);
if (result != 0 && WSAGetLastError() != WSA_IO_PENDING) {
/* Queue this req, reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, req);
uv__insert_pending_req(loop, req);
}
}
@ -195,7 +195,7 @@ static void uv__fast_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
} else if ((handle->flags & UV_HANDLE_CLOSING) &&
handle->submitted_events_1 == 0 &&
handle->submitted_events_2 == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
}
}
@ -357,7 +357,7 @@ static void uv__slow_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
WT_EXECUTELONGFUNCTION)) {
/* Make this req pending, reporting an error. */
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, req);
uv__insert_pending_req(loop, req);
}
}
@ -400,7 +400,7 @@ static void uv__slow_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
} else if ((handle->flags & UV_HANDLE_CLOSING) &&
handle->submitted_events_1 == 0 &&
handle->submitted_events_2 == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
}
}
@ -524,7 +524,7 @@ int uv_poll_stop(uv_poll_t* handle) {
}
void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) {
void uv__process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) {
if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
uv__fast_poll_process_poll_req(loop, handle, req);
} else {
@ -533,7 +533,7 @@ void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) {
}
int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
int uv__poll_close(uv_loop_t* loop, uv_poll_t* handle) {
AFD_POLL_INFO afd_poll_info;
DWORD error;
int result;
@ -543,7 +543,7 @@ int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
if (handle->submitted_events_1 == 0 &&
handle->submitted_events_2 == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
return 0;
}
@ -559,7 +559,7 @@ int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
afd_poll_info.Handles[0].Status = 0;
afd_poll_info.Handles[0].Events = AFD_POLL_ALL;
result = uv_msafd_poll(handle->socket,
result = uv__msafd_poll(handle->socket,
&afd_poll_info,
uv__get_afd_poll_info_dummy(),
uv__get_overlapped_dummy());
@ -574,7 +574,7 @@ int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
}
void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle) {
void uv__poll_endgame(uv_loop_t* loop, uv_poll_t* handle) {
assert(handle->flags & UV_HANDLE_CLOSING);
assert(!(handle->flags & UV_HANDLE_CLOSED));
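
A short sketch of the public uv_poll API backed by the fast/slow poll paths above. The socket is assumed to already exist, be non-blocking and be owned by the caller; the callback body is illustrative.

    #include <uv.h>

    static void on_poll(uv_poll_t* handle, int status, int events) {
      if (status < 0) {            /* status is a UV_* error on failure */
        uv_poll_stop(handle);
        return;
      }
      if (events & UV_READABLE) {
        /* The socket can be read without blocking; do a non-blocking recv here. */
      }
    }

    /* sock: an existing non-blocking socket owned by the caller. */
    static int watch_socket(uv_loop_t* loop, uv_poll_t* watcher, uv_os_sock_t sock) {
      int err = uv_poll_init_socket(loop, watcher, sock);
      if (err != 0)
        return err;
      return uv_poll_start(watcher, UV_READABLE | UV_WRITABLE, on_poll);
    }
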


@ -105,7 +105,7 @@ static void uv__init_global_job_handle(void) {
}
static int uv_utf8_to_utf16_alloc(const char* s, WCHAR** ws_ptr) {
static int uv__utf8_to_utf16_alloc(const char* s, WCHAR** ws_ptr) {
int ws_len, r;
WCHAR* ws;
@ -137,7 +137,7 @@ static int uv_utf8_to_utf16_alloc(const char* s, WCHAR** ws_ptr) {
}
static void uv_process_init(uv_loop_t* loop, uv_process_t* handle) {
static void uv__process_init(uv_loop_t* loop, uv_process_t* handle) {
uv__handle_init(loop, (uv_handle_t*) handle, UV_PROCESS);
handle->exit_cb = NULL;
handle->pid = 0;
@ -864,7 +864,7 @@ static void CALLBACK exit_wait_callback(void* data, BOOLEAN didTimeout) {
/* Called on main thread after a child process has exited. */
void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle) {
void uv__process_proc_exit(uv_loop_t* loop, uv_process_t* handle) {
int64_t exit_code;
DWORD status;
@ -874,7 +874,7 @@ void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle) {
/* If we're closing, don't call the exit callback. Just schedule a close
* callback now. */
if (handle->flags & UV_HANDLE_CLOSING) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
return;
}
@ -902,7 +902,7 @@ void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle) {
}
void uv_process_close(uv_loop_t* loop, uv_process_t* handle) {
void uv__process_close(uv_loop_t* loop, uv_process_t* handle) {
uv__handle_closing(handle);
if (handle->wait_handle != INVALID_HANDLE_VALUE) {
@ -918,12 +918,12 @@ void uv_process_close(uv_loop_t* loop, uv_process_t* handle) {
}
if (!handle->exit_cb_pending) {
uv_want_endgame(loop, (uv_handle_t*)handle);
uv__want_endgame(loop, (uv_handle_t*)handle);
}
}
void uv_process_endgame(uv_loop_t* loop, uv_process_t* handle) {
void uv__process_endgame(uv_loop_t* loop, uv_process_t* handle) {
assert(!handle->exit_cb_pending);
assert(handle->flags & UV_HANDLE_CLOSING);
assert(!(handle->flags & UV_HANDLE_CLOSED));
@ -948,7 +948,7 @@ int uv_spawn(uv_loop_t* loop,
PROCESS_INFORMATION info;
DWORD process_flags;
uv_process_init(loop, process);
uv__process_init(loop, process);
process->exit_cb = options->exit_cb;
if (options->flags & (UV_PROCESS_SETGID | UV_PROCESS_SETUID)) {
@ -969,7 +969,7 @@ int uv_spawn(uv_loop_t* loop,
UV_PROCESS_WINDOWS_HIDE_GUI |
UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS)));
err = uv_utf8_to_utf16_alloc(options->file, &application);
err = uv__utf8_to_utf16_alloc(options->file, &application);
if (err)
goto done;
@ -988,7 +988,7 @@ int uv_spawn(uv_loop_t* loop,
if (options->cwd) {
/* Explicit cwd */
err = uv_utf8_to_utf16_alloc(options->cwd, &cwd);
err = uv__utf8_to_utf16_alloc(options->cwd, &cwd);
if (err)
goto done;
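
A short sketch of the uv_spawn call that uv__process_init and uv__utf8_to_utf16_alloc serve. The spawned command, callback and option values are illustrative assumptions; most options are left at their zeroed defaults.

    #include <stdio.h>
    #include <string.h>
    #include <uv.h>

    static void on_exit_cb(uv_process_t* proc, int64_t exit_status, int term_signal) {
      printf("child exited: status=%lld signal=%d\n",
             (long long) exit_status, term_signal);
      uv_close((uv_handle_t*) proc, NULL);
    }

    int main(void) {
      uv_loop_t* loop = uv_default_loop();
      uv_process_t child;
      uv_process_options_t options;
      /* Illustrative command; replace with a program that exists on the host. */
      char* args[] = { "cmd.exe", "/c", "exit 0", NULL };
      int err;

      memset(&options, 0, sizeof(options));
      options.file = args[0];
      options.args = args;
      options.exit_cb = on_exit_cb;

      err = uv_spawn(loop, &child, &options);
      if (err != 0) {
        fprintf(stderr, "uv_spawn: %s\n", uv_strerror(err));
        return 1;
      }
      return uv_run(loop, UV_RUN_DEFAULT);
    }
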


@ -50,7 +50,7 @@
(pRtlNtStatusToDosError(GET_REQ_STATUS((req))))
#define GET_REQ_SOCK_ERROR(req) \
(uv_ntstatus_to_winsock_error(GET_REQ_STATUS((req))))
(uv__ntstatus_to_winsock_error(GET_REQ_STATUS((req))))
#define REGISTER_HANDLE_REQ(loop, handle, req) \
@ -82,12 +82,12 @@
}
INLINE static uv_req_t* uv_overlapped_to_req(OVERLAPPED* overlapped) {
INLINE static uv_req_t* uv__overlapped_to_req(OVERLAPPED* overlapped) {
return CONTAINING_RECORD(overlapped, uv_req_t, u.io.overlapped);
}
INLINE static void uv_insert_pending_req(uv_loop_t* loop, uv_req_t* req) {
INLINE static void uv__insert_pending_req(uv_loop_t* loop, uv_req_t* req) {
req->next_req = NULL;
if (loop->pending_reqs_tail) {
#ifdef _DEBUG
@ -115,19 +115,19 @@ INLINE static void uv_insert_pending_req(uv_loop_t* loop, uv_req_t* req) {
do { \
switch (((uv_handle_t*) (req)->handle_at)->type) { \
case UV_TCP: \
uv_process_tcp_##method##_req(loop, \
uv__process_tcp_##method##_req(loop, \
(uv_tcp_t*) ((req)->handle_at), \
req); \
break; \
\
case UV_NAMED_PIPE: \
uv_process_pipe_##method##_req(loop, \
uv__process_pipe_##method##_req(loop, \
(uv_pipe_t*) ((req)->handle_at), \
req); \
break; \
\
case UV_TTY: \
uv_process_tty_##method##_req(loop, \
uv__process_tty_##method##_req(loop, \
(uv_tty_t*) ((req)->handle_at), \
req); \
break; \
@ -138,13 +138,13 @@ INLINE static void uv_insert_pending_req(uv_loop_t* loop, uv_req_t* req) {
} while (0)
INLINE static int uv_process_reqs(uv_loop_t* loop) {
INLINE static void uv__process_reqs(uv_loop_t* loop) {
uv_req_t* req;
uv_req_t* first;
uv_req_t* next;
if (loop->pending_reqs_tail == NULL)
return 0;
return;
first = loop->pending_reqs_tail->next_req;
next = first;
@ -172,50 +172,43 @@ INLINE static int uv_process_reqs(uv_loop_t* loop) {
break;
case UV_SHUTDOWN:
/* Tcp shutdown requests don't come here. */
assert(((uv_shutdown_t*) req)->handle->type == UV_NAMED_PIPE);
uv_process_pipe_shutdown_req(
loop,
(uv_pipe_t*) ((uv_shutdown_t*) req)->handle,
(uv_shutdown_t*) req);
DELEGATE_STREAM_REQ(loop, (uv_shutdown_t*) req, shutdown, handle);
break;
case UV_UDP_RECV:
uv_process_udp_recv_req(loop, (uv_udp_t*) req->data, req);
uv__process_udp_recv_req(loop, (uv_udp_t*) req->data, req);
break;
case UV_UDP_SEND:
uv_process_udp_send_req(loop,
uv__process_udp_send_req(loop,
((uv_udp_send_t*) req)->handle,
(uv_udp_send_t*) req);
break;
case UV_WAKEUP:
uv_process_async_wakeup_req(loop, (uv_async_t*) req->data, req);
uv__process_async_wakeup_req(loop, (uv_async_t*) req->data, req);
break;
case UV_SIGNAL_REQ:
uv_process_signal_req(loop, (uv_signal_t*) req->data, req);
uv__process_signal_req(loop, (uv_signal_t*) req->data, req);
break;
case UV_POLL_REQ:
uv_process_poll_req(loop, (uv_poll_t*) req->data, req);
uv__process_poll_req(loop, (uv_poll_t*) req->data, req);
break;
case UV_PROCESS_EXIT:
uv_process_proc_exit(loop, (uv_process_t*) req->data);
uv__process_proc_exit(loop, (uv_process_t*) req->data);
break;
case UV_FS_EVENT_REQ:
uv_process_fs_event_req(loop, req, (uv_fs_event_t*) req->data);
uv__process_fs_event_req(loop, req, (uv_fs_event_t*) req->data);
break;
default:
assert(0);
}
}
return 1;
}
#endif /* UV_WIN_REQ_INL_H_ */
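
The uv__insert_pending_req / uv__process_reqs pair above keeps pending requests on a list reached only through the loop's pending_reqs_tail pointer, detaching the whole list before dispatching each request by type. A standalone sketch of one common shape for such a queue, a circular singly linked list addressed by its tail, with a generic node type and illustrative names rather than libuv's actual structs:

    #include <stddef.h>

    typedef struct node_s {
      struct node_s* next;
      int value;
    } node_t;

    /* Append to a circular list addressed only by its tail pointer. */
    static void enqueue(node_t** tail, node_t* n) {
      if (*tail == NULL) {
        n->next = n;               /* single element points at itself */
      } else {
        n->next = (*tail)->next;   /* new node points at the head */
        (*tail)->next = n;
      }
      *tail = n;
    }

    /* Detach the whole list, then walk it head-to-tail exactly once. */
    static void drain(node_t** tail, void (*handle)(node_t*)) {
      node_t* first;
      node_t* next;
      if (*tail == NULL)
        return;
      first = (*tail)->next;
      *tail = NULL;
      next = first;
      do {
        node_t* cur = next;
        next = (cur->next != first) ? cur->next : NULL;
        handle(cur);
      } while (next != NULL);
    }
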


@ -39,7 +39,7 @@ int uv__signal_start(uv_signal_t* handle,
int signum,
int oneshot);
void uv_signals_init(void) {
void uv__signals_init(void) {
InitializeCriticalSection(&uv__signal_lock);
if (!SetConsoleCtrlHandler(uv__signal_control_handler, TRUE))
abort();
@ -231,7 +231,7 @@ int uv__signal_start(uv_signal_t* handle,
}
void uv_process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
void uv__process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
uv_req_t* req) {
long dispatched_signum;
@ -254,22 +254,22 @@ void uv_process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
if (handle->flags & UV_HANDLE_CLOSING) {
/* When it is closing, it must be stopped at this point. */
assert(handle->signum == 0);
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
}
}
void uv_signal_close(uv_loop_t* loop, uv_signal_t* handle) {
void uv__signal_close(uv_loop_t* loop, uv_signal_t* handle) {
uv_signal_stop(handle);
uv__handle_closing(handle);
if (handle->pending_signum == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
}
}
void uv_signal_endgame(uv_loop_t* loop, uv_signal_t* handle) {
void uv__signal_endgame(uv_loop_t* loop, uv_signal_t* handle) {
assert(handle->flags & UV_HANDLE_CLOSING);
assert(!(handle->flags & UV_HANDLE_CLOSED));
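
A short sketch of the public uv_signal API behind uv__signals_init and uv__process_signal_req. The watched signal and callback body are illustrative.

    #include <signal.h>
    #include <stdio.h>
    #include <uv.h>

    static void on_signal(uv_signal_t* handle, int signum) {
      printf("got signal %d, stopping watcher\n", signum);
      uv_signal_stop(handle);
      uv_close((uv_handle_t*) handle, NULL);
    }

    int main(void) {
      uv_loop_t* loop = uv_default_loop();
      uv_signal_t sig;

      if (uv_signal_init(loop, &sig) != 0)
        return 1;
      /* Watch for Ctrl+C / SIGINT. */
      if (uv_signal_start(&sig, on_signal, SIGINT) != 0)
        return 1;
      return uv_run(loop, UV_RUN_DEFAULT);
    }
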


@ -30,7 +30,7 @@
#include "req-inl.h"
INLINE static void uv_stream_init(uv_loop_t* loop,
INLINE static void uv__stream_init(uv_loop_t* loop,
uv_stream_t* handle,
uv_handle_type type) {
uv__handle_init(loop, (uv_handle_t*) handle, type);
@ -46,7 +46,7 @@ INLINE static void uv_stream_init(uv_loop_t* loop,
}
INLINE static void uv_connection_init(uv_stream_t* handle) {
INLINE static void uv__connection_init(uv_stream_t* handle) {
handle->flags |= UV_HANDLE_CONNECTION;
}


@ -29,14 +29,16 @@
int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
int err;
if (uv__is_closing(stream)) {
return UV_EINVAL;
}
err = ERROR_INVALID_PARAMETER;
switch (stream->type) {
case UV_TCP:
err = uv_tcp_listen((uv_tcp_t*)stream, backlog, cb);
err = uv__tcp_listen((uv_tcp_t*)stream, backlog, cb);
break;
case UV_NAMED_PIPE:
err = uv_pipe_listen((uv_pipe_t*)stream, backlog, cb);
err = uv__pipe_listen((uv_pipe_t*)stream, backlog, cb);
break;
default:
assert(0);
@ -52,10 +54,10 @@ int uv_accept(uv_stream_t* server, uv_stream_t* client) {
err = ERROR_INVALID_PARAMETER;
switch (server->type) {
case UV_TCP:
err = uv_tcp_accept((uv_tcp_t*)server, (uv_tcp_t*)client);
err = uv__tcp_accept((uv_tcp_t*)server, (uv_tcp_t*)client);
break;
case UV_NAMED_PIPE:
err = uv_pipe_accept((uv_pipe_t*)server, client);
err = uv__pipe_accept((uv_pipe_t*)server, client);
break;
default:
assert(0);
@ -73,13 +75,13 @@ int uv__read_start(uv_stream_t* handle,
err = ERROR_INVALID_PARAMETER;
switch (handle->type) {
case UV_TCP:
err = uv_tcp_read_start((uv_tcp_t*)handle, alloc_cb, read_cb);
err = uv__tcp_read_start((uv_tcp_t*)handle, alloc_cb, read_cb);
break;
case UV_NAMED_PIPE:
err = uv_pipe_read_start((uv_pipe_t*)handle, alloc_cb, read_cb);
err = uv__pipe_read_start((uv_pipe_t*)handle, alloc_cb, read_cb);
break;
case UV_TTY:
err = uv_tty_read_start((uv_tty_t*) handle, alloc_cb, read_cb);
err = uv__tty_read_start((uv_tty_t*) handle, alloc_cb, read_cb);
break;
default:
assert(0);
@ -97,7 +99,7 @@ int uv_read_stop(uv_stream_t* handle) {
err = 0;
if (handle->type == UV_TTY) {
err = uv_tty_read_stop((uv_tty_t*) handle);
err = uv__tty_read_stop((uv_tty_t*) handle);
} else if (handle->type == UV_NAMED_PIPE) {
uv__pipe_read_stop((uv_pipe_t*) handle);
} else {
@ -124,14 +126,14 @@ int uv_write(uv_write_t* req,
err = ERROR_INVALID_PARAMETER;
switch (handle->type) {
case UV_TCP:
err = uv_tcp_write(loop, req, (uv_tcp_t*) handle, bufs, nbufs, cb);
err = uv__tcp_write(loop, req, (uv_tcp_t*) handle, bufs, nbufs, cb);
break;
case UV_NAMED_PIPE:
err = uv__pipe_write(
loop, req, (uv_pipe_t*) handle, bufs, nbufs, NULL, cb);
break;
case UV_TTY:
err = uv_tty_write(loop, req, (uv_tty_t*) handle, bufs, nbufs, cb);
err = uv__tty_write(loop, req, (uv_tty_t*) handle, bufs, nbufs, cb);
break;
default:
assert(0);
@ -217,7 +219,12 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
uv_want_endgame(loop, (uv_handle_t*)handle);
if (handle->stream.conn.write_reqs_pending == 0) {
if (handle->type == UV_NAMED_PIPE)
uv__pipe_shutdown(loop, (uv_pipe_t*) handle, req);
else
uv__insert_pending_req(loop, (uv_req_t*) req);
}
return 0;
}
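
A short sketch of the generic stream calls dispatched above, a uv_write followed by uv_shutdown on an already-connected stream. The buffer contents, callbacks and the origin of the stream pointer are illustrative assumptions (the stream would normally come from a connect or accept callback).

    #include <uv.h>

    static void on_shutdown(uv_shutdown_t* req, int status) {
      /* status is 0 once all queued writes have drained and the side is shut. */
      uv_close((uv_handle_t*) req->handle, NULL);
    }

    static void on_write(uv_write_t* req, int status) {
      (void) req;
      (void) status;
    }

    /* stream: a connected uv_stream_t such as a uv_tcp_t or uv_pipe_t. */
    static int send_and_shutdown(uv_stream_t* stream,
                                 uv_write_t* write_req,
                                 uv_shutdown_t* shutdown_req) {
      static char msg[] = "bye\n";
      uv_buf_t buf = uv_buf_init(msg, sizeof(msg) - 1);
      int err = uv_write(write_req, stream, &buf, 1, on_write);
      if (err != 0)
        return err;
      /* The shutdown callback runs only after the pending write completes. */
      return uv_shutdown(shutdown_req, stream, on_shutdown);
    }
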

deps/uv/src/win/tcp.c vendored

@ -78,7 +78,7 @@ static int uv__tcp_keepalive(uv_tcp_t* handle, SOCKET socket, int enable, unsign
}
static int uv_tcp_set_socket(uv_loop_t* loop,
static int uv__tcp_set_socket(uv_loop_t* loop,
uv_tcp_t* handle,
SOCKET socket,
int family,
@ -162,7 +162,7 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* handle, unsigned int flags) {
if (flags & ~0xFF)
return UV_EINVAL;
uv_stream_init(loop, (uv_stream_t*) handle, UV_TCP);
uv__stream_init(loop, (uv_stream_t*) handle, UV_TCP);
handle->tcp.serv.accept_reqs = NULL;
handle->tcp.serv.pending_accepts = NULL;
handle->socket = INVALID_SOCKET;
@ -173,7 +173,7 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* handle, unsigned int flags) {
handle->delayed_error = 0;
/* If anything fails beyond this point we need to remove the handle from
* the handle queue, since it was added by uv__handle_init in uv_stream_init.
* the handle queue, since it was added by uv__handle_init in uv__stream_init.
*/
if (domain != AF_UNSPEC) {
@ -187,7 +187,7 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* handle, unsigned int flags) {
return uv_translate_sys_error(err);
}
err = uv_tcp_set_socket(handle->loop, handle, sock, domain, 0);
err = uv__tcp_set_socket(handle->loop, handle, sock, domain, 0);
if (err) {
closesocket(sock);
QUEUE_REMOVE(&handle->handle_queue);
@ -205,36 +205,40 @@ int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* handle) {
}
void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
void uv__process_tcp_shutdown_req(uv_loop_t* loop, uv_tcp_t* stream, uv_shutdown_t *req) {
int err;
assert(req);
assert(stream->stream.conn.write_reqs_pending == 0);
assert(!(stream->flags & UV_HANDLE_SHUT));
assert(stream->flags & UV_HANDLE_CONNECTION);
stream->stream.conn.shutdown_req = NULL;
stream->flags &= ~UV_HANDLE_SHUTTING;
UNREGISTER_HANDLE_REQ(loop, stream, req);
err = 0;
if (stream->flags & UV_HANDLE_CLOSING)
/* The user destroyed the stream before we got to do the shutdown. */
err = UV_ECANCELED;
else if (shutdown(stream->socket, SD_SEND) == SOCKET_ERROR)
err = uv_translate_sys_error(WSAGetLastError());
else /* Success. */
stream->flags |= UV_HANDLE_SHUT;
if (req->cb)
req->cb(req, err);
DECREASE_PENDING_REQ_COUNT(stream);
}
void uv__tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
unsigned int i;
uv_tcp_accept_t* req;
if (handle->flags & UV_HANDLE_CONNECTION &&
handle->stream.conn.shutdown_req != NULL &&
handle->stream.conn.write_reqs_pending == 0) {
UNREGISTER_HANDLE_REQ(loop, handle, handle->stream.conn.shutdown_req);
err = 0;
if (handle->flags & UV_HANDLE_CLOSING) {
err = ERROR_OPERATION_ABORTED;
} else if (shutdown(handle->socket, SD_SEND) == SOCKET_ERROR) {
err = WSAGetLastError();
}
if (handle->stream.conn.shutdown_req->cb) {
handle->stream.conn.shutdown_req->cb(handle->stream.conn.shutdown_req,
uv_translate_sys_error(err));
}
handle->stream.conn.shutdown_req = NULL;
DECREASE_PENDING_REQ_COUNT(handle);
return;
}
if (handle->flags & UV_HANDLE_CLOSING &&
handle->reqs_pending == 0) {
assert(handle->flags & UV_HANDLE_CLOSING);
assert(handle->reqs_pending == 0);
assert(!(handle->flags & UV_HANDLE_CLOSED));
assert(handle->socket == INVALID_SOCKET);
@ -271,7 +275,6 @@ void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
uv__handle_close(handle);
loop->active_tcp_streams--;
}
}
@ -286,7 +289,7 @@ void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
* See issue #1360.
*
*/
static int uv_tcp_try_bind(uv_tcp_t* handle,
static int uv__tcp_try_bind(uv_tcp_t* handle,
const struct sockaddr* addr,
unsigned int addrlen,
unsigned int flags) {
@ -305,7 +308,7 @@ static int uv_tcp_try_bind(uv_tcp_t* handle,
return WSAGetLastError();
}
err = uv_tcp_set_socket(handle->loop, handle, sock, addr->sa_family, 0);
err = uv__tcp_set_socket(handle->loop, handle, sock, addr->sa_family, 0);
if (err) {
closesocket(sock);
return err;
@ -385,7 +388,7 @@ static void CALLBACK post_write_completion(void* context, BOOLEAN timed_out) {
}
static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
static void uv__tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
uv_loop_t* loop = handle->loop;
BOOL success;
DWORD bytes;
@ -406,7 +409,7 @@ static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
accept_socket = socket(family, SOCK_STREAM, 0);
if (accept_socket == INVALID_SOCKET) {
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
handle->reqs_pending++;
return;
}
@ -414,7 +417,7 @@ static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
/* Make the socket non-inheritable */
if (!SetHandleInformation((HANDLE) accept_socket, HANDLE_FLAG_INHERIT, 0)) {
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
handle->reqs_pending++;
closesocket(accept_socket);
return;
@ -440,7 +443,7 @@ static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
/* Process the req without IOCP. */
req->accept_socket = accept_socket;
handle->reqs_pending++;
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
} else if (UV_SUCCEEDED_WITH_IOCP(success)) {
/* The req will be processed with IOCP. */
req->accept_socket = accept_socket;
@ -451,12 +454,12 @@ static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
req->event_handle, post_completion, (void*) req,
INFINITE, WT_EXECUTEINWAITTHREAD)) {
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
}
} else {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
handle->reqs_pending++;
/* Destroy the preallocated client socket. */
closesocket(accept_socket);
@ -469,7 +472,7 @@ static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
}
static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
static void uv__tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
uv_read_t* req;
uv_buf_t buf;
int result;
@ -524,7 +527,7 @@ static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
/* Process the req without IOCP. */
req->u.io.overlapped.InternalHigh = bytes;
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* The req will be processed with IOCP. */
if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
@ -533,12 +536,12 @@ static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
req->event_handle, post_completion, (void*) req,
INFINITE, WT_EXECUTEINWAITTHREAD)) {
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
}
} else {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
}
}
@ -558,7 +561,7 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
}
int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
int uv__tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
unsigned int i, simultaneous_accepts;
uv_tcp_accept_t* req;
int err;
@ -578,7 +581,7 @@ int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
}
if (!(handle->flags & UV_HANDLE_BOUND)) {
err = uv_tcp_try_bind(handle,
err = uv__tcp_try_bind(handle,
(const struct sockaddr*) &uv_addr_ip4_any_,
sizeof(uv_addr_ip4_any_),
0);
@ -589,7 +592,7 @@ int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
}
if (!handle->tcp.serv.func_acceptex) {
if (!uv_get_acceptex_function(handle->socket, &handle->tcp.serv.func_acceptex)) {
if (!uv__get_acceptex_function(handle->socket, &handle->tcp.serv.func_acceptex)) {
return WSAEAFNOSUPPORT;
}
}
@ -630,7 +633,7 @@ int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
req->event_handle = NULL;
}
uv_tcp_queue_accept(handle, req);
uv__tcp_queue_accept(handle, req);
}
/* Initialize other unused requests too, because uv_tcp_endgame doesn't
@ -650,7 +653,7 @@ int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
}
int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
int uv__tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
uv_loop_t* loop = server->loop;
int err = 0;
int family;
@ -672,7 +675,7 @@ int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
family = AF_INET;
}
err = uv_tcp_set_socket(client->loop,
err = uv__tcp_set_socket(client->loop,
client,
req->accept_socket,
family,
@ -680,7 +683,7 @@ int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
if (err) {
closesocket(req->accept_socket);
} else {
uv_connection_init((uv_stream_t*) client);
uv__connection_init((uv_stream_t*) client);
/* AcceptEx() implicitly binds the accepted socket. */
client->flags |= UV_HANDLE_BOUND | UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
}
@ -693,7 +696,7 @@ int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
if (!(server->flags & UV_HANDLE_CLOSING)) {
/* Check if we're in a middle of changing the number of pending accepts. */
if (!(server->flags & UV_HANDLE_TCP_ACCEPT_STATE_CHANGING)) {
uv_tcp_queue_accept(server, req);
uv__tcp_queue_accept(server, req);
} else {
/* We better be switching to a single pending accept. */
assert(server->flags & UV_HANDLE_TCP_SINGLE_ACCEPT);
@ -706,7 +709,7 @@ int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
* All previously queued accept requests are now processed.
* We now switch to queueing just a single accept.
*/
uv_tcp_queue_accept(server, &server->tcp.serv.accept_reqs[0]);
uv__tcp_queue_accept(server, &server->tcp.serv.accept_reqs[0]);
server->flags &= ~UV_HANDLE_TCP_ACCEPT_STATE_CHANGING;
server->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
}
@ -719,7 +722,7 @@ int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
}
int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
int uv__tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb) {
uv_loop_t* loop = handle->loop;
@ -738,7 +741,7 @@ int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
uv_fatal_error(GetLastError(), "CreateEvent");
}
}
uv_tcp_queue_read(loop, handle);
uv__tcp_queue_read(loop, handle);
}
return 0;
@ -779,7 +782,7 @@ static int uv__is_fast_loopback_fail_supported(void) {
return os_info.dwBuildNumber >= 16299;
}
static int uv_tcp_try_connect(uv_connect_t* req,
static int uv__tcp_try_connect(uv_connect_t* req,
uv_tcp_t* handle,
const struct sockaddr* addr,
unsigned int addrlen,
@ -807,7 +810,7 @@ static int uv_tcp_try_connect(uv_connect_t* req,
} else {
abort();
}
err = uv_tcp_try_bind(handle, bind_addr, addrlen, 0);
err = uv__tcp_try_bind(handle, bind_addr, addrlen, 0);
if (err)
return err;
if (handle->delayed_error != 0)
@ -815,7 +818,7 @@ static int uv_tcp_try_connect(uv_connect_t* req,
}
if (!handle->tcp.conn.func_connectex) {
if (!uv_get_connectex_function(handle->socket, &handle->tcp.conn.func_connectex)) {
if (!uv__get_connectex_function(handle->socket, &handle->tcp.conn.func_connectex)) {
return WSAEAFNOSUPPORT;
}
}
@ -850,7 +853,7 @@ out:
/* Process the req without IOCP. */
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
return 0;
}
@ -866,7 +869,7 @@ out:
/* Process the req without IOCP. */
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
} else if (UV_SUCCEEDED_WITH_IOCP(success)) {
/* The req will be processed with IOCP. */
handle->reqs_pending++;
@ -903,7 +906,7 @@ int uv_tcp_getpeername(const uv_tcp_t* handle,
}
int uv_tcp_write(uv_loop_t* loop,
int uv__tcp_write(uv_loop_t* loop,
uv_write_t* req,
uv_tcp_t* handle,
const uv_buf_t bufs[],
@ -941,7 +944,7 @@ int uv_tcp_write(uv_loop_t* loop,
handle->reqs_pending++;
handle->stream.conn.write_reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
uv_insert_pending_req(loop, (uv_req_t*) req);
uv__insert_pending_req(loop, (uv_req_t*) req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* Request queued by the kernel. */
req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs);
@ -954,7 +957,7 @@ int uv_tcp_write(uv_loop_t* loop,
req->event_handle, post_write_completion, (void*) req,
INFINITE, WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE)) {
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
}
} else {
/* Send failed due to an error, report it later */
@ -963,7 +966,7 @@ int uv_tcp_write(uv_loop_t* loop,
handle->stream.conn.write_reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, (uv_req_t*) req);
uv__insert_pending_req(loop, (uv_req_t*) req);
}
return 0;
@ -994,7 +997,7 @@ int uv__tcp_try_write(uv_tcp_t* handle,
}
void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
void uv__process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_req_t* req) {
DWORD bytes, flags, err;
uv_buf_t buf;
@ -1115,7 +1118,7 @@ done:
/* Post another read if still reading and not closing. */
if ((handle->flags & UV_HANDLE_READING) &&
!(handle->flags & UV_HANDLE_READ_PENDING)) {
uv_tcp_queue_read(loop, handle);
uv__tcp_queue_read(loop, handle);
}
}
@ -1123,7 +1126,7 @@ done:
}
void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
void uv__process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_write_t* req) {
int err;
@ -1160,16 +1163,17 @@ void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
closesocket(handle->socket);
handle->socket = INVALID_SOCKET;
}
if (handle->stream.conn.shutdown_req != NULL) {
uv_want_endgame(loop, (uv_handle_t*)handle);
}
if (handle->flags & UV_HANDLE_SHUTTING)
uv__process_tcp_shutdown_req(loop,
handle,
handle->stream.conn.shutdown_req);
}
DECREASE_PENDING_REQ_COUNT(handle);
}
void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
void uv__process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_req_t* raw_req) {
uv_tcp_accept_t* req = (uv_tcp_accept_t*) raw_req;
int err;
@ -1209,7 +1213,7 @@ void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
closesocket(req->accept_socket);
req->accept_socket = INVALID_SOCKET;
if (handle->flags & UV_HANDLE_LISTENING) {
uv_tcp_queue_accept(handle, req);
uv__tcp_queue_accept(handle, req);
}
}
@ -1217,7 +1221,7 @@ void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
}
void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
void uv__process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_connect_t* req) {
int err;
@ -1242,7 +1246,7 @@ void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
SO_UPDATE_CONNECT_CONTEXT,
NULL,
0) == 0) {
uv_connection_init((uv_stream_t*)handle);
uv__connection_init((uv_stream_t*)handle);
handle->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
loop->active_tcp_streams++;
} else {
@ -1312,7 +1316,7 @@ int uv__tcp_xfer_import(uv_tcp_t* tcp,
return WSAGetLastError();
}
err = uv_tcp_set_socket(
err = uv__tcp_set_socket(
tcp->loop, tcp, socket, xfer_info->socket_info.iAddressFamily, 1);
if (err) {
closesocket(socket);
@ -1323,7 +1327,7 @@ int uv__tcp_xfer_import(uv_tcp_t* tcp,
tcp->flags |= UV_HANDLE_BOUND | UV_HANDLE_SHARED_TCP_SOCKET;
if (xfer_type == UV__IPC_SOCKET_XFER_TCP_CONNECTION) {
uv_connection_init((uv_stream_t*)tcp);
uv__connection_init((uv_stream_t*)tcp);
tcp->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
}
@ -1404,14 +1408,14 @@ int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
}
static void uv_tcp_try_cancel_reqs(uv_tcp_t* tcp) {
static void uv__tcp_try_cancel_reqs(uv_tcp_t* tcp) {
SOCKET socket;
int non_ifs_lsp;
int reading;
int writing;
socket = tcp->socket;
reading = tcp->flags & UV_HANDLE_READING;
reading = tcp->flags & UV_HANDLE_READ_PENDING;
writing = tcp->stream.conn.write_reqs_pending > 0;
if (!reading && !writing)
return;
@ -1456,12 +1460,12 @@ static void uv_tcp_try_cancel_reqs(uv_tcp_t* tcp) {
}
void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
void uv__tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
if (tcp->flags & UV_HANDLE_CONNECTION) {
uv_tcp_try_cancel_reqs(tcp);
if (tcp->flags & UV_HANDLE_READING) {
uv_read_stop((uv_stream_t*) tcp);
}
uv__tcp_try_cancel_reqs(tcp);
} else {
if (tcp->tcp.serv.accept_reqs != NULL) {
/* First close the incoming sockets to cancel the accept operations before
@ -1483,6 +1487,9 @@ void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
DECREASE_ACTIVE_COUNT(loop, tcp);
}
tcp->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
uv__handle_closing(tcp);
/* If any overlapped req failed to cancel, calling `closesocket` now would
* cause Win32 to send an RST packet. Try to avoid that for writes, if
* possibly applicable, by waiting to process the completion notifications
@ -1494,12 +1501,8 @@ void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
tcp->socket = INVALID_SOCKET;
}
tcp->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
uv__handle_closing(tcp);
if (tcp->reqs_pending == 0) {
uv_want_endgame(tcp->loop, (uv_handle_t*)tcp);
}
if (tcp->reqs_pending == 0)
uv__want_endgame(loop, (uv_handle_t*) tcp);
}
@ -1520,7 +1523,7 @@ int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) {
return uv_translate_sys_error(GetLastError());
}
err = uv_tcp_set_socket(handle->loop,
err = uv__tcp_set_socket(handle->loop,
handle,
sock,
protocol_info.iAddressFamily,
@ -1537,7 +1540,7 @@ int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) {
saddr_len = sizeof(saddr);
if (!uv_tcp_getpeername(handle, (struct sockaddr*) &saddr, &saddr_len)) {
/* Socket is already connected. */
uv_connection_init((uv_stream_t*) handle);
uv__connection_init((uv_stream_t*) handle);
handle->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
}
}
@ -1555,7 +1558,7 @@ int uv__tcp_bind(uv_tcp_t* handle,
unsigned int flags) {
int err;
err = uv_tcp_try_bind(handle, addr, addrlen, flags);
err = uv__tcp_try_bind(handle, addr, addrlen, flags);
if (err)
return uv_translate_sys_error(err);
@ -1573,7 +1576,7 @@ int uv__tcp_connect(uv_connect_t* req,
uv_connect_cb cb) {
int err;
err = uv_tcp_try_connect(req, handle, addr, addrlen, cb);
err = uv__tcp_try_connect(req, handle, addr, addrlen, cb);
if (err)
return uv_translate_sys_error(err);
@ -1634,7 +1637,7 @@ int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int f
goto wsaerror;
if (!SetHandleInformation((HANDLE) client1, HANDLE_FLAG_INHERIT, 0))
goto error;
if (!uv_get_acceptex_function(server, &func_acceptex)) {
if (!uv__get_acceptex_function(server, &func_acceptex)) {
err = WSAEAFNOSUPPORT;
goto cleanup;
}
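
A minimal TCP server sketch over the public API whose helpers are renamed above (uv__tcp_try_bind, uv__tcp_queue_accept, uv__connection_init). The port, backlog and callback bodies are illustrative.

    #include <stdlib.h>
    #include <uv.h>

    static void on_client_close(uv_handle_t* handle) {
      free(handle);
    }

    static void on_connection(uv_stream_t* server, int status) {
      uv_tcp_t* client;
      if (status < 0)
        return;                                /* e.g. UV_ECONNABORTED */
      client = malloc(sizeof(*client));
      if (client == NULL)
        return;
      uv_tcp_init(server->loop, client);
      if (uv_accept(server, (uv_stream_t*) client) == 0) {
        /* A real server would call uv_read_start() here; this sketch just
         * closes the connection again. */
      }
      uv_close((uv_handle_t*) client, on_client_close);
    }

    int main(void) {
      uv_loop_t* loop = uv_default_loop();
      uv_tcp_t server;
      struct sockaddr_in addr;

      uv_tcp_init(loop, &server);
      uv_ip4_addr("0.0.0.0", 7000, &addr);     /* illustrative port */
      uv_tcp_bind(&server, (const struct sockaddr*) &addr, 0);
      if (uv_listen((uv_stream_t*) &server, 128, on_connection) != 0)
        return 1;
      return uv_run(loop, UV_RUN_DEFAULT);
    }
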


@ -182,8 +182,9 @@ int uv_thread_create_ex(uv_thread_t* tid,
uv_thread_t uv_thread_self(void) {
uv_thread_t key;
uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
uv_thread_t key = uv_key_get(&uv__current_thread_key);
key = uv_key_get(&uv__current_thread_key);
if (key == NULL) {
/* If the thread wasn't started by uv_thread_create (such as the main
* thread), we assign an id to it now. */
@ -248,113 +249,60 @@ void uv_mutex_unlock(uv_mutex_t* mutex) {
LeaveCriticalSection(mutex);
}
/* Ensure that the ABI for this type remains stable in v1.x */
#ifdef _WIN64
STATIC_ASSERT(sizeof(uv_rwlock_t) == 80);
#else
STATIC_ASSERT(sizeof(uv_rwlock_t) == 48);
#endif
int uv_rwlock_init(uv_rwlock_t* rwlock) {
/* Initialize the semaphore that acts as the write lock. */
HANDLE handle = CreateSemaphoreW(NULL, 1, 1, NULL);
if (handle == NULL)
return uv_translate_sys_error(GetLastError());
rwlock->state_.write_semaphore_ = handle;
/* Initialize the critical section protecting the reader count. */
InitializeCriticalSection(&rwlock->state_.num_readers_lock_);
/* Initialize the reader count. */
rwlock->state_.num_readers_ = 0;
memset(rwlock, 0, sizeof(*rwlock));
InitializeSRWLock(&rwlock->read_write_lock_);
return 0;
}
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
DeleteCriticalSection(&rwlock->state_.num_readers_lock_);
CloseHandle(rwlock->state_.write_semaphore_);
/* SRWLock does not need explicit destruction so long as there are no waiting threads
See: https://docs.microsoft.com/windows/win32/api/synchapi/nf-synchapi-initializesrwlock#remarks */
}
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
/* Acquire the lock that protects the reader count. */
EnterCriticalSection(&rwlock->state_.num_readers_lock_);
/* Increase the reader count, and lock for write if this is the first
* reader.
*/
if (++rwlock->state_.num_readers_ == 1) {
DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE);
if (r != WAIT_OBJECT_0)
uv_fatal_error(GetLastError(), "WaitForSingleObject");
}
/* Release the lock that protects the reader count. */
LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
AcquireSRWLockShared(&rwlock->read_write_lock_);
}
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
int err;
if (!TryEnterCriticalSection(&rwlock->state_.num_readers_lock_))
if (!TryAcquireSRWLockShared(&rwlock->read_write_lock_))
return UV_EBUSY;
err = 0;
if (rwlock->state_.num_readers_ == 0) {
/* Currently there are no other readers, which means that the write lock
* needs to be acquired.
*/
DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0);
if (r == WAIT_OBJECT_0)
rwlock->state_.num_readers_++;
else if (r == WAIT_TIMEOUT)
err = UV_EBUSY;
else if (r == WAIT_FAILED)
uv_fatal_error(GetLastError(), "WaitForSingleObject");
} else {
/* The write lock has already been acquired because there are other
* active readers.
*/
rwlock->state_.num_readers_++;
}
LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
return err;
return 0;
}
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
EnterCriticalSection(&rwlock->state_.num_readers_lock_);
if (--rwlock->state_.num_readers_ == 0) {
if (!ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL))
uv_fatal_error(GetLastError(), "ReleaseSemaphore");
}
LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
ReleaseSRWLockShared(&rwlock->read_write_lock_);
}
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE);
if (r != WAIT_OBJECT_0)
uv_fatal_error(GetLastError(), "WaitForSingleObject");
AcquireSRWLockExclusive(&rwlock->read_write_lock_);
}
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0);
if (r == WAIT_OBJECT_0)
return 0;
else if (r == WAIT_TIMEOUT)
if (!TryAcquireSRWLockExclusive(&rwlock->read_write_lock_))
return UV_EBUSY;
else
uv_fatal_error(GetLastError(), "WaitForSingleObject");
return 0;
}
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
if (!ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL))
uv_fatal_error(GetLastError(), "ReleaseSemaphore");
ReleaseSRWLockExclusive(&rwlock->read_write_lock_);
}
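
A short sketch of the uv_rwlock API whose Win32 backing changes above from a semaphore plus critical section to a slim reader/writer (SRW) lock. The shared counter and thread bodies are illustrative.

    #include <uv.h>

    static uv_rwlock_t lock;
    static int shared_value;

    static void reader(void* arg) {
      int v;
      (void) arg;
      uv_rwlock_rdlock(&lock);      /* many readers may hold this at once */
      v = shared_value;
      uv_rwlock_rdunlock(&lock);
      (void) v;
    }

    static void writer(void* arg) {
      (void) arg;
      uv_rwlock_wrlock(&lock);      /* writers get exclusive access */
      shared_value++;
      uv_rwlock_wrunlock(&lock);
    }

    int main(void) {
      uv_thread_t threads[2];

      if (uv_rwlock_init(&lock) != 0)
        return 1;
      if (uv_thread_create(&threads[0], reader, NULL) != 0 ||
          uv_thread_create(&threads[1], writer, NULL) != 0)
        return 1;
      uv_thread_join(&threads[0]);
      uv_thread_join(&threads[1]);
      uv_rwlock_destroy(&lock);
      return 0;
    }

The try variants map directly onto TryAcquireSRWLockShared/TryAcquireSRWLockExclusive in the new code, which is why uv_rwlock_tryrdlock and uv_rwlock_trywrlock above can now return UV_EBUSY without maintaining a separate reader count.
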

deps/uv/src/win/tty.c vendored

@ -67,10 +67,10 @@
#define CURSOR_SIZE_SMALL 25
#define CURSOR_SIZE_LARGE 100
static void uv_tty_capture_initial_style(
static void uv__tty_capture_initial_style(
CONSOLE_SCREEN_BUFFER_INFO* screen_buffer_info,
CONSOLE_CURSOR_INFO* cursor_info);
static void uv_tty_update_virtual_window(CONSOLE_SCREEN_BUFFER_INFO* info);
static void uv__tty_update_virtual_window(CONSOLE_SCREEN_BUFFER_INFO* info);
static int uv__cancel_read_console(uv_tty_t* handle);
@ -163,7 +163,7 @@ static BOOL uv__need_check_vterm_state = TRUE;
static uv_tty_vtermstate_t uv__vterm_state = UV_TTY_UNSUPPORTED;
static void uv__determine_vterm_state(HANDLE handle);
void uv_console_init(void) {
void uv__console_init(void) {
if (uv_sem_init(&uv_tty_output_lock, 1))
abort();
uv__tty_console_handle = CreateFileW(L"CONOUT$",
@ -238,16 +238,16 @@ int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, uv_file fd, int unused) {
uv__determine_vterm_state(handle);
/* Remember the original console text attributes and cursor info. */
uv_tty_capture_initial_style(&screen_buffer_info, &cursor_info);
uv__tty_capture_initial_style(&screen_buffer_info, &cursor_info);
uv_tty_update_virtual_window(&screen_buffer_info);
uv__tty_update_virtual_window(&screen_buffer_info);
uv_sem_post(&uv_tty_output_lock);
}
uv_stream_init(loop, (uv_stream_t*) tty, UV_TTY);
uv_connection_init((uv_stream_t*) tty);
uv__stream_init(loop, (uv_stream_t*) tty, UV_TTY);
uv__connection_init((uv_stream_t*) tty);
tty->handle = handle;
tty->u.fd = fd;
@ -289,7 +289,7 @@ int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, uv_file fd, int unused) {
/* Set the default console text attributes based on how the console was
* configured when libuv started.
*/
static void uv_tty_capture_initial_style(
static void uv__tty_capture_initial_style(
CONSOLE_SCREEN_BUFFER_INFO* screen_buffer_info,
CONSOLE_CURSOR_INFO* cursor_info) {
static int style_captured = 0;
@ -380,7 +380,7 @@ int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
was_reading = 1;
alloc_cb = tty->alloc_cb;
read_cb = tty->read_cb;
err = uv_tty_read_stop(tty);
err = uv__tty_read_stop(tty);
if (err) {
return uv_translate_sys_error(err);
}
@ -404,7 +404,7 @@ int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
/* If we just stopped reading, restart. */
if (was_reading) {
err = uv_tty_read_start(tty, alloc_cb, read_cb);
err = uv__tty_read_start(tty, alloc_cb, read_cb);
if (err) {
return uv_translate_sys_error(err);
}
@ -422,7 +422,7 @@ int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) {
}
uv_sem_wait(&uv_tty_output_lock);
uv_tty_update_virtual_window(&info);
uv__tty_update_virtual_window(&info);
uv_sem_post(&uv_tty_output_lock);
*width = uv_tty_virtual_width;
@ -452,7 +452,7 @@ static void CALLBACK uv_tty_post_raw_read(void* data, BOOLEAN didTimeout) {
}
static void uv_tty_queue_read_raw(uv_loop_t* loop, uv_tty_t* handle) {
static void uv__tty_queue_read_raw(uv_loop_t* loop, uv_tty_t* handle) {
uv_read_t* req;
BOOL r;
@ -475,7 +475,7 @@ static void uv_tty_queue_read_raw(uv_loop_t* loop, uv_tty_t* handle) {
if (!r) {
handle->tty.rd.read_raw_wait = NULL;
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
}
handle->flags |= UV_HANDLE_READ_PENDING;
@ -579,7 +579,7 @@ static DWORD CALLBACK uv_tty_line_read_thread(void* data) {
}
static void uv_tty_queue_read_line(uv_loop_t* loop, uv_tty_t* handle) {
static void uv__tty_queue_read_line(uv_loop_t* loop, uv_tty_t* handle) {
uv_read_t* req;
BOOL r;
@ -611,7 +611,7 @@ static void uv_tty_queue_read_line(uv_loop_t* loop, uv_tty_t* handle) {
WT_EXECUTELONGFUNCTION);
if (!r) {
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
}
handle->flags |= UV_HANDLE_READ_PENDING;
@ -619,11 +619,11 @@ static void uv_tty_queue_read_line(uv_loop_t* loop, uv_tty_t* handle) {
}
static void uv_tty_queue_read(uv_loop_t* loop, uv_tty_t* handle) {
static void uv__tty_queue_read(uv_loop_t* loop, uv_tty_t* handle) {
if (handle->flags & UV_HANDLE_TTY_RAW) {
uv_tty_queue_read_raw(loop, handle);
uv__tty_queue_read_raw(loop, handle);
} else {
uv_tty_queue_read_line(loop, handle);
uv__tty_queue_read_line(loop, handle);
}
}
@ -947,7 +947,7 @@ void uv_process_tty_read_raw_req(uv_loop_t* loop, uv_tty_t* handle,
/* Wait for more input events. */
if ((handle->flags & UV_HANDLE_READING) &&
!(handle->flags & UV_HANDLE_READ_PENDING)) {
uv_tty_queue_read(loop, handle);
uv__tty_queue_read(loop, handle);
}
DECREASE_PENDING_REQ_COUNT(handle);
@ -992,14 +992,14 @@ void uv_process_tty_read_line_req(uv_loop_t* loop, uv_tty_t* handle,
/* Wait for more input events. */
if ((handle->flags & UV_HANDLE_READING) &&
!(handle->flags & UV_HANDLE_READ_PENDING)) {
uv_tty_queue_read(loop, handle);
uv__tty_queue_read(loop, handle);
}
DECREASE_PENDING_REQ_COUNT(handle);
}
void uv_process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle,
void uv__process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle,
uv_req_t* req) {
assert(handle->type == UV_TTY);
assert(handle->flags & UV_HANDLE_TTY_READABLE);
@ -1015,7 +1015,7 @@ void uv_process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle,
}
int uv_tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb,
int uv__tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb) {
uv_loop_t* loop = handle->loop;
@ -1038,20 +1038,20 @@ int uv_tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb,
* Short-circuit if this could be the case. */
if (handle->tty.rd.last_key_len > 0) {
SET_REQ_SUCCESS(&handle->read_req);
uv_insert_pending_req(handle->loop, (uv_req_t*) &handle->read_req);
uv__insert_pending_req(handle->loop, (uv_req_t*) &handle->read_req);
/* Make sure no attempt is made to insert it again until it's handled. */
handle->flags |= UV_HANDLE_READ_PENDING;
handle->reqs_pending++;
return 0;
}
uv_tty_queue_read(loop, handle);
uv__tty_queue_read(loop, handle);
return 0;
}
int uv_tty_read_stop(uv_tty_t* handle) {
int uv__tty_read_stop(uv_tty_t* handle) {
INPUT_RECORD record;
DWORD written, err;
@ -1137,7 +1137,7 @@ static int uv__cancel_read_console(uv_tty_t* handle) {
}
static void uv_tty_update_virtual_window(CONSOLE_SCREEN_BUFFER_INFO* info) {
static void uv__tty_update_virtual_window(CONSOLE_SCREEN_BUFFER_INFO* info) {
uv_tty_virtual_width = info->dwSize.X;
uv_tty_virtual_height = info->srWindow.Bottom - info->srWindow.Top + 1;
@ -1160,12 +1160,12 @@ static void uv_tty_update_virtual_window(CONSOLE_SCREEN_BUFFER_INFO* info) {
}
static COORD uv_tty_make_real_coord(uv_tty_t* handle,
static COORD uv__tty_make_real_coord(uv_tty_t* handle,
CONSOLE_SCREEN_BUFFER_INFO* info, int x, unsigned char x_relative, int y,
unsigned char y_relative) {
COORD result;
uv_tty_update_virtual_window(info);
uv__tty_update_virtual_window(info);
/* Adjust y position */
if (y_relative) {
@ -1197,7 +1197,7 @@ static COORD uv_tty_make_real_coord(uv_tty_t* handle,
}
static int uv_tty_emit_text(uv_tty_t* handle, WCHAR buffer[], DWORD length,
static int uv__tty_emit_text(uv_tty_t* handle, WCHAR buffer[], DWORD length,
DWORD* error) {
DWORD written;
@ -1218,7 +1218,7 @@ static int uv_tty_emit_text(uv_tty_t* handle, WCHAR buffer[], DWORD length,
}
static int uv_tty_move_caret(uv_tty_t* handle, int x, unsigned char x_relative,
static int uv__tty_move_caret(uv_tty_t* handle, int x, unsigned char x_relative,
int y, unsigned char y_relative, DWORD* error) {
CONSOLE_SCREEN_BUFFER_INFO info;
COORD pos;
@ -1232,7 +1232,7 @@ static int uv_tty_move_caret(uv_tty_t* handle, int x, unsigned char x_relative,
*error = GetLastError();
}
pos = uv_tty_make_real_coord(handle, &info, x, x_relative, y, y_relative);
pos = uv__tty_make_real_coord(handle, &info, x, x_relative, y, y_relative);
if (!SetConsoleCursorPosition(handle->handle, pos)) {
if (GetLastError() == ERROR_INVALID_PARAMETER) {
@ -1248,7 +1248,7 @@ static int uv_tty_move_caret(uv_tty_t* handle, int x, unsigned char x_relative,
}
static int uv_tty_reset(uv_tty_t* handle, DWORD* error) {
static int uv__tty_reset(uv_tty_t* handle, DWORD* error) {
const COORD origin = {0, 0};
const WORD char_attrs = uv_tty_default_text_attributes;
CONSOLE_SCREEN_BUFFER_INFO screen_buffer_info;
@ -1300,7 +1300,7 @@ static int uv_tty_reset(uv_tty_t* handle, DWORD* error) {
/* Move the virtual window up to the top. */
uv_tty_virtual_offset = 0;
uv_tty_update_virtual_window(&screen_buffer_info);
uv__tty_update_virtual_window(&screen_buffer_info);
/* Reset the cursor size and the cursor state. */
if (!SetConsoleCursorInfo(handle->handle, &uv_tty_default_cursor_info)) {
@ -1312,7 +1312,7 @@ static int uv_tty_reset(uv_tty_t* handle, DWORD* error) {
}
static int uv_tty_clear(uv_tty_t* handle, int dir, char entire_screen,
static int uv__tty_clear(uv_tty_t* handle, int dir, char entire_screen,
DWORD* error) {
CONSOLE_SCREEN_BUFFER_INFO info;
COORD start, end;
@ -1341,7 +1341,7 @@ static int uv_tty_clear(uv_tty_t* handle, int dir, char entire_screen,
x2r = 1;
} else {
/* Clear to end of row. We pretend the console is 65536 characters wide,
* uv_tty_make_real_coord will clip it to the actual console width. */
* uv__tty_make_real_coord will clip it to the actual console width. */
x2 = 0xffff;
x2r = 0;
}
@ -1364,8 +1364,8 @@ static int uv_tty_clear(uv_tty_t* handle, int dir, char entire_screen,
return -1;
}
start = uv_tty_make_real_coord(handle, &info, x1, x1r, y1, y1r);
end = uv_tty_make_real_coord(handle, &info, x2, x2r, y2, y2r);
start = uv__tty_make_real_coord(handle, &info, x1, x1r, y1, y1r);
end = uv__tty_make_real_coord(handle, &info, x2, x2r, y2, y2r);
count = (end.Y * info.dwSize.X + end.X) -
(start.Y * info.dwSize.X + start.X) + 1;
@ -1400,7 +1400,7 @@ static int uv_tty_clear(uv_tty_t* handle, int dir, char entire_screen,
info.wAttributes |= bg >> 4; \
} while (0)
static int uv_tty_set_style(uv_tty_t* handle, DWORD* error) {
static int uv__tty_set_style(uv_tty_t* handle, DWORD* error) {
unsigned short argc = handle->tty.wr.ansi_csi_argc;
unsigned short* argv = handle->tty.wr.ansi_csi_argv;
int i;
@ -1556,7 +1556,7 @@ static int uv_tty_set_style(uv_tty_t* handle, DWORD* error) {
}
static int uv_tty_save_state(uv_tty_t* handle, unsigned char save_attributes,
static int uv__tty_save_state(uv_tty_t* handle, unsigned char save_attributes,
DWORD* error) {
CONSOLE_SCREEN_BUFFER_INFO info;
@ -1569,10 +1569,11 @@ static int uv_tty_save_state(uv_tty_t* handle, unsigned char save_attributes,
return -1;
}
uv_tty_update_virtual_window(&info);
uv__tty_update_virtual_window(&info);
handle->tty.wr.saved_position.X = info.dwCursorPosition.X;
handle->tty.wr.saved_position.Y = info.dwCursorPosition.Y - uv_tty_virtual_offset;
handle->tty.wr.saved_position.Y = info.dwCursorPosition.Y -
uv_tty_virtual_offset;
handle->flags |= UV_HANDLE_TTY_SAVED_POSITION;
if (save_attributes) {
@ -1585,7 +1586,7 @@ static int uv_tty_save_state(uv_tty_t* handle, unsigned char save_attributes,
}
static int uv_tty_restore_state(uv_tty_t* handle,
static int uv__tty_restore_state(uv_tty_t* handle,
unsigned char restore_attributes, DWORD* error) {
CONSOLE_SCREEN_BUFFER_INFO info;
WORD new_attributes;
@ -1595,7 +1596,7 @@ static int uv_tty_restore_state(uv_tty_t* handle,
}
if (handle->flags & UV_HANDLE_TTY_SAVED_POSITION) {
if (uv_tty_move_caret(handle,
if (uv__tty_move_caret(handle,
handle->tty.wr.saved_position.X,
0,
handle->tty.wr.saved_position.Y,
@ -1625,7 +1626,7 @@ static int uv_tty_restore_state(uv_tty_t* handle,
return 0;
}
static int uv_tty_set_cursor_visibility(uv_tty_t* handle,
static int uv__tty_set_cursor_visibility(uv_tty_t* handle,
BOOL visible,
DWORD* error) {
CONSOLE_CURSOR_INFO cursor_info;
@ -1645,7 +1646,7 @@ static int uv_tty_set_cursor_visibility(uv_tty_t* handle,
return 0;
}
static int uv_tty_set_cursor_shape(uv_tty_t* handle, int style, DWORD* error) {
static int uv__tty_set_cursor_shape(uv_tty_t* handle, int style, DWORD* error) {
CONSOLE_CURSOR_INFO cursor_info;
if (!GetConsoleCursorInfo(handle->handle, &cursor_info)) {
@ -1670,7 +1671,7 @@ static int uv_tty_set_cursor_shape(uv_tty_t* handle, int style, DWORD* error) {
}
static int uv_tty_write_bufs(uv_tty_t* handle,
static int uv__tty_write_bufs(uv_tty_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs,
DWORD* error) {
@ -1683,7 +1684,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
#define FLUSH_TEXT() \
do { \
if (utf16_buf_used > 0) { \
uv_tty_emit_text(handle, utf16_buf, utf16_buf_used, error); \
uv__tty_emit_text(handle, utf16_buf, utf16_buf_used, error); \
utf16_buf_used = 0; \
} \
} while (0)
@ -1802,21 +1803,21 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
case 'c':
/* Full console reset. */
FLUSH_TEXT();
uv_tty_reset(handle, error);
uv__tty_reset(handle, error);
ansi_parser_state = ANSI_NORMAL;
continue;
case '7':
/* Save the cursor position and text attributes. */
FLUSH_TEXT();
uv_tty_save_state(handle, 1, error);
uv__tty_save_state(handle, 1, error);
ansi_parser_state = ANSI_NORMAL;
continue;
case '8':
/* Restore the cursor position and text attributes */
FLUSH_TEXT();
uv_tty_restore_state(handle, 1, error);
uv__tty_restore_state(handle, 1, error);
ansi_parser_state = ANSI_NORMAL;
continue;
@ -1849,7 +1850,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
? handle->tty.wr.ansi_csi_argv[0] : 1;
if (style >= 0 && style <= 6) {
FLUSH_TEXT();
uv_tty_set_cursor_shape(handle, style, error);
uv__tty_set_cursor_shape(handle, style, error);
}
}
@ -1947,7 +1948,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
if (handle->tty.wr.ansi_csi_argc == 1 &&
handle->tty.wr.ansi_csi_argv[0] == 25) {
FLUSH_TEXT();
uv_tty_set_cursor_visibility(handle, 0, error);
uv__tty_set_cursor_visibility(handle, 0, error);
}
break;
@ -1956,7 +1957,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
if (handle->tty.wr.ansi_csi_argc == 1 &&
handle->tty.wr.ansi_csi_argv[0] == 25) {
FLUSH_TEXT();
uv_tty_set_cursor_visibility(handle, 1, error);
uv__tty_set_cursor_visibility(handle, 1, error);
}
break;
}
@ -1970,7 +1971,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
FLUSH_TEXT();
y = -(handle->tty.wr.ansi_csi_argc
? handle->tty.wr.ansi_csi_argv[0] : 1);
uv_tty_move_caret(handle, 0, 1, y, 1, error);
uv__tty_move_caret(handle, 0, 1, y, 1, error);
break;
case 'B':
@ -1978,7 +1979,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
FLUSH_TEXT();
y = handle->tty.wr.ansi_csi_argc
? handle->tty.wr.ansi_csi_argv[0] : 1;
uv_tty_move_caret(handle, 0, 1, y, 1, error);
uv__tty_move_caret(handle, 0, 1, y, 1, error);
break;
case 'C':
@ -1986,7 +1987,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
FLUSH_TEXT();
x = handle->tty.wr.ansi_csi_argc
? handle->tty.wr.ansi_csi_argv[0] : 1;
uv_tty_move_caret(handle, x, 1, 0, 1, error);
uv__tty_move_caret(handle, x, 1, 0, 1, error);
break;
case 'D':
@ -1994,7 +1995,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
FLUSH_TEXT();
x = -(handle->tty.wr.ansi_csi_argc
? handle->tty.wr.ansi_csi_argv[0] : 1);
uv_tty_move_caret(handle, x, 1, 0, 1, error);
uv__tty_move_caret(handle, x, 1, 0, 1, error);
break;
case 'E':
@ -2002,7 +2003,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
FLUSH_TEXT();
y = handle->tty.wr.ansi_csi_argc
? handle->tty.wr.ansi_csi_argv[0] : 1;
uv_tty_move_caret(handle, 0, 0, y, 1, error);
uv__tty_move_caret(handle, 0, 0, y, 1, error);
break;
case 'F':
@ -2010,7 +2011,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
FLUSH_TEXT();
y = -(handle->tty.wr.ansi_csi_argc
? handle->tty.wr.ansi_csi_argv[0] : 1);
uv_tty_move_caret(handle, 0, 0, y, 1, error);
uv__tty_move_caret(handle, 0, 0, y, 1, error);
break;
case 'G':
@ -2019,7 +2020,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
x = (handle->tty.wr.ansi_csi_argc >= 1 &&
handle->tty.wr.ansi_csi_argv[0])
? handle->tty.wr.ansi_csi_argv[0] - 1 : 0;
uv_tty_move_caret(handle, x, 0, 0, 1, error);
uv__tty_move_caret(handle, x, 0, 0, 1, error);
break;
case 'H':
@ -2032,7 +2033,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
x = (handle->tty.wr.ansi_csi_argc >= 2 &&
handle->tty.wr.ansi_csi_argv[1])
? handle->tty.wr.ansi_csi_argv[1] - 1 : 0;
uv_tty_move_caret(handle, x, 0, y, 0, error);
uv__tty_move_caret(handle, x, 0, y, 0, error);
break;
case 'J':
@ -2041,7 +2042,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
d = handle->tty.wr.ansi_csi_argc
? handle->tty.wr.ansi_csi_argv[0] : 0;
if (d >= 0 && d <= 2) {
uv_tty_clear(handle, d, 1, error);
uv__tty_clear(handle, d, 1, error);
}
break;
@ -2051,26 +2052,26 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
d = handle->tty.wr.ansi_csi_argc
? handle->tty.wr.ansi_csi_argv[0] : 0;
if (d >= 0 && d <= 2) {
uv_tty_clear(handle, d, 0, error);
uv__tty_clear(handle, d, 0, error);
}
break;
case 'm':
/* Set style */
FLUSH_TEXT();
uv_tty_set_style(handle, error);
uv__tty_set_style(handle, error);
break;
case 's':
/* Save the cursor position. */
FLUSH_TEXT();
uv_tty_save_state(handle, 0, error);
uv__tty_save_state(handle, 0, error);
break;
case 'u':
/* Restore the cursor position */
FLUSH_TEXT();
uv_tty_restore_state(handle, 0, error);
uv__tty_restore_state(handle, 0, error);
break;
}
}
@ -2179,7 +2180,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
}
int uv_tty_write(uv_loop_t* loop,
int uv__tty_write(uv_loop_t* loop,
uv_write_t* req,
uv_tty_t* handle,
const uv_buf_t bufs[],
@ -2197,13 +2198,13 @@ int uv_tty_write(uv_loop_t* loop,
req->u.io.queued_bytes = 0;
if (!uv_tty_write_bufs(handle, bufs, nbufs, &error)) {
if (!uv__tty_write_bufs(handle, bufs, nbufs, &error)) {
SET_REQ_SUCCESS(req);
} else {
SET_REQ_ERROR(req, error);
}
uv_insert_pending_req(loop, (uv_req_t*) req);
uv__insert_pending_req(loop, (uv_req_t*) req);
return 0;
}
@ -2217,14 +2218,14 @@ int uv__tty_try_write(uv_tty_t* handle,
if (handle->stream.conn.write_reqs_pending > 0)
return UV_EAGAIN;
if (uv_tty_write_bufs(handle, bufs, nbufs, &error))
if (uv__tty_write_bufs(handle, bufs, nbufs, &error))
return uv_translate_sys_error(error);
return uv__count_bufs(bufs, nbufs);
}
void uv_process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
void uv__process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
uv_write_t* req) {
int err;
@ -2236,20 +2237,22 @@ void uv_process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
req->cb(req, uv_translate_sys_error(err));
}
handle->stream.conn.write_reqs_pending--;
if (handle->stream.conn.shutdown_req != NULL &&
handle->stream.conn.write_reqs_pending == 0) {
uv_want_endgame(loop, (uv_handle_t*)handle);
}
if (handle->stream.conn.write_reqs_pending == 0)
if (handle->flags & UV_HANDLE_SHUTTING)
uv__process_tty_shutdown_req(loop,
handle,
handle->stream.conn.shutdown_req);
DECREASE_PENDING_REQ_COUNT(handle);
}
void uv_tty_close(uv_tty_t* handle) {
void uv__tty_close(uv_tty_t* handle) {
assert(handle->u.fd == -1 || handle->u.fd > 2);
if (handle->flags & UV_HANDLE_READING)
uv_tty_read_stop(handle);
uv__tty_read_stop(handle);
if (handle->u.fd == -1)
CloseHandle(handle->handle);
@ -2261,35 +2264,36 @@ void uv_tty_close(uv_tty_t* handle) {
handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
uv__handle_closing(handle);
if (handle->reqs_pending == 0) {
uv_want_endgame(handle->loop, (uv_handle_t*) handle);
}
if (handle->reqs_pending == 0)
uv__want_endgame(handle->loop, (uv_handle_t*) handle);
}
void uv_tty_endgame(uv_loop_t* loop, uv_tty_t* handle) {
if (!(handle->flags & UV_HANDLE_TTY_READABLE) &&
handle->stream.conn.shutdown_req != NULL &&
handle->stream.conn.write_reqs_pending == 0) {
UNREGISTER_HANDLE_REQ(loop, handle, handle->stream.conn.shutdown_req);
void uv__process_tty_shutdown_req(uv_loop_t* loop, uv_tty_t* stream, uv_shutdown_t* req) {
assert(stream->stream.conn.write_reqs_pending == 0);
assert(req);
stream->stream.conn.shutdown_req = NULL;
stream->flags &= ~UV_HANDLE_SHUTTING;
UNREGISTER_HANDLE_REQ(loop, stream, req);
/* TTY shutdown is really just a no-op */
if (handle->stream.conn.shutdown_req->cb) {
if (handle->flags & UV_HANDLE_CLOSING) {
handle->stream.conn.shutdown_req->cb(handle->stream.conn.shutdown_req, UV_ECANCELED);
if (req->cb) {
if (stream->flags & UV_HANDLE_CLOSING) {
req->cb(req, UV_ECANCELED);
} else {
handle->stream.conn.shutdown_req->cb(handle->stream.conn.shutdown_req, 0);
req->cb(req, 0);
}
}
handle->stream.conn.shutdown_req = NULL;
DECREASE_PENDING_REQ_COUNT(stream);
}
DECREASE_PENDING_REQ_COUNT(handle);
return;
}
if (handle->flags & UV_HANDLE_CLOSING &&
handle->reqs_pending == 0) {
void uv__tty_endgame(uv_loop_t* loop, uv_tty_t* handle) {
assert(handle->flags & UV_HANDLE_CLOSING);
assert(handle->reqs_pending == 0);
/* The wait handle used for raw reading should be unregistered when the
* wait callback runs. */
assert(!(handle->flags & UV_HANDLE_TTY_READABLE) ||
@ -2297,25 +2301,24 @@ void uv_tty_endgame(uv_loop_t* loop, uv_tty_t* handle) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
uv__handle_close(handle);
}
}
/*
* uv_process_tty_accept_req() is a stub to keep DELEGATE_STREAM_REQ working
* uv__process_tty_accept_req() is a stub to keep DELEGATE_STREAM_REQ working
* TODO: find a way to remove it
*/
void uv_process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
void uv__process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
uv_req_t* raw_req) {
abort();
}
/*
* uv_process_tty_connect_req() is a stub to keep DELEGATE_STREAM_REQ working
* uv__process_tty_connect_req() is a stub to keep DELEGATE_STREAM_REQ working
* TODO: find a way to remove it
*/
void uv_process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
void uv__process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
uv_connect_t* req) {
abort();
}

63
deps/uv/src/win/udp.c vendored

@ -60,7 +60,7 @@ int uv_udp_getsockname(const uv_udp_t* handle,
}
static int uv_udp_set_socket(uv_loop_t* loop, uv_udp_t* handle, SOCKET socket,
static int uv__udp_set_socket(uv_loop_t* loop, uv_udp_t* handle, SOCKET socket,
int family) {
DWORD yes = 1;
WSAPROTOCOL_INFOW info;
@ -106,8 +106,8 @@ static int uv_udp_set_socket(uv_loop_t* loop, uv_udp_t* handle, SOCKET socket,
FILE_SKIP_SET_EVENT_ON_HANDLE |
FILE_SKIP_COMPLETION_PORT_ON_SUCCESS)) {
handle->flags |= UV_HANDLE_SYNC_BYPASS_IOCP;
handle->func_wsarecv = uv_wsarecv_workaround;
handle->func_wsarecvfrom = uv_wsarecvfrom_workaround;
handle->func_wsarecv = uv__wsarecv_workaround;
handle->func_wsarecvfrom = uv__wsarecvfrom_workaround;
} else if (GetLastError() != ERROR_INVALID_FUNCTION) {
return GetLastError();
}
@ -155,7 +155,7 @@ int uv__udp_init_ex(uv_loop_t* loop,
return uv_translate_sys_error(err);
}
err = uv_udp_set_socket(handle->loop, handle, sock, domain);
err = uv__udp_set_socket(handle->loop, handle, sock, domain);
if (err) {
closesocket(sock);
QUEUE_REMOVE(&handle->handle_queue);
@ -167,7 +167,7 @@ int uv__udp_init_ex(uv_loop_t* loop,
}
void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle) {
void uv__udp_close(uv_loop_t* loop, uv_udp_t* handle) {
uv_udp_recv_stop(handle);
closesocket(handle->socket);
handle->socket = INVALID_SOCKET;
@ -175,12 +175,12 @@ void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle) {
uv__handle_closing(handle);
if (handle->reqs_pending == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
}
}
void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle) {
void uv__udp_endgame(uv_loop_t* loop, uv_udp_t* handle) {
if (handle->flags & UV_HANDLE_CLOSING &&
handle->reqs_pending == 0) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
@ -194,7 +194,7 @@ int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
}
static int uv_udp_maybe_bind(uv_udp_t* handle,
static int uv__udp_maybe_bind(uv_udp_t* handle,
const struct sockaddr* addr,
unsigned int addrlen,
unsigned int flags) {
@ -216,7 +216,7 @@ static int uv_udp_maybe_bind(uv_udp_t* handle,
return WSAGetLastError();
}
err = uv_udp_set_socket(handle->loop, handle, sock, addr->sa_family);
err = uv__udp_set_socket(handle->loop, handle, sock, addr->sa_family);
if (err) {
closesocket(sock);
return err;
@ -264,7 +264,7 @@ static int uv_udp_maybe_bind(uv_udp_t* handle,
}
static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
static void uv__udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
uv_req_t* req;
uv_buf_t buf;
DWORD bytes, flags;
@ -311,7 +311,7 @@ static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
handle->flags |= UV_HANDLE_READ_PENDING;
req->u.io.overlapped.InternalHigh = bytes;
handle->reqs_pending++;
uv_insert_pending_req(loop, req);
uv__insert_pending_req(loop, req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* The req will be processed with IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
@ -319,7 +319,7 @@ static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
} else {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, req);
uv__insert_pending_req(loop, req);
handle->reqs_pending++;
}
@ -343,7 +343,7 @@ static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
handle->flags |= UV_HANDLE_READ_PENDING;
req->u.io.overlapped.InternalHigh = bytes;
handle->reqs_pending++;
uv_insert_pending_req(loop, req);
uv__insert_pending_req(loop, req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* The req will be processed with IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
@ -351,7 +351,7 @@ static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
} else {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, req);
uv__insert_pending_req(loop, req);
handle->reqs_pending++;
}
}
@ -367,7 +367,7 @@ int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb,
return UV_EALREADY;
}
err = uv_udp_maybe_bind(handle,
err = uv__udp_maybe_bind(handle,
(const struct sockaddr*) &uv_addr_ip4_any_,
sizeof(uv_addr_ip4_any_),
0);
@ -384,7 +384,7 @@ int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb,
/* If reading was stopped and then started again, there could still be a recv
* request pending. */
if (!(handle->flags & UV_HANDLE_READ_PENDING))
uv_udp_queue_recv(loop, handle);
uv__udp_queue_recv(loop, handle);
return 0;
}
@ -433,7 +433,7 @@ static int uv__send(uv_udp_send_t* req,
handle->send_queue_size += req->u.io.queued_bytes;
handle->send_queue_count++;
REGISTER_HANDLE_REQ(loop, handle, req);
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* Request queued by the kernel. */
req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs);
@ -450,7 +450,7 @@ static int uv__send(uv_udp_send_t* req,
}
void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle,
void uv__process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle,
uv_req_t* req) {
uv_buf_t buf;
int partial;
@ -554,14 +554,14 @@ done:
/* Post another read if still reading and not closing. */
if ((handle->flags & UV_HANDLE_READING) &&
!(handle->flags & UV_HANDLE_READ_PENDING)) {
uv_udp_queue_recv(loop, handle);
uv__udp_queue_recv(loop, handle);
}
DECREASE_PENDING_REQ_COUNT(handle);
}
void uv_process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
void uv__process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
uv_udp_send_t* req) {
int err;
@ -598,7 +598,7 @@ static int uv__udp_set_membership4(uv_udp_t* handle,
return UV_EINVAL;
/* If the socket is unbound, bind to inaddr_any. */
err = uv_udp_maybe_bind(handle,
err = uv__udp_maybe_bind(handle,
(const struct sockaddr*) &uv_addr_ip4_any_,
sizeof(uv_addr_ip4_any_),
UV_UDP_REUSEADDR);
@ -652,7 +652,7 @@ int uv__udp_set_membership6(uv_udp_t* handle,
if ((handle->flags & UV_HANDLE_BOUND) && !(handle->flags & UV_HANDLE_IPV6))
return UV_EINVAL;
err = uv_udp_maybe_bind(handle,
err = uv__udp_maybe_bind(handle,
(const struct sockaddr*) &uv_addr_ip6_any_,
sizeof(uv_addr_ip6_any_),
UV_UDP_REUSEADDR);
@ -708,7 +708,7 @@ static int uv__udp_set_source_membership4(uv_udp_t* handle,
return UV_EINVAL;
/* If the socket is unbound, bind to inaddr_any. */
err = uv_udp_maybe_bind(handle,
err = uv__udp_maybe_bind(handle,
(const struct sockaddr*) &uv_addr_ip4_any_,
sizeof(uv_addr_ip4_any_),
UV_UDP_REUSEADDR);
@ -763,7 +763,7 @@ int uv__udp_set_source_membership6(uv_udp_t* handle,
if ((handle->flags & UV_HANDLE_BOUND) && !(handle->flags & UV_HANDLE_IPV6))
return UV_EINVAL;
err = uv_udp_maybe_bind(handle,
err = uv__udp_maybe_bind(handle,
(const struct sockaddr*) &uv_addr_ip6_any_,
sizeof(uv_addr_ip6_any_),
UV_UDP_REUSEADDR);
@ -962,7 +962,7 @@ int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
return uv_translate_sys_error(GetLastError());
}
err = uv_udp_set_socket(handle->loop,
err = uv__udp_set_socket(handle->loop,
handle,
sock,
protocol_info.iAddressFamily);
@ -1044,7 +1044,7 @@ int uv__udp_bind(uv_udp_t* handle,
unsigned int flags) {
int err;
err = uv_udp_maybe_bind(handle, addr, addrlen, flags);
err = uv__udp_maybe_bind(handle, addr, addrlen, flags);
if (err)
return uv_translate_sys_error(err);
@ -1066,7 +1066,7 @@ int uv__udp_connect(uv_udp_t* handle,
else
return UV_EINVAL;
err = uv_udp_maybe_bind(handle, bind_addr, addrlen, 0);
err = uv__udp_maybe_bind(handle, bind_addr, addrlen, 0);
if (err)
return uv_translate_sys_error(err);
}
@ -1087,7 +1087,7 @@ int uv__udp_disconnect(uv_udp_t* handle) {
memset(&addr, 0, sizeof(addr));
err = connect(handle->socket, &addr, sizeof(addr));
err = connect(handle->socket, (struct sockaddr*) &addr, sizeof(addr));
if (err)
return uv_translate_sys_error(WSAGetLastError());
@ -1117,7 +1117,7 @@ int uv__udp_send(uv_udp_send_t* req,
else
return UV_EINVAL;
err = uv_udp_maybe_bind(handle, bind_addr, addrlen, 0);
err = uv__udp_maybe_bind(handle, bind_addr, addrlen, 0);
if (err)
return uv_translate_sys_error(err);
}
@ -1146,6 +1146,7 @@ int uv__udp_try_send(uv_udp_t* handle,
err = uv__convert_to_localhost_if_unspecified(addr, &converted);
if (err)
return err;
addr = (const struct sockaddr*) &converted;
}
/* Already sending a message.*/
@ -1159,7 +1160,7 @@ int uv__udp_try_send(uv_udp_t* handle,
bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_;
else
return UV_EINVAL;
err = uv_udp_maybe_bind(handle, bind_addr, addrlen, 0);
err = uv__udp_maybe_bind(handle, bind_addr, addrlen, 0);
if (err)
return uv_translate_sys_error(err);
}
@ -1169,7 +1170,7 @@ int uv__udp_try_send(uv_udp_t* handle,
nbufs,
&bytes,
0,
(const struct sockaddr*) &converted,
addr,
addrlen,
NULL,
NULL);

108
deps/uv/src/win/util.c vendored

@ -531,103 +531,25 @@ int uv_resident_set_memory(size_t* rss) {
int uv_uptime(double* uptime) {
BYTE stack_buffer[4096];
BYTE* malloced_buffer = NULL;
BYTE* buffer = (BYTE*) stack_buffer;
size_t buffer_size = sizeof(stack_buffer);
DWORD data_size;
PERF_DATA_BLOCK* data_block;
PERF_OBJECT_TYPE* object_type;
PERF_COUNTER_DEFINITION* counter_definition;
DWORD i;
for (;;) {
LONG result;
data_size = (DWORD) buffer_size;
result = RegQueryValueExW(HKEY_PERFORMANCE_DATA,
L"2",
NULL,
NULL,
buffer,
&data_size);
if (result == ERROR_SUCCESS) {
break;
} else if (result != ERROR_MORE_DATA) {
*uptime = 0;
return uv_translate_sys_error(result);
}
buffer_size *= 2;
/* Don't let the buffer grow infinitely. */
if (buffer_size > 1 << 20) {
goto internalError;
}
uv__free(malloced_buffer);
buffer = malloced_buffer = (BYTE*) uv__malloc(buffer_size);
if (malloced_buffer == NULL) {
*uptime = 0;
return UV_ENOMEM;
}
}
if (data_size < sizeof(*data_block))
goto internalError;
data_block = (PERF_DATA_BLOCK*) buffer;
if (wmemcmp(data_block->Signature, L"PERF", 4) != 0)
goto internalError;
if (data_size < data_block->HeaderLength + sizeof(*object_type))
goto internalError;
object_type = (PERF_OBJECT_TYPE*) (buffer + data_block->HeaderLength);
if (object_type->NumInstances != PERF_NO_INSTANCES)
goto internalError;
counter_definition = (PERF_COUNTER_DEFINITION*) (buffer +
data_block->HeaderLength + object_type->HeaderLength);
for (i = 0; i < object_type->NumCounters; i++) {
if ((BYTE*) counter_definition + sizeof(*counter_definition) >
buffer + data_size) {
break;
}
if (counter_definition->CounterNameTitleIndex == 674 &&
counter_definition->CounterSize == sizeof(uint64_t)) {
if (counter_definition->CounterOffset + sizeof(uint64_t) > data_size ||
!(counter_definition->CounterType & PERF_OBJECT_TIMER)) {
goto internalError;
} else {
BYTE* address = (BYTE*) object_type + object_type->DefinitionLength +
counter_definition->CounterOffset;
uint64_t value = *((uint64_t*) address);
*uptime = floor((double) (object_type->PerfTime.QuadPart - value) /
(double) object_type->PerfFreq.QuadPart);
uv__free(malloced_buffer);
*uptime = GetTickCount64() / 1000.0;
return 0;
}
}
}
counter_definition = (PERF_COUNTER_DEFINITION*)
((BYTE*) counter_definition + counter_definition->ByteLength);
}
/* If we get here, the uptime value was not found. */
uv__free(malloced_buffer);
*uptime = 0;
return UV_ENOSYS;
unsigned int uv_available_parallelism(void) {
SYSTEM_INFO info;
unsigned rc;
internalError:
uv__free(malloced_buffer);
*uptime = 0;
return UV_EIO;
/* TODO(bnoordhuis) Use GetLogicalProcessorInformationEx() to support systems
* with > 64 CPUs? See https://github.com/libuv/libuv/pull/3458
*/
GetSystemInfo(&info);
rc = info.dwNumberOfProcessors;
if (rc < 1)
rc = 1;
return rc;
}
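
For context, the block above drops the old registry-based uptime lookup in favor of GetTickCount64() and introduces uv_available_parallelism(). A minimal caller of the two public functions (an illustrative sketch, not part of the patch) looks like:

    #include <stdio.h>
    #include <uv.h>

    int main(void) {
      double up;

      /* Number of CPUs available to this process; never less than 1. */
      printf("available parallelism: %u\n", uv_available_parallelism());

      /* Seconds since boot; on Windows this now comes from GetTickCount64(). */
      if (uv_uptime(&up) == 0)
        printf("uptime: %.0f seconds\n", up);

      return 0;
    }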


@ -48,7 +48,7 @@ sSetWinEventHook pSetWinEventHook;
/* ws2_32.dll function pointer */
uv_sGetHostNameW pGetHostNameW;
void uv_winapi_init(void) {
void uv__winapi_init(void) {
HMODULE ntdll_module;
HMODULE powrprof_module;
HMODULE user32_module;
@ -126,19 +126,19 @@ void uv_winapi_init(void) {
kernel32_module,
"GetQueuedCompletionStatusEx");
powrprof_module = LoadLibraryA("powrprof.dll");
powrprof_module = LoadLibraryExA("powrprof.dll", NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
if (powrprof_module != NULL) {
pPowerRegisterSuspendResumeNotification = (sPowerRegisterSuspendResumeNotification)
GetProcAddress(powrprof_module, "PowerRegisterSuspendResumeNotification");
}
user32_module = LoadLibraryA("user32.dll");
user32_module = GetModuleHandleA("user32.dll");
if (user32_module != NULL) {
pSetWinEventHook = (sSetWinEventHook)
GetProcAddress(user32_module, "SetWinEventHook");
}
ws2_32_module = LoadLibraryA("ws2_32.dll");
ws2_32_module = GetModuleHandleA("ws2_32.dll");
if (ws2_32_module != NULL) {
pGetHostNameW = (uv_sGetHostNameW) GetProcAddress(
ws2_32_module,


@ -38,7 +38,7 @@ struct sockaddr_in6 uv_addr_ip6_any_;
/*
* Retrieves the pointer to a winsock extension function.
*/
static BOOL uv_get_extension_function(SOCKET socket, GUID guid,
static BOOL uv__get_extension_function(SOCKET socket, GUID guid,
void **target) {
int result;
DWORD bytes;
@ -62,20 +62,20 @@ static BOOL uv_get_extension_function(SOCKET socket, GUID guid,
}
BOOL uv_get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX* target) {
BOOL uv__get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX* target) {
const GUID wsaid_acceptex = WSAID_ACCEPTEX;
return uv_get_extension_function(socket, wsaid_acceptex, (void**)target);
return uv__get_extension_function(socket, wsaid_acceptex, (void**)target);
}
BOOL uv_get_connectex_function(SOCKET socket, LPFN_CONNECTEX* target) {
BOOL uv__get_connectex_function(SOCKET socket, LPFN_CONNECTEX* target) {
const GUID wsaid_connectex = WSAID_CONNECTEX;
return uv_get_extension_function(socket, wsaid_connectex, (void**)target);
return uv__get_extension_function(socket, wsaid_connectex, (void**)target);
}
void uv_winsock_init(void) {
void uv__winsock_init(void) {
WSADATA wsa_data;
int errorno;
SOCKET dummy;
@ -134,7 +134,7 @@ void uv_winsock_init(void) {
}
int uv_ntstatus_to_winsock_error(NTSTATUS status) {
int uv__ntstatus_to_winsock_error(NTSTATUS status) {
switch (status) {
case STATUS_SUCCESS:
return ERROR_SUCCESS;
@ -267,7 +267,7 @@ int uv_ntstatus_to_winsock_error(NTSTATUS status) {
* the user to use the default msafd driver, doesn't work when other LSPs are
* stacked on top of it.
*/
int WSAAPI uv_wsarecv_workaround(SOCKET socket, WSABUF* buffers,
int WSAAPI uv__wsarecv_workaround(SOCKET socket, WSABUF* buffers,
DWORD buffer_count, DWORD* bytes, DWORD* flags, WSAOVERLAPPED *overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine) {
NTSTATUS status;
@ -346,7 +346,7 @@ int WSAAPI uv_wsarecv_workaround(SOCKET socket, WSABUF* buffers,
break;
default:
error = uv_ntstatus_to_winsock_error(status);
error = uv__ntstatus_to_winsock_error(status);
break;
}
@ -360,8 +360,8 @@ int WSAAPI uv_wsarecv_workaround(SOCKET socket, WSABUF* buffers,
}
/* See description of uv_wsarecv_workaround. */
int WSAAPI uv_wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
/* See description of uv__wsarecv_workaround. */
int WSAAPI uv__wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
DWORD buffer_count, DWORD* bytes, DWORD* flags, struct sockaddr* addr,
int* addr_len, WSAOVERLAPPED *overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine) {
@ -444,7 +444,7 @@ int WSAAPI uv_wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
break;
default:
error = uv_ntstatus_to_winsock_error(status);
error = uv__ntstatus_to_winsock_error(status);
break;
}
@ -458,7 +458,7 @@ int WSAAPI uv_wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
}
int WSAAPI uv_msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in,
int WSAAPI uv__msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in,
AFD_POLL_INFO* info_out, OVERLAPPED* overlapped) {
IO_STATUS_BLOCK iosb;
IO_STATUS_BLOCK* iosb_ptr;
@ -531,7 +531,7 @@ int WSAAPI uv_msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in,
break;
default:
error = uv_ntstatus_to_winsock_error(status);
error = uv__ntstatus_to_winsock_error(status);
break;
}


@ -23,7 +23,9 @@ BENCHMARK_DECLARE (sizes)
BENCHMARK_DECLARE (loop_count)
BENCHMARK_DECLARE (loop_count_timed)
BENCHMARK_DECLARE (ping_pongs)
BENCHMARK_DECLARE (ping_udp)
BENCHMARK_DECLARE (ping_udp1)
BENCHMARK_DECLARE (ping_udp10)
BENCHMARK_DECLARE (ping_udp100)
BENCHMARK_DECLARE (tcp_write_batch)
BENCHMARK_DECLARE (tcp4_pound_100)
BENCHMARK_DECLARE (tcp4_pound_1000)
@ -72,6 +74,7 @@ BENCHMARK_DECLARE (async_pummel_1)
BENCHMARK_DECLARE (async_pummel_2)
BENCHMARK_DECLARE (async_pummel_4)
BENCHMARK_DECLARE (async_pummel_8)
BENCHMARK_DECLARE (queue_work)
BENCHMARK_DECLARE (spawn)
BENCHMARK_DECLARE (thread_create)
BENCHMARK_DECLARE (million_async)
@ -90,6 +93,10 @@ TASK_LIST_START
BENCHMARK_ENTRY (ping_pongs)
BENCHMARK_HELPER (ping_pongs, tcp4_echo_server)
BENCHMARK_ENTRY (ping_udp1)
BENCHMARK_ENTRY (ping_udp10)
BENCHMARK_ENTRY (ping_udp100)
BENCHMARK_ENTRY (tcp_write_batch)
BENCHMARK_HELPER (tcp_write_batch, tcp4_blackhole_server)
@ -155,6 +162,7 @@ TASK_LIST_START
BENCHMARK_ENTRY (async_pummel_2)
BENCHMARK_ENTRY (async_pummel_4)
BENCHMARK_ENTRY (async_pummel_8)
BENCHMARK_ENTRY (queue_work)
BENCHMARK_ENTRY (spawn)
BENCHMARK_ENTRY (thread_create)


@ -94,6 +94,9 @@ static void pinger_read_cb(uv_udp_t* udp,
pinger_t* pinger;
pinger = (pinger_t*)udp->data;
/* No data here means something went wrong */
ASSERT(nread > 0);
/* Now we count the pings */
for (i = 0; i < nread; i++) {
ASSERT(buf->base[i] == PING[pinger->state]);
@ -108,6 +111,7 @@ static void pinger_read_cb(uv_udp_t* udp,
}
}
if (buf && !(flags & UV_UDP_MMSG_CHUNK))
buf_free(buf);
}
@ -122,6 +126,8 @@ static void udp_pinger_new(void) {
/* Try to do NUM_PINGS ping-pongs (connection-less). */
r = uv_udp_init(loop, &pinger->udp);
ASSERT(r == 0);
r = uv_udp_bind(&pinger->udp, (const struct sockaddr*) &pinger->server_addr, 0);
ASSERT(r == 0);
pinger->udp.data = pinger;


@ -1,4 +1,4 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
/* Copyright libuv contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
@ -19,58 +19,49 @@
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "task.h"
#include "uv.h"
static int idle_cb_called;
static int timer_cb_called;
static int done = 0;
static unsigned events = 0;
static unsigned result;
static uv_idle_t idle_handle;
static uv_timer_t timer_handle;
/* idle_cb should run before timer_cb */
static void idle_cb(uv_idle_t* handle) {
ASSERT(idle_cb_called == 0);
ASSERT(timer_cb_called == 0);
uv_idle_stop(handle);
idle_cb_called++;
static unsigned fastrand(void) {
static unsigned g = 0;
g = g * 214013 + 2531011;
return g;
}
static void timer_cb(uv_timer_t* handle) {
ASSERT(idle_cb_called == 1);
ASSERT(timer_cb_called == 0);
uv_timer_stop(handle);
timer_cb_called++;
static void work_cb(uv_work_t* req) {
req->data = &result;
*(unsigned*)req->data = fastrand();
}
static void next_tick(uv_idle_t* handle) {
uv_loop_t* loop = handle->loop;
uv_idle_stop(handle);
uv_idle_init(loop, &idle_handle);
uv_idle_start(&idle_handle, idle_cb);
uv_timer_init(loop, &timer_handle);
uv_timer_start(&timer_handle, timer_cb, 0, 0);
static void after_work_cb(uv_work_t* req, int status) {
events++;
if (!done)
ASSERT_EQ(0, uv_queue_work(req->loop, req, work_cb, after_work_cb));
}
static void timer_cb(uv_timer_t* handle) { done = 1; }
TEST_IMPL(callback_order) {
BENCHMARK_IMPL(queue_work) {
uv_timer_t timer_handle;
uv_work_t work;
uv_loop_t* loop;
uv_idle_t idle;
int timeout;
loop = uv_default_loop();
uv_idle_init(loop, &idle);
uv_idle_start(&idle, next_tick);
timeout = 5000;
ASSERT(idle_cb_called == 0);
ASSERT(timer_cb_called == 0);
ASSERT_EQ(0, uv_timer_init(loop, &timer_handle));
ASSERT_EQ(0, uv_timer_start(&timer_handle, timer_cb, timeout, 0));
uv_run(loop, UV_RUN_DEFAULT);
ASSERT_EQ(0, uv_queue_work(loop, &work, work_cb, after_work_cb));
ASSERT_EQ(0, uv_run(loop, UV_RUN_DEFAULT));
ASSERT(idle_cb_called == 1);
ASSERT(timer_cb_called == 1);
printf("%s async jobs in %.1f seconds (%s/s)\n", fmt(events), timeout / 1000.,
fmt(events / (timeout / 1000.)));
MAKE_VALGRIND_HAPPY();
return 0;
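
The queue_work benchmark above re-queues a uv_work_t until a timer fires; stripped of the benchmarking scaffolding, the thread-pool API it exercises is used roughly like this (an illustrative sketch relying only on the public uv_queue_work() interface):

    #include <stdio.h>
    #include <uv.h>

    static void work_cb(uv_work_t* req) {
      /* Runs on a libuv thread-pool thread. */
      *(int*) req->data += 1;
    }

    static void after_work_cb(uv_work_t* req, int status) {
      /* Runs back on the loop thread after work_cb() returns. */
      printf("status=%d value=%d\n", status, *(int*) req->data);
    }

    int main(void) {
      uv_work_t req;
      int value = 0;

      req.data = &value;
      uv_queue_work(uv_default_loop(), &req, work_cb, after_work_cb);
      return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
    }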


@ -49,7 +49,6 @@ __attribute__((constructor)) void init() {
int ipc_helper(int listen_after_write);
int ipc_helper_heavy_traffic_deadlock_bug(void);
int ipc_helper_tcp_connection(void);
int ipc_helper_closed_handle(void);
int ipc_send_recv_helper(void);
int ipc_helper_bind_twice(void);
int ipc_helper_send_zero(void);
@ -119,10 +118,6 @@ static int maybe_run_test(int argc, char **argv) {
return ipc_helper_tcp_connection();
}
if (strcmp(argv[1], "ipc_helper_closed_handle") == 0) {
return ipc_helper_closed_handle();
}
if (strcmp(argv[1], "ipc_helper_bind_twice") == 0) {
return ipc_helper_bind_twice();
}


@ -333,8 +333,8 @@ int process_wait(process_info_t* vec, int n, int timeout) {
abort();
terminate:
close(args.pipe[0]);
close(args.pipe[1]);
closefd(args.pipe[0]);
closefd(args.pipe[1]);
return retval;
}


@ -1,4 +1,4 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
@ -25,115 +25,55 @@
#include <stdlib.h>
#include <errno.h>
#ifndef HAVE_KQUEUE
# if defined(__APPLE__) || \
defined(__DragonFly__) || \
defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__) || \
defined(__OpenBSD__) || \
defined(__NetBSD__)
# define HAVE_KQUEUE 1
# endif
#if !defined(_WIN32) && !defined(_AIX)
#include <poll.h>
#endif
#ifndef HAVE_EPOLL
# if defined(__linux__)
# define HAVE_EPOLL 1
# endif
#endif
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
#if defined(HAVE_KQUEUE)
# include <sys/types.h>
# include <sys/event.h>
# include <sys/time.h>
#endif
#if defined(HAVE_EPOLL)
# include <sys/epoll.h>
#endif
static uv_thread_t embed_thread;
static uv_sem_t embed_sem;
static uv_timer_t embed_timer;
static uv_async_t embed_async;
static volatile int embed_closed;
static int embed_timer_called;
static uv_async_t async;
static uv_barrier_t barrier;
static void embed_thread_runner(void* arg) {
int r;
int fd;
int timeout;
while (!embed_closed) {
fd = uv_backend_fd(uv_default_loop());
timeout = uv_backend_timeout(uv_default_loop());
do {
#if defined(HAVE_KQUEUE)
struct timespec ts;
ts.tv_sec = timeout / 1000;
ts.tv_nsec = (timeout % 1000) * 1000000;
r = kevent(fd, NULL, 0, NULL, 0, &ts);
#elif defined(HAVE_EPOLL)
{
struct epoll_event ev;
r = epoll_wait(fd, &ev, 1, timeout);
}
#endif
} while (r == -1 && errno == EINTR);
uv_async_send(&embed_async);
uv_sem_wait(&embed_sem);
}
static void thread_main(void* arg) {
ASSERT_LE(0, uv_barrier_wait(&barrier));
uv_sleep(250);
ASSERT_EQ(0, uv_async_send(&async));
}
static void embed_cb(uv_async_t* async) {
uv_run(uv_default_loop(), UV_RUN_ONCE);
uv_sem_post(&embed_sem);
static void async_cb(uv_async_t* handle) {
uv_close((uv_handle_t*) handle, NULL);
}
static void embed_timer_cb(uv_timer_t* timer) {
embed_timer_called++;
embed_closed = 1;
uv_close((uv_handle_t*) &embed_async, NULL);
}
#endif
TEST_IMPL(embed) {
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
uv_loop_t external;
uv_thread_t thread;
uv_loop_t* loop;
ASSERT(0 == uv_loop_init(&external));
loop = uv_default_loop();
ASSERT_EQ(0, uv_async_init(loop, &async, async_cb));
ASSERT_EQ(0, uv_barrier_init(&barrier, 2));
ASSERT_EQ(0, uv_thread_create(&thread, thread_main, NULL));
ASSERT_LE(0, uv_barrier_wait(&barrier));
embed_timer_called = 0;
embed_closed = 0;
uv_async_init(&external, &embed_async, embed_cb);
/* Start timer in default loop */
uv_timer_init(uv_default_loop(), &embed_timer);
uv_timer_start(&embed_timer, embed_timer_cb, 250, 0);
/* Start worker that will interrupt external loop */
uv_sem_init(&embed_sem, 0);
uv_thread_create(&embed_thread, embed_thread_runner, NULL);
/* But run external loop */
uv_run(&external, UV_RUN_DEFAULT);
uv_thread_join(&embed_thread);
uv_loop_close(&external);
ASSERT(embed_timer_called == 1);
while (uv_loop_alive(loop)) {
#if defined(_WIN32) || defined(_AIX)
ASSERT_LE(0, uv_run(loop, UV_RUN_ONCE));
#else
int rc;
do {
struct pollfd p;
p.fd = uv_backend_fd(loop);
p.events = POLLIN;
p.revents = 0;
rc = poll(&p, 1, uv_backend_timeout(loop));
} while (rc == -1 && errno == EINTR);
ASSERT_LE(0, uv_run(loop, UV_RUN_NOWAIT));
#endif
}
ASSERT_EQ(0, uv_thread_join(&thread));
uv_barrier_destroy(&barrier);
MAKE_VALGRIND_HAPPY();
return 0;
}


@ -334,19 +334,8 @@ static void fs_event_cb_file(uv_fs_event_t* handle, const char* filename,
uv_close((uv_handle_t*)handle, close_cb);
}
static void timer_cb_close_handle(uv_timer_t* timer) {
uv_handle_t* handle;
ASSERT_NOT_NULL(timer);
handle = timer->data;
uv_close((uv_handle_t*)timer, NULL);
uv_close((uv_handle_t*)handle, close_cb);
}
static void fs_event_cb_file_current_dir(uv_fs_event_t* handle,
const char* filename, int events, int status) {
ASSERT(fs_event_cb_called == 0);
++fs_event_cb_called;
ASSERT(handle == &fs_event);
@ -358,13 +347,7 @@ static void fs_event_cb_file_current_dir(uv_fs_event_t* handle,
ASSERT(filename == NULL || strcmp(filename, "watch_file") == 0);
#endif
/* Regression test for SunOS: touch should generate just one event. */
{
static uv_timer_t timer;
uv_timer_init(handle->loop, &timer);
timer.data = handle;
uv_timer_start(&timer, timer_cb_close_handle, 250, 0);
}
uv_close((uv_handle_t*)handle, close_cb);
}
static void timer_cb_file(uv_timer_t* handle) {
@ -738,7 +721,8 @@ TEST_IMPL(fs_event_watch_file_current_dir) {
uv_run(loop, UV_RUN_DEFAULT);
ASSERT(timer_cb_touch_called == 1);
ASSERT(fs_event_cb_called == 1);
/* FSEvents on macOS sometimes sends one change event, sometimes two. */
ASSERT_NE(0, fs_event_cb_called);
ASSERT(close_cb_called == 1);
/* Cleanup */
@ -923,6 +907,44 @@ TEST_IMPL(fs_event_close_with_pending_event) {
return 0;
}
TEST_IMPL(fs_event_close_with_pending_delete_event) {
#if defined(NO_FS_EVENTS)
RETURN_SKIP(NO_FS_EVENTS);
#endif
uv_loop_t* loop;
int r;
loop = uv_default_loop();
create_dir("watch_dir");
create_file("watch_dir/file");
r = uv_fs_event_init(loop, &fs_event);
ASSERT(r == 0);
r = uv_fs_event_start(&fs_event, fs_event_fail, "watch_dir/file", 0);
ASSERT(r == 0);
/* Generate an fs event. */
remove("watch_dir/file");
/* Allow time for the remove event to propagate to the pending list. */
/* XXX - perhaps just for __sun? */
uv_sleep(1100);
uv_update_time(loop);
uv_close((uv_handle_t*)&fs_event, close_cb);
uv_run(loop, UV_RUN_DEFAULT);
ASSERT(close_cb_called == 1);
/* Clean up */
remove("watch_dir/");
MAKE_VALGRIND_HAPPY();
return 0;
}
TEST_IMPL(fs_event_close_in_callback) {
#if defined(NO_FS_EVENTS)
RETURN_SKIP(NO_FS_EVENTS);


@ -851,6 +851,11 @@ static void check_utime(const char* path,
#endif
st_atim = s->st_atim.tv_sec + s->st_atim.tv_nsec / 1e9;
st_mtim = s->st_mtim.tv_sec + s->st_mtim.tv_nsec / 1e9;
/*
* Linux does not allow reading reliably the atime of a symlink
* since readlink() can update it
*/
if (!test_lutime)
ASSERT_DOUBLE_EQ(st_atim, atime);
ASSERT_DOUBLE_EQ(st_mtim, mtime);
}


@ -22,6 +22,10 @@
#include "uv.h"
#include "task.h"
#include <string.h>
#ifndef _WIN32
#include <unistd.h>
#include <sys/types.h>
#endif
TEST_IMPL(get_passwd) {
/* TODO(gengjiawen): Fix test on QEMU. */
@ -64,11 +68,15 @@ TEST_IMPL(get_passwd) {
#endif
#ifdef _WIN32
ASSERT(pwd.uid == -1);
ASSERT(pwd.gid == -1);
ASSERT_EQ(pwd.uid, (unsigned)-1);
ASSERT_EQ(pwd.gid, (unsigned)-1);
#else
ASSERT(pwd.uid >= 0);
ASSERT(pwd.gid >= 0);
ASSERT_NE(pwd.uid, (unsigned)-1);
ASSERT_NE(pwd.gid, (unsigned)-1);
ASSERT_EQ(pwd.uid, geteuid());
if (pwd.uid != 0 && pwd.gid != getgid())
/* This will be likely true, as only root could have changed it. */
ASSERT_EQ(pwd.gid, getegid());
#endif
/* Test uv_os_free_passwd() */


@ -30,8 +30,9 @@ static const int server_port = TEST_PORT;
/* Will be updated right after making the uv_connect_call */
static int connect_port = -1;
static int getsocknamecount = 0;
static int getsocknamecount_tcp = 0;
static int getpeernamecount = 0;
static int getsocknamecount_udp = 0;
static uv_loop_t* loop;
static uv_tcp_t tcp;
@ -131,7 +132,7 @@ static void on_connection(uv_stream_t* server, int status) {
r = uv_tcp_getsockname(handle, &sockname, &namelen);
ASSERT(r == 0);
check_sockname(&sockname, "127.0.0.1", server_port, "accepted socket");
getsocknamecount++;
getsocknamecount_tcp++;
namelen = sizeof peername;
r = uv_tcp_getpeername(handle, &peername, &namelen);
@ -154,7 +155,7 @@ static void on_connect(uv_connect_t* req, int status) {
r = uv_tcp_getsockname((uv_tcp_t*) req->handle, &sockname, &namelen);
ASSERT(r == 0);
check_sockname(&sockname, "127.0.0.1", 0, "connected socket");
getsocknamecount++;
getsocknamecount_tcp++;
namelen = sizeof peername;
r = uv_tcp_getpeername((uv_tcp_t*) req->handle, &peername, &namelen);
@ -197,7 +198,7 @@ static int tcp_listener(void) {
r = uv_tcp_getsockname(&tcpServer, &sockname, &namelen);
ASSERT(r == 0);
check_sockname(&sockname, "0.0.0.0", server_port, "server socket");
getsocknamecount++;
getsocknamecount_tcp++;
namelen = sizeof sockname;
r = uv_tcp_getpeername(&tcpServer, &peername, &namelen);
@ -256,7 +257,7 @@ static void udp_recv(uv_udp_t* handle,
r = uv_udp_getsockname(&udp, &sockname, &namelen);
ASSERT(r == 0);
check_sockname(&sockname, "0.0.0.0", 0, "udp receiving socket");
getsocknamecount++;
getsocknamecount_udp++;
uv_close((uv_handle_t*) &udp, NULL);
uv_close((uv_handle_t*) handle, NULL);
@ -293,7 +294,7 @@ static int udp_listener(void) {
r = uv_udp_getsockname(&udpServer, &sockname, &namelen);
ASSERT(r == 0);
check_sockname(&sockname, "0.0.0.0", server_port, "udp listener socket");
getsocknamecount++;
getsocknamecount_udp++;
r = uv_udp_recv_start(&udpServer, alloc, udp_recv);
ASSERT(r == 0);
@ -333,7 +334,7 @@ TEST_IMPL(getsockname_tcp) {
uv_run(loop, UV_RUN_DEFAULT);
ASSERT(getsocknamecount == 3);
ASSERT(getsocknamecount_tcp == 3);
ASSERT(getpeernamecount == 3);
MAKE_VALGRIND_HAPPY();
@ -351,7 +352,7 @@ TEST_IMPL(getsockname_udp) {
uv_run(loop, UV_RUN_DEFAULT);
ASSERT(getsocknamecount == 2);
ASSERT(getsocknamecount_udp == 2);
ASSERT(udp.send_queue_size == 0);
ASSERT(udpServer.send_queue_size == 0);


@ -97,3 +97,29 @@ TEST_IMPL(idle_starvation) {
MAKE_VALGRIND_HAPPY();
return 0;
}
static void idle_stop(uv_idle_t* handle) {
uv_idle_stop(handle);
}
TEST_IMPL(idle_check) {
ASSERT_EQ(0, uv_idle_init(uv_default_loop(), &idle_handle));
ASSERT_EQ(0, uv_idle_start(&idle_handle, idle_stop));
ASSERT_EQ(0, uv_check_init(uv_default_loop(), &check_handle));
ASSERT_EQ(0, uv_check_start(&check_handle, check_cb));
ASSERT_EQ(1, uv_run(uv_default_loop(), UV_RUN_ONCE));
ASSERT_EQ(1, check_cb_called);
ASSERT_EQ(0, close_cb_called);
uv_close((uv_handle_t*) &idle_handle, close_cb);
uv_close((uv_handle_t*) &check_handle, close_cb);
ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_ONCE));
ASSERT_EQ(2, close_cb_called);
MAKE_VALGRIND_HAPPY();
return 0;
}


@ -308,8 +308,12 @@ static void read_cb(uv_stream_t* handle,
return;
}
ASSERT_GE(nread, 0);
pipe = (uv_pipe_t*) handle;
do {
ASSERT_EQ(pipe, &ctx2.channel);
while (uv_pipe_pending_count(pipe) > 0) {
if (++read_cb_count == 2) {
recv = &ctx2.recv;
write_req = &ctx2.write_req;
@ -318,10 +322,6 @@ static void read_cb(uv_stream_t* handle,
write_req = &ctx2.write_req2;
}
ASSERT(pipe == &ctx2.channel);
ASSERT(nread >= 0);
ASSERT(uv_pipe_pending_count(pipe) > 0);
pending = uv_pipe_pending_type(pipe);
ASSERT(pending == UV_NAMED_PIPE || pending == UV_TCP);
@ -344,7 +344,7 @@ static void read_cb(uv_stream_t* handle,
&recv->stream,
write2_cb);
ASSERT(r == 0);
} while (uv_pipe_pending_count(pipe) > 0);
}
}
static void send_recv_start(void) {


@ -45,8 +45,6 @@ static int close_cb_called;
static int connection_accepted;
static int tcp_conn_read_cb_called;
static int tcp_conn_write_cb_called;
static int closed_handle_data_read;
static int closed_handle_write;
static int send_zero_write;
typedef struct {
@ -57,15 +55,6 @@ typedef struct {
#define CONN_COUNT 100
#define BACKLOG 128
#define LARGE_SIZE 100000
static uv_buf_t large_buf;
static char buffer[LARGE_SIZE];
static uv_write_t write_reqs[300];
static int write_reqs_completed;
static unsigned int write_until_data_queued(void);
static void send_handle_and_close(void);
static void close_server_conn_cb(uv_handle_t* handle) {
@ -417,26 +406,6 @@ static void on_read_connection(uv_stream_t* handle,
}
#ifndef _WIN32
static void on_read_closed_handle(uv_stream_t* handle,
ssize_t nread,
const uv_buf_t* buf) {
if (nread == 0 || nread == UV_EOF) {
free(buf->base);
return;
}
if (nread < 0) {
printf("error recving on channel: %s\n", uv_strerror(nread));
abort();
}
closed_handle_data_read += nread;
free(buf->base);
}
#endif
static void on_read_send_zero(uv_stream_t* handle,
ssize_t nread,
const uv_buf_t* buf) {
@ -498,15 +467,6 @@ TEST_IMPL(ipc_tcp_connection) {
return r;
}
#ifndef _WIN32
TEST_IMPL(ipc_closed_handle) {
int r;
r = run_ipc_test("ipc_helper_closed_handle", on_read_closed_handle);
ASSERT_EQ(r, 0);
return 0;
}
#endif
#ifdef _WIN32
TEST_IMPL(listen_with_simultaneous_accepts) {
@ -602,23 +562,6 @@ static void tcp_connection_write_cb(uv_write_t* req, int status) {
}
static void closed_handle_large_write_cb(uv_write_t* req, int status) {
ASSERT_EQ(status, 0);
ASSERT(closed_handle_data_read = LARGE_SIZE);
if (++write_reqs_completed == ARRAY_SIZE(write_reqs)) {
write_reqs_completed = 0;
if (write_until_data_queued() > 0)
send_handle_and_close();
}
}
static void closed_handle_write_cb(uv_write_t* req, int status) {
ASSERT_EQ(status, UV_EBADF);
closed_handle_write = 1;
}
static void send_zero_write_cb(uv_write_t* req, int status) {
ASSERT_EQ(status, 0);
send_zero_write++;
@ -835,76 +778,6 @@ int ipc_helper_tcp_connection(void) {
return 0;
}
static unsigned int write_until_data_queued() {
unsigned int i;
int r;
i = 0;
do {
r = uv_write(&write_reqs[i],
(uv_stream_t*)&channel,
&large_buf,
1,
closed_handle_large_write_cb);
ASSERT_EQ(r, 0);
i++;
} while (channel.write_queue_size == 0 &&
i < ARRAY_SIZE(write_reqs));
return channel.write_queue_size;
}
static void send_handle_and_close() {
int r;
struct sockaddr_in addr;
r = uv_tcp_init(uv_default_loop(), &tcp_server);
ASSERT_EQ(r, 0);
ASSERT_EQ(0, uv_ip4_addr("0.0.0.0", TEST_PORT, &addr));
r = uv_tcp_bind(&tcp_server, (const struct sockaddr*) &addr, 0);
ASSERT_EQ(r, 0);
r = uv_write2(&write_req,
(uv_stream_t*)&channel,
&large_buf,
1,
(uv_stream_t*)&tcp_server,
closed_handle_write_cb);
ASSERT_EQ(r, 0);
uv_close((uv_handle_t*)&tcp_server, NULL);
}
int ipc_helper_closed_handle(void) {
int r;
memset(buffer, '.', LARGE_SIZE);
large_buf = uv_buf_init(buffer, LARGE_SIZE);
r = uv_pipe_init(uv_default_loop(), &channel, 1);
ASSERT_EQ(r, 0);
uv_pipe_open(&channel, 0);
ASSERT_EQ(1, uv_is_readable((uv_stream_t*) &channel));
ASSERT_EQ(1, uv_is_writable((uv_stream_t*) &channel));
ASSERT_EQ(0, uv_is_closing((uv_handle_t*) &channel));
if (write_until_data_queued() > 0)
send_handle_and_close();
r = uv_run(uv_default_loop(), UV_RUN_DEFAULT);
ASSERT_EQ(r, 0);
ASSERT_EQ(closed_handle_write, 1);
MAKE_VALGRIND_HAPPY();
return 0;
}
int ipc_helper_bind_twice(void) {
/*
* This is launched from test-ipc.c. stdin is a duplex channel


@ -22,7 +22,6 @@
#include "uv.h"
TEST_DECLARE (platform_output)
TEST_DECLARE (callback_order)
TEST_DECLARE (close_order)
TEST_DECLARE (run_once)
TEST_DECLARE (run_nowait)
@ -91,9 +90,6 @@ TEST_DECLARE (ipc_send_recv_tcp)
TEST_DECLARE (ipc_send_recv_tcp_inprocess)
TEST_DECLARE (ipc_tcp_connection)
TEST_DECLARE (ipc_send_zero)
#ifndef _WIN32
TEST_DECLARE (ipc_closed_handle)
#endif
TEST_DECLARE (tcp_alloc_cb_fail)
TEST_DECLARE (tcp_ping_pong)
TEST_DECLARE (tcp_ping_pong_vec)
@ -126,15 +122,18 @@ TEST_DECLARE (tcp_bind_error_inval)
TEST_DECLARE (tcp_bind_localhost_ok)
TEST_DECLARE (tcp_bind_invalid_flags)
TEST_DECLARE (tcp_bind_writable_flags)
TEST_DECLARE (tcp_bind_or_listen_error_after_close)
TEST_DECLARE (tcp_listen_without_bind)
TEST_DECLARE (tcp_connect_error_fault)
TEST_DECLARE (tcp_connect_timeout)
TEST_DECLARE (tcp_local_connect_timeout)
TEST_DECLARE (tcp6_local_connect_timeout)
TEST_DECLARE (tcp_close_while_connecting)
TEST_DECLARE (tcp_close_after_read_timeout)
TEST_DECLARE (tcp_close)
TEST_DECLARE (tcp_close_reset_accepted)
TEST_DECLARE (tcp_close_reset_accepted_after_shutdown)
TEST_DECLARE (tcp_close_reset_accepted_after_socket_shutdown)
TEST_DECLARE (tcp_close_reset_client)
TEST_DECLARE (tcp_close_reset_client_after_shutdown)
TEST_DECLARE (tcp_create_early)
@ -150,6 +149,7 @@ TEST_DECLARE (tcp_write_to_half_open_connection)
TEST_DECLARE (tcp_unexpected_read)
TEST_DECLARE (tcp_read_stop)
TEST_DECLARE (tcp_read_stop_start)
TEST_DECLARE (tcp_rst)
TEST_DECLARE (tcp_bind6_error_addrinuse)
TEST_DECLARE (tcp_bind6_error_addrnotavail)
TEST_DECLARE (tcp_bind6_error_fault)
@ -194,6 +194,7 @@ TEST_DECLARE (pipe_bind_error_addrnotavail)
TEST_DECLARE (pipe_bind_error_inval)
TEST_DECLARE (pipe_connect_multiple)
TEST_DECLARE (pipe_listen_without_bind)
TEST_DECLARE (pipe_bind_or_listen_error_after_close)
TEST_DECLARE (pipe_connect_bad_name)
TEST_DECLARE (pipe_connect_to_file)
TEST_DECLARE (pipe_connect_on_prepare)
@ -227,6 +228,7 @@ TEST_DECLARE (timer_is_closing)
TEST_DECLARE (timer_null_callback)
TEST_DECLARE (timer_early_check)
TEST_DECLARE (idle_starvation)
TEST_DECLARE (idle_check)
TEST_DECLARE (loop_handles)
TEST_DECLARE (get_loadavg)
TEST_DECLARE (walk_handles)
@ -320,6 +322,8 @@ TEST_DECLARE (spawn_reads_child_path)
TEST_DECLARE (spawn_inherit_streams)
TEST_DECLARE (spawn_quoted_path)
TEST_DECLARE (spawn_tcp_server)
TEST_DECLARE (spawn_exercise_sigchld_issue)
TEST_DECLARE (spawn_relative_path)
TEST_DECLARE (fs_poll)
TEST_DECLARE (fs_poll_getpath)
TEST_DECLARE (fs_poll_close_request)
@ -388,6 +392,7 @@ TEST_DECLARE (fs_event_no_callback_after_close)
TEST_DECLARE (fs_event_no_callback_on_close)
TEST_DECLARE (fs_event_immediate_close)
TEST_DECLARE (fs_event_close_with_pending_event)
TEST_DECLARE (fs_event_close_with_pending_delete_event)
TEST_DECLARE (fs_event_close_in_callback)
TEST_DECLARE (fs_event_start_and_close)
TEST_DECLARE (fs_event_error_reporting)
@ -424,6 +429,7 @@ TEST_DECLARE (fs_invalid_mkdir_name)
#endif
TEST_DECLARE (fs_get_system_error)
TEST_DECLARE (strscpy)
TEST_DECLARE (strtok)
TEST_DECLARE (threadpool_queue_work_simple)
TEST_DECLARE (threadpool_queue_work_einval)
TEST_DECLARE (threadpool_multiple_event_loops)
@ -543,9 +549,6 @@ TEST_DECLARE (metrics_idle_time_zero)
TASK_LIST_START
TEST_ENTRY_CUSTOM (platform_output, 0, 1, 5000)
#if 0
TEST_ENTRY (callback_order)
#endif
TEST_ENTRY (test_macros)
TEST_ENTRY (close_order)
TEST_ENTRY (run_once)
@ -627,9 +630,6 @@ TASK_LIST_START
TEST_ENTRY (ipc_send_recv_tcp_inprocess)
TEST_ENTRY (ipc_tcp_connection)
TEST_ENTRY (ipc_send_zero)
#ifndef _WIN32
TEST_ENTRY (ipc_closed_handle)
#endif
TEST_ENTRY (tcp_alloc_cb_fail)
@ -700,15 +700,18 @@ TASK_LIST_START
TEST_ENTRY (tcp_bind_localhost_ok)
TEST_ENTRY (tcp_bind_invalid_flags)
TEST_ENTRY (tcp_bind_writable_flags)
TEST_ENTRY (tcp_bind_or_listen_error_after_close)
TEST_ENTRY (tcp_listen_without_bind)
TEST_ENTRY (tcp_connect_error_fault)
TEST_ENTRY (tcp_connect_timeout)
TEST_ENTRY (tcp_local_connect_timeout)
TEST_ENTRY (tcp6_local_connect_timeout)
TEST_ENTRY (tcp_close_while_connecting)
TEST_ENTRY (tcp_close_after_read_timeout)
TEST_ENTRY (tcp_close)
TEST_ENTRY (tcp_close_reset_accepted)
TEST_ENTRY (tcp_close_reset_accepted_after_shutdown)
TEST_ENTRY (tcp_close_reset_accepted_after_socket_shutdown)
TEST_ENTRY (tcp_close_reset_client)
TEST_ENTRY (tcp_close_reset_client_after_shutdown)
TEST_ENTRY (tcp_create_early)
@ -728,6 +731,9 @@ TASK_LIST_START
TEST_ENTRY (tcp_read_stop_start)
TEST_ENTRY (tcp_rst)
TEST_HELPER (tcp_rst, tcp4_echo_server)
TEST_ENTRY (tcp_bind6_error_addrinuse)
TEST_ENTRY (tcp_bind6_error_addrnotavail)
TEST_ENTRY (tcp_bind6_error_fault)
@ -774,6 +780,7 @@ TASK_LIST_START
TEST_ENTRY (pipe_bind_error_inval)
TEST_ENTRY (pipe_connect_multiple)
TEST_ENTRY (pipe_listen_without_bind)
TEST_ENTRY (pipe_bind_or_listen_error_after_close)
TEST_ENTRY (pipe_getsockname)
TEST_ENTRY (pipe_getsockname_abstract)
TEST_ENTRY (pipe_getsockname_blocking)
@ -819,6 +826,7 @@ TASK_LIST_START
TEST_ENTRY (timer_early_check)
TEST_ENTRY (idle_starvation)
TEST_ENTRY (idle_check)
TEST_ENTRY (ref)
TEST_ENTRY (idle_ref)
@ -950,6 +958,8 @@ TASK_LIST_START
TEST_ENTRY (spawn_inherit_streams)
TEST_ENTRY (spawn_quoted_path)
TEST_ENTRY (spawn_tcp_server)
TEST_ENTRY (spawn_exercise_sigchld_issue)
TEST_ENTRY (spawn_relative_path)
TEST_ENTRY (fs_poll)
TEST_ENTRY (fs_poll_getpath)
TEST_ENTRY (fs_poll_close_request)
@ -1052,6 +1062,7 @@ TASK_LIST_START
TEST_ENTRY (fs_event_no_callback_on_close)
TEST_ENTRY (fs_event_immediate_close)
TEST_ENTRY (fs_event_close_with_pending_event)
TEST_ENTRY (fs_event_close_with_pending_delete_event)
TEST_ENTRY (fs_event_close_in_callback)
TEST_ENTRY (fs_event_start_and_close)
TEST_ENTRY_CUSTOM (fs_event_error_reporting, 0, 0, 60000)
@ -1088,6 +1099,7 @@ TASK_LIST_START
TEST_ENTRY (get_osfhandle_valid_handle)
TEST_ENTRY (open_osfhandle_valid_handle)
TEST_ENTRY (strscpy)
TEST_ENTRY (strtok)
TEST_ENTRY (threadpool_queue_work_simple)
TEST_ENTRY (threadpool_queue_work_einval)
TEST_ENTRY_CUSTOM (threadpool_multiple_event_loops, 0, 0, 60000)

View file

@ -28,7 +28,7 @@ TEST_IMPL(loop_update_time) {
start = uv_now(uv_default_loop());
while (uv_now(uv_default_loop()) - start < 1000)
ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_NOWAIT));
ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_NOWAIT));
MAKE_VALGRIND_HAPPY();
return 0;
@ -43,20 +43,26 @@ TEST_IMPL(loop_backend_timeout) {
uv_timer_t timer;
int r;
r = uv_timer_init(loop, &timer);
ASSERT(r == 0);
/* The default loop has some internal watchers to initialize. */
loop->active_handles++;
r = uv_run(loop, UV_RUN_NOWAIT);
ASSERT_EQ(r, 1);
loop->active_handles--;
ASSERT_EQ(uv_loop_alive(loop), 0);
ASSERT(!uv_loop_alive(loop));
ASSERT(uv_backend_timeout(loop) == 0);
r = uv_timer_init(loop, &timer);
ASSERT_EQ(r, 0);
ASSERT_EQ(uv_loop_alive(loop), 0);
ASSERT_EQ(uv_backend_timeout(loop), 0);
r = uv_timer_start(&timer, cb, 1000, 0); /* 1 sec */
ASSERT(r == 0);
ASSERT(uv_backend_timeout(loop) > 100); /* 0.1 sec */
ASSERT(uv_backend_timeout(loop) <= 1000); /* 1 sec */
ASSERT_EQ(r, 0);
ASSERT_EQ(uv_backend_timeout(loop), 1000);
r = uv_run(loop, UV_RUN_DEFAULT);
ASSERT(r == 0);
ASSERT(uv_backend_timeout(loop) == 0);
ASSERT_EQ(r, 0);
ASSERT_EQ(uv_backend_timeout(loop), 0);
MAKE_VALGRIND_HAPPY();
return 0;
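
Note: the tightened assertions above pin uv_backend_timeout() to the exact delay of the next timer (1000 ms) instead of the old loose range. That value is what loop embedders feed into their own poller. A minimal sketch of that pattern, assuming a POSIX poll(2)-based embedder; run_embedded() is a hypothetical helper, not a libuv API:

    #include <poll.h>
    #include <uv.h>

    /* Drive a libuv loop from an external poller: block for exactly the time
     * libuv reports until its next timer, then let it process what is ready. */
    static void run_embedded(uv_loop_t* loop) {
      while (uv_loop_alive(loop)) {
        struct pollfd pfd;
        pfd.fd = uv_backend_fd(loop);   /* fd libuv polls internally (POSIX only) */
        pfd.events = POLLIN;
        pfd.revents = 0;
        poll(&pfd, 1, uv_backend_timeout(loop));  /* -1 means "no timers, block" */
        uv_run(loop, UV_RUN_NOWAIT);    /* non-blocking pass over ready events */
      }
    }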

View file

@ -137,3 +137,19 @@ TEST_IMPL(pipe_listen_without_bind) {
MAKE_VALGRIND_HAPPY();
return 0;
}
TEST_IMPL(pipe_bind_or_listen_error_after_close) {
uv_pipe_t server;
ASSERT_EQ(uv_pipe_init(uv_default_loop(), &server, 0), 0);
uv_close((uv_handle_t*) &server, NULL);
ASSERT_EQ(uv_pipe_bind(&server, TEST_PIPENAME), UV_EINVAL);
ASSERT_EQ(uv_listen((uv_stream_t*) &server, SOMAXCONN, NULL), UV_EINVAL);
ASSERT_EQ(uv_run(uv_default_loop(), UV_RUN_DEFAULT), 0);
MAKE_VALGRIND_HAPPY();
return 0;
}
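
Note: this new test (and its TCP twin later in this change) pins down that uv_pipe_bind() and uv_listen() on an already-closed handle fail with UV_EINVAL and leave nothing for uv_run() to do. For contrast, a minimal sketch of the intended order on a live handle; on_connection and EXAMPLE_PIPE_NAME are placeholders, not part of the test suite:

    #include <uv.h>

    #define EXAMPLE_PIPE_NAME "/tmp/uv-example.sock"  /* placeholder path */

    static void on_connection(uv_stream_t* server, int status) {
      (void) server;
      (void) status;  /* accept/read would go here */
    }

    static int start_pipe_server(uv_loop_t* loop, uv_pipe_t* server) {
      int r;
      if ((r = uv_pipe_init(loop, server, 0)) != 0) return r;
      if ((r = uv_pipe_bind(server, EXAMPLE_PIPE_NAME)) != 0) return r;
      return uv_listen((uv_stream_t*) server, 128, on_connection);
      /* uv_close() comes only once the server is done serving. */
    }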

View file

@ -46,11 +46,7 @@ static void thread_main(void* arg) {
uv_fs_req_cleanup(&req);
} while (n > 0 || (n == -1 && uv_errno == UV_EINTR));
#ifdef _WIN32
ASSERT(n == UV_EOF);
#else
ASSERT(n == 0);
#endif
}
@ -106,8 +102,7 @@ TEST_IMPL(pipe_set_non_blocking) {
ASSERT(n == UV_EAGAIN); /* E_NOTIMPL */
ASSERT(0 == uv_write(&write_req, (uv_stream_t*) &pipe_handle, &buf, 1, write_cb));
ASSERT_NOT_NULL(write_req.handle);
ASSERT(1 == uv_run(uv_default_loop(), UV_RUN_ONCE)); /* queue write_cb */
ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_ONCE)); /* process write_cb */
ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_ONCE));
ASSERT_NULL(write_req.handle); /* check for signaled completion of write_cb */
n = buf.len;
#endif

View file

@ -41,6 +41,7 @@ TEST_IMPL(platform_output) {
uv_interface_address_t* interfaces;
uv_passwd_t pwd;
uv_utsname_t uname;
unsigned par;
int count;
int i;
int err;
@ -88,6 +89,10 @@ TEST_IMPL(platform_output) {
printf(" maximum resident set size: %llu\n",
(unsigned long long) rusage.ru_maxrss);
par = uv_available_parallelism();
ASSERT_GE(par, 1);
printf("uv_available_parallelism: %u\n", par);
err = uv_cpu_info(&cpus, &count);
#if defined(__CYGWIN__) || defined(__MSYS__)
ASSERT(err == UV_ENOSYS);
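
Note: uv_available_parallelism(), new in this update, returns at least 1, as the assertion above requires. A minimal usage sketch; sizing a worker pool from the value is a hypothetical application, not something the test does:

    #include <stdio.h>
    #include <uv.h>

    int main(void) {
      /* Number of logical CPUs available to this process; always >= 1. */
      unsigned int n = uv_available_parallelism();
      printf("sizing worker pool to %u threads\n", n);
      return 0;
    }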

View file

@ -1675,9 +1675,6 @@ TEST_IMPL(closed_fd_events) {
ASSERT(req.result == 1);
uv_fs_req_cleanup(&req);
#ifdef _WIN32
ASSERT(1 == uv_run(uv_default_loop(), UV_RUN_ONCE));
#endif
ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_ONCE));
/* should have received just one byte */
@ -1891,6 +1888,44 @@ TEST_IMPL(spawn_quoted_path) {
#endif
}
TEST_IMPL(spawn_exercise_sigchld_issue) {
int r;
int i;
uv_process_options_t dummy_options = {0};
uv_process_t dummy_processes[100];
char* args[2];
init_process_options("spawn_helper1", exit_cb);
r = uv_spawn(uv_default_loop(), &process, &options);
ASSERT_EQ(r, 0);
/* This test exercises a bug in the darwin kernel that causes SIGCHLD not to
 * be delivered sometimes. Calling posix_spawn many times increases the
 * likelihood of encountering this issue, so spin a few times to make this
 * test more reliable. */
dummy_options.file = args[0] = "program-that-had-better-not-exist";
args[1] = NULL;
dummy_options.args = args;
dummy_options.exit_cb = fail_cb;
dummy_options.flags = 0;
for (i = 0; i < 100; i++) {
r = uv_spawn(uv_default_loop(), &dummy_processes[i], &dummy_options);
if (r != UV_ENOENT)
ASSERT_EQ(r, UV_EACCES);
uv_close((uv_handle_t*) &dummy_processes[i], close_cb);
}
r = uv_run(uv_default_loop(), UV_RUN_DEFAULT);
ASSERT_EQ(r, 0);
ASSERT_EQ(exit_cb_called, 1);
ASSERT_EQ(close_cb_called, 101);
MAKE_VALGRIND_HAPPY();
return 0;
}
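
Note: the test above leans on the usual uv_spawn() callback pattern: exit_cb fires once per child that actually started, and every process handle, including those whose spawn failed, still has to be closed. A sketch of that callback shape; on_child_exit() and the printf output are illustrative only:

    #include <inttypes.h>
    #include <stdio.h>
    #include <uv.h>

    /* Report the child's fate, then close the process handle so its close
     * callback can run. */
    static void on_child_exit(uv_process_t* process,
                              int64_t exit_status,
                              int term_signal) {
      printf("child %d exited: status=%" PRId64 " signal=%d\n",
             process->pid, exit_status, term_signal);
      uv_close((uv_handle_t*) process, NULL);
    }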
/* Helper for child process of spawn_inherit_streams */
#ifndef _WIN32
void spawn_stdin_stdout(void) {
@ -1943,3 +1978,37 @@ void spawn_stdin_stdout(void) {
}
}
#endif /* !_WIN32 */
TEST_IMPL(spawn_relative_path) {
char* sep;
init_process_options("spawn_helper1", exit_cb);
exepath_size = sizeof(exepath) - 2;
ASSERT_EQ(0, uv_exepath(exepath, &exepath_size));
exepath[exepath_size] = '\0';
/* Poor man's basename(3). */
sep = strrchr(exepath, '/');
if (sep == NULL)
sep = strrchr(exepath, '\\');
ASSERT_NOT_NULL(sep);
/* Split into dirname and basename and make basename relative. */
memmove(sep + 2, sep, 1 + strlen(sep));
sep[0] = '\0';
sep[1] = '.';
sep[2] = '/';
options.cwd = exepath;
options.file = options.args[0] = sep + 1;
ASSERT_EQ(0, uv_spawn(uv_default_loop(), &process, &options));
ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
ASSERT_EQ(1, exit_cb_called);
ASSERT_EQ(1, close_cb_called);
MAKE_VALGRIND_HAPPY();
return 0;
}
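
Note: the in-place split above is compact but easy to misread. A standalone illustration of the same memmove() trick on a fixed path; the path and buffer size are made up, and the two spare bytes correspond to exepath_size = sizeof(exepath) - 2 in the test:

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
      char path[64] = "/home/user/build/spawn_helper";
      char* sep = strrchr(path, '/');          /* poor man's basename(3) */
      assert(sep != NULL);
      memmove(sep + 2, sep, 1 + strlen(sep));  /* shift "/spawn_helper\0" right by 2 */
      sep[0] = '\0';                           /* path  is now "/home/user/build" */
      sep[1] = '.';
      sep[2] = '/';                            /* sep+1 is now "./spawn_helper"   */
      printf("cwd=%s file=%s\n", path, sep + 1);
      return 0;
    }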

90
deps/uv/test/test-strtok.c vendored Normal file
View file

@ -0,0 +1,90 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "task.h"
#include <string.h>
#include "../src/strtok.h"
#include "../src/strtok.c"
struct strtok_test_case {
const char* str;
const char* sep;
};
const char* tokens[] = {
"abc",
NULL,
"abc",
"abf",
NULL,
"This",
"is.a",
"test",
"of",
"the",
"string",
"tokenizer",
"function.",
NULL,
"Hello",
"This-is-a-nice",
"-string",
NULL
};
#define ASSERT_STRCMP(x, y) \
ASSERT((x != NULL && y != NULL && strcmp(x, y) == 0) || (x == y && x == NULL))
TEST_IMPL(strtok) {
struct strtok_test_case tests[] = {
{ "abc", "" },
{ "abc.abf", "." },
{ "This;is.a:test:of=the/string\\tokenizer-function.", "\\/:;=-" },
{ "Hello This-is-a-nice.-string", " ." },
};
size_t tokens_len = ARRAY_SIZE(tokens);
size_t tests_len = ARRAY_SIZE(tests);
size_t i;
size_t j;
char* itr;
char* tok_r;
char current_test[2048];
for (i = 0, j = 0; i < tests_len; i += 1) {
ASSERT(j < tokens_len);
snprintf(current_test, sizeof(current_test), "%s", tests[i].str);
tok_r = uv__strtok(current_test, tests[i].sep, &itr);
ASSERT_STRCMP(tok_r, tokens[j]);
j++;
while (tok_r) {
ASSERT(j < tokens_len);
tok_r = uv__strtok(NULL, tests[i].sep, &itr);
ASSERT_STRCMP(tok_r, tokens[j]);
j++;
}
}
return 0;
}
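
Note: the test exercises uv__strtok() with the signature char* uv__strtok(char* str, const char* sep, char** itr). Unlike strtok(3), the tokenizer state lives in the caller-supplied iterator, so concurrent tokenizations cannot interfere with each other. A minimal calling-convention sketch; print_tokens() is a hypothetical helper and must be linked against src/strtok.c:

    #include <stdio.h>

    char* uv__strtok(char* str, const char* sep, char** itr);  /* src/strtok.h */

    static void print_tokens(char* s, const char* sep) {
      char* itr;
      char* tok;
      for (tok = uv__strtok(s, sep, &itr);
           tok != NULL;
           tok = uv__strtok(NULL, sep, &itr))
        printf("token: %s\n", tok);
    }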

View file

@ -297,3 +297,21 @@ TEST_IMPL(tcp_bind_writable_flags) {
MAKE_VALGRIND_HAPPY();
return 0;
}
TEST_IMPL(tcp_bind_or_listen_error_after_close) {
uv_tcp_t tcp;
struct sockaddr_in addr;
memset(&addr, 0, sizeof(addr));
addr.sin_addr.s_addr = htonl(INADDR_ANY);
addr.sin_port = htons(9999);
addr.sin_family = AF_INET;
ASSERT_EQ(uv_tcp_init(uv_default_loop(), &tcp), 0);
uv_close((uv_handle_t*) &tcp, NULL);
ASSERT_EQ(uv_tcp_bind(&tcp, (struct sockaddr*) &addr, 0), UV_EINVAL);
ASSERT_EQ(uv_listen((uv_stream_t*) &tcp, 5, NULL), UV_EINVAL);
ASSERT_EQ(uv_run(uv_default_loop(), UV_RUN_DEFAULT), 0);
MAKE_VALGRIND_HAPPY();
return 0;
}

View file

@ -0,0 +1,183 @@
/* Copyright libuv project and contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "task.h"
static uv_tcp_t client;
static uv_tcp_t connection;
static uv_connect_t connect_req;
static uv_timer_t timer;
static int read_cb_called;
static int on_close_called;
static void on_connection(uv_stream_t* server, int status);
static void on_client_connect(uv_connect_t* req, int status);
static void on_client_alloc(uv_handle_t* handle,
size_t suggested_size,
uv_buf_t* buf);
static void on_client_read(uv_stream_t* stream,
ssize_t nread,
const uv_buf_t* buf);
static void on_client_timeout(uv_timer_t* handle);
static void on_close(uv_handle_t* handle);
static void on_client_connect(uv_connect_t* conn_req, int status) {
int r;
r = uv_read_start((uv_stream_t*) &client, on_client_alloc, on_client_read);
ASSERT_EQ(r, 0);
r = uv_timer_start(&timer, on_client_timeout, 1000, 0);
ASSERT_EQ(r, 0);
}
static void on_client_alloc(uv_handle_t* handle,
size_t suggested_size,
uv_buf_t* buf) {
static char slab[8];
buf->base = slab;
buf->len = sizeof(slab);
}
static void on_client_read(uv_stream_t* stream, ssize_t nread,
const uv_buf_t* buf) {
ASSERT_LT(nread, 0);
read_cb_called++;
}
static void on_client_timeout(uv_timer_t* handle) {
ASSERT_EQ(handle, &timer);
ASSERT_EQ(read_cb_called, 0);
uv_read_stop((uv_stream_t*) &client);
uv_close((uv_handle_t*) &client, on_close);
uv_close((uv_handle_t*) &timer, on_close);
}
static void on_connection_alloc(uv_handle_t* handle,
size_t suggested_size,
uv_buf_t* buf) {
static char slab[8];
buf->base = slab;
buf->len = sizeof(slab);
}
static void on_connection_read(uv_stream_t* stream,
ssize_t nread,
const uv_buf_t* buf) {
ASSERT_EQ(nread, UV_EOF);
read_cb_called++;
uv_close((uv_handle_t*) stream, on_close);
}
static void on_connection(uv_stream_t* server, int status) {
int r;
ASSERT_EQ(status, 0);
ASSERT_EQ(uv_accept(server, (uv_stream_t*) &connection), 0);
r = uv_read_start((uv_stream_t*) &connection,
on_connection_alloc,
on_connection_read);
ASSERT_EQ(r, 0);
}
static void on_close(uv_handle_t* handle) {
ASSERT(handle == (uv_handle_t*) &client ||
handle == (uv_handle_t*) &connection ||
handle == (uv_handle_t*) &timer);
on_close_called++;
}
static void start_server(uv_loop_t* loop, uv_tcp_t* handle) {
struct sockaddr_in addr;
int r;
ASSERT_EQ(uv_ip4_addr("127.0.0.1", TEST_PORT, &addr), 0);
r = uv_tcp_init(loop, handle);
ASSERT_EQ(r, 0);
r = uv_tcp_bind(handle, (const struct sockaddr*) &addr, 0);
ASSERT_EQ(r, 0);
r = uv_listen((uv_stream_t*) handle, 128, on_connection);
ASSERT_EQ(r, 0);
uv_unref((uv_handle_t*) handle);
}
/* Check that the client receives no data before its read timeout fires and
 * that both ends tear down cleanly once the client closes its handles.
 */
TEST_IMPL(tcp_close_after_read_timeout) {
struct sockaddr_in addr;
uv_tcp_t tcp_server;
uv_loop_t* loop;
int r;
ASSERT_EQ(uv_ip4_addr("127.0.0.1", TEST_PORT, &addr), 0);
loop = uv_default_loop();
/* We can't use the echo server, it doesn't handle ECONNRESET. */
start_server(loop, &tcp_server);
r = uv_tcp_init(loop, &client);
ASSERT_EQ(r, 0);
r = uv_tcp_connect(&connect_req,
&client,
(const struct sockaddr*) &addr,
on_client_connect);
ASSERT_EQ(r, 0);
r = uv_tcp_init(loop, &connection);
ASSERT_EQ(r, 0);
r = uv_timer_init(loop, &timer);
ASSERT_EQ(r, 0);
ASSERT_EQ(read_cb_called, 0);
ASSERT_EQ(on_close_called, 0);
r = uv_run(loop, UV_RUN_DEFAULT);
ASSERT_EQ(r, 0);
ASSERT_EQ(read_cb_called, 1);
ASSERT_EQ(on_close_called, 3);
MAKE_VALGRIND_HAPPY();
return 0;
}

View file

@ -25,6 +25,12 @@
#include <errno.h>
#include <string.h> /* memset */
#ifdef _WIN32
# define INVALID_FD (INVALID_HANDLE_VALUE)
#else
# define INVALID_FD (-1)
#endif
static uv_loop_t* loop;
static uv_tcp_t tcp_server;
static uv_tcp_t tcp_client;
@ -62,9 +68,22 @@ static void do_write(uv_tcp_t* handle) {
static void do_close(uv_tcp_t* handle) {
uv_os_fd_t fd;
int r;
if (shutdown_before_close == 1) {
ASSERT(0 == uv_shutdown(&shutdown_req, (uv_stream_t*) handle, shutdown_cb));
ASSERT(UV_EINVAL == uv_tcp_close_reset(handle, close_cb));
} else if (shutdown_before_close == 2) {
r = uv_fileno((const uv_handle_t*) handle, &fd);
ASSERT_EQ(r, 0);
ASSERT_NE(fd, INVALID_FD);
#ifdef _WIN32
ASSERT_EQ(0, shutdown(fd, SD_BOTH));
#else
ASSERT_EQ(0, shutdown(fd, SHUT_RDWR));
#endif
ASSERT_EQ(0, uv_tcp_close_reset(handle, close_cb));
} else {
ASSERT(0 == uv_tcp_close_reset(handle, close_cb));
ASSERT(UV_ENOTCONN == uv_shutdown(&shutdown_req, (uv_stream_t*) handle, shutdown_cb));
@ -288,3 +307,30 @@ TEST_IMPL(tcp_close_reset_accepted_after_shutdown) {
MAKE_VALGRIND_HAPPY();
return 0;
}
TEST_IMPL(tcp_close_reset_accepted_after_socket_shutdown) {
int r;
loop = uv_default_loop();
start_server(loop, &tcp_server);
client_close = 0;
shutdown_before_close = 2;
do_connect(loop, &tcp_client);
ASSERT_EQ(write_cb_called, 0);
ASSERT_EQ(close_cb_called, 0);
ASSERT_EQ(shutdown_cb_called, 0);
r = uv_run(loop, UV_RUN_DEFAULT);
ASSERT_EQ(r, 0);
ASSERT_EQ(write_cb_called, 4);
ASSERT_EQ(close_cb_called, 1);
ASSERT_EQ(shutdown_cb_called, 0);
MAKE_VALGRIND_HAPPY();
return 0;
}
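
Note: the do_close() changes above pin down how uv_tcp_close_reset() interacts with shutdown: it is rejected with UV_EINVAL once uv_shutdown() has been issued on the handle, but still works after a raw shutdown(2) on the underlying socket. A small sketch of a fallback helper built on that behaviour; abortive_close() is hypothetical:

    #include <uv.h>

    /* Close with RST when possible; if the reset cannot be performed (for
     * example UV_EINVAL after uv_shutdown()), fall back to a normal close. */
    static void abortive_close(uv_tcp_t* handle, uv_close_cb cb) {
      if (uv_tcp_close_reset(handle, cb) != 0)
        uv_close((uv_handle_t*) handle, cb);
    }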

107
deps/uv/test/test-tcp-rst.c vendored Normal file
View file

@ -0,0 +1,107 @@
/* Copyright libuv project and contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "task.h"
static uv_tcp_t tcp;
static uv_connect_t connect_req;
static uv_buf_t qbuf;
static int called_alloc_cb;
static int called_connect_cb;
static int called_close_cb;
static void close_cb(uv_handle_t* handle) {
ASSERT(handle == (uv_handle_t*) &tcp);
called_close_cb++;
}
static void alloc_cb(uv_handle_t* handle, size_t size, uv_buf_t* buf) {
buf->base = malloc(size);
buf->len = size;
called_alloc_cb++;
}
static void read_cb(uv_stream_t* t, ssize_t nread, const uv_buf_t* buf) {
ASSERT_PTR_EQ((uv_tcp_t*) t, &tcp);
ASSERT_EQ(nread, UV_ECONNRESET);
int fd;
ASSERT_EQ(0, uv_fileno((uv_handle_t*) t, &fd));
uv_handle_type type = uv_guess_handle(fd);
ASSERT_EQ(type, UV_TCP);
uv_close((uv_handle_t *) t, close_cb);
free(buf->base);
}
static void connect_cb(uv_connect_t *req, int status) {
ASSERT_EQ(status, 0);
ASSERT_PTR_EQ(req, &connect_req);
/* Start reading from the connection so we receive the RST in uv__read. */
ASSERT_EQ(0, uv_read_start((uv_stream_t*) &tcp, alloc_cb, read_cb));
/* Write 'QSH' to receive RST from the echo server. */
ASSERT_EQ(qbuf.len, uv_try_write((uv_stream_t*) &tcp, &qbuf, 1));
called_connect_cb++;
}
/*
 * This test has a client which connects to the echo server and receives a TCP
 * RST. It checks that uv_guess_handle() still works on a reset TCP handle.
 */
TEST_IMPL(tcp_rst) {
#ifndef _WIN32
struct sockaddr_in server_addr;
int r;
qbuf.base = "QSH";
qbuf.len = 3;
ASSERT_EQ(0, uv_ip4_addr("127.0.0.1", TEST_PORT, &server_addr));
r = uv_tcp_init(uv_default_loop(), &tcp);
ASSERT_EQ(r, 0);
r = uv_tcp_connect(&connect_req,
&tcp,
(const struct sockaddr*) &server_addr,
connect_cb);
ASSERT_EQ(r, 0);
uv_run(uv_default_loop(), UV_RUN_DEFAULT);
ASSERT_EQ(called_alloc_cb, 1);
ASSERT_EQ(called_connect_cb, 1);
ASSERT_EQ(called_close_cb, 1);
MAKE_VALGRIND_HAPPY();
return 0;
#else
RETURN_SKIP("Unix only test");
#endif
}
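
Note: the new test documents that a connection reset surfaces in the read callback as nread == UV_ECONNRESET and that the handle remains usable for introspection (uv_guess_handle()) and closing. A sketch of a read callback that treats RST like EOF for teardown purposes; on_read() is hypothetical and assumes the alloc callback used malloc():

    #include <stdlib.h>
    #include <uv.h>

    static void on_read(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) {
      if (nread > 0) {
        /* consume nread bytes at buf->base ... */
      } else if (nread == UV_EOF || nread == UV_ECONNRESET) {
        uv_close((uv_handle_t*) stream, NULL);  /* orderly or abortive end: stop */
      } else if (nread < 0) {
        uv_close((uv_handle_t*) stream, NULL);  /* any other read error */
      }
      free(buf->base);  /* buffer came from a malloc()-based alloc callback */
    }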

View file

@ -273,6 +273,11 @@ TEST_IMPL(thread_stack_size_explicit) {
thread_check_stack, &options));
ASSERT(0 == uv_thread_join(&thread));
options.stack_size = 42;
ASSERT(0 == uv_thread_create_ex(&thread, &options,
thread_check_stack, &options));
ASSERT(0 == uv_thread_join(&thread));
#ifdef PTHREAD_STACK_MIN
options.stack_size = PTHREAD_STACK_MIN - 42; /* unaligned size */
ASSERT(0 == uv_thread_create_ex(&thread, &options,

View file

@ -25,6 +25,8 @@
static int once_cb_called = 0;
static int once_close_cb_called = 0;
static int twice_cb_called = 0;
static int twice_close_cb_called = 0;
static int repeat_cb_called = 0;
static int repeat_close_cb_called = 0;
static int order_cb_called = 0;
@ -58,6 +60,27 @@ static void once_cb(uv_timer_t* handle) {
uv_update_time(uv_default_loop());
}
static void twice_close_cb(uv_handle_t* handle) {
printf("TWICE_CLOSE_CB\n");
ASSERT_NOT_NULL(handle);
ASSERT(0 == uv_is_active(handle));
twice_close_cb_called++;
}
static void twice_cb(uv_timer_t* handle) {
printf("TWICE_CB %d\n", twice_cb_called);
ASSERT_NOT_NULL(handle);
ASSERT(0 == uv_is_active((uv_handle_t*) handle));
twice_cb_called++;
uv_close((uv_handle_t*)handle, twice_close_cb);
}
static void repeat_close_cb(uv_handle_t* handle) {
printf("REPEAT_CLOSE_CB\n");
@ -144,12 +167,12 @@ TEST_IMPL(timer_start_twice) {
ASSERT(r == 0);
r = uv_timer_start(&once, never_cb, 86400 * 1000, 0);
ASSERT(r == 0);
r = uv_timer_start(&once, once_cb, 10, 0);
r = uv_timer_start(&once, twice_cb, 10, 0);
ASSERT(r == 0);
r = uv_run(uv_default_loop(), UV_RUN_DEFAULT);
ASSERT(r == 0);
ASSERT(once_cb_called == 1);
ASSERT(twice_cb_called == 1);
MAKE_VALGRIND_HAPPY();
return 0;

View file

@ -98,10 +98,6 @@ static void sv_recv_cb(uv_udp_t* handle,
TEST_IMPL(udp_connect) {
#if defined(__PASE__)
RETURN_SKIP(
"IBMi PASE's UDP connection can not be disconnected with AF_UNSPEC.");
#endif
uv_udp_send_t req;
struct sockaddr_in ext_addr;
struct sockaddr_in tmp_addr;

View file

@ -98,10 +98,6 @@ static void sv_recv_cb(uv_udp_t* handle,
TEST_IMPL(udp_connect6) {
#if defined(__PASE__)
RETURN_SKIP(
"IBMi PASE's UDP connection can not be disconnected with AF_UNSPEC.");
#endif
uv_udp_send_t req;
struct sockaddr_in6 ext_addr;
struct sockaddr_in6 tmp_addr;

View file

@ -29,14 +29,15 @@
#define CHECK_HANDLE(handle) \
ASSERT((uv_udp_t*)(handle) == &recver || (uv_udp_t*)(handle) == &sender)
#define BUFFER_MULTIPLIER 4
#define BUFFER_MULTIPLIER 20
#define MAX_DGRAM_SIZE (64 * 1024)
#define NUM_SENDS 8
#define NUM_SENDS 40
#define EXPECTED_MMSG_ALLOCS (NUM_SENDS / BUFFER_MULTIPLIER)
static uv_udp_t recver;
static uv_udp_t sender;
static int recv_cb_called;
static int received_datagrams;
static int close_cb_called;
static int alloc_cb_called;
@ -82,14 +83,20 @@ static void recv_cb(uv_udp_t* handle,
return;
}
if (nread == 0) {
/* nread == 0: no more data available for the time being. */
ASSERT_NULL(addr);
} else {
ASSERT_EQ(nread, 4);
ASSERT_NOT_NULL(addr);
ASSERT_MEM_EQ("PING", rcvbuf->base, nread);
received_datagrams++;
}
recv_cb_called++;
if (recv_cb_called == NUM_SENDS) {
uv_close((uv_handle_t*)handle, close_cb);
uv_close((uv_handle_t*)&sender, close_cb);
if (received_datagrams == NUM_SENDS) {
uv_close((uv_handle_t*) handle, close_cb);
uv_close((uv_handle_t*) &sender, close_cb);
}
/* Don't free if the buffer could be reused via mmsg */
@ -124,7 +131,7 @@ TEST_IMPL(udp_mmsg) {
ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
ASSERT_EQ(close_cb_called, 2);
ASSERT_EQ(recv_cb_called, NUM_SENDS);
ASSERT_EQ(received_datagrams, NUM_SENDS);
ASSERT_EQ(sender.send_queue_size, 0);
ASSERT_EQ(recver.send_queue_size, 0);

2
deps/uv/uv.gyp vendored
View file

@ -78,6 +78,8 @@
'src/random.c',
'src/strscpy.c',
'src/strscpy.h',
'src/strtok.c',
'src/strtok.h',
'src/threadpool.c',
'src/timer.c',
'src/uv-data-getter-setters.c',