 #include <sys/eventfd.h>
 #endif
 
-#if UV__KQUEUE_EVFILT_USER
-static uv_once_t kqueue_runtime_detection_guard = UV_ONCE_INIT;
-static int kqueue_evfilt_user_support = 1;
-
-
-static void uv__kqueue_runtime_detection(void) {
-  int kq;
-  struct kevent ev[2];
-  struct timespec timeout = {0, 0};
-
-  /* Perform the runtime detection to ensure that kqueue with
-   * EVFILT_USER actually works. */
-  kq = kqueue();
-  EV_SET(ev, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER,
-         EV_ADD | EV_CLEAR, 0, 0, 0);
-  EV_SET(ev + 1, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER,
-         0, NOTE_TRIGGER, 0, 0);
-  if (kevent(kq, ev, 2, ev, 1, &timeout) < 1 ||
-      ev[0].filter != EVFILT_USER ||
-      ev[0].ident != UV__KQUEUE_EVFILT_USER_IDENT ||
-      ev[0].flags & EV_ERROR)
-    /* If we wind up here, we can assume that EVFILT_USER is defined but
-     * broken on the current system. */
-    kqueue_evfilt_user_support = 0;
-  uv__close(kq);
-}
-#endif
-
 static void uv__async_send(uv_loop_t* loop);
 static int uv__async_start(uv_loop_t* loop);
 static void uv__cpu_relax(void);
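
Note on the block removed above: EVFILT_USER can be present in the headers yet broken at runtime, so the old code probed it once behind uv_once(). A minimal standalone sketch of that probe pattern, with an arbitrary ident of 0 standing in for UV__KQUEUE_EVFILT_USER_IDENT, might look like this:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>

/* Register an EVFILT_USER event and trigger it in a single kevent() call,
 * then check that the kernel reports it back.  Returns 1 if the filter
 * actually works on this system. */
static int evfilt_user_works(void) {
  struct kevent ev[2];
  struct timespec timeout = {0, 0};
  int kq;
  int ok;

  kq = kqueue();
  if (kq == -1)
    return 0;

  EV_SET(ev + 0, 0, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, 0);  /* register */
  EV_SET(ev + 1, 0, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0);       /* trigger */

  ok = kevent(kq, ev, 2, ev, 1, &timeout) == 1 &&
       ev[0].filter == EVFILT_USER &&
       !(ev[0].flags & EV_ERROR);

  close(kq);
  return ok;
}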
@@ -158,23 +130,16 @@ void uv__async_close(uv_async_t* handle) {
 
 
 static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
-#ifndef __linux__
   char buf[1024];
   ssize_t r;
-#endif
   struct uv__queue queue;
   struct uv__queue* q;
   uv_async_t* h;
   _Atomic int* pending;
 
   assert(w == &loop->async_io_watcher);
 
-#ifndef __linux__
-#if UV__KQUEUE_EVFILT_USER
-  for (;!kqueue_evfilt_user_support;) {
-#else
   for (;;) {
-#endif
     r = read(w->fd, buf, sizeof(buf));
 
     if (r == sizeof(buf))
@@ -191,7 +156,6 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
 
     abort();
   }
-#endif  /* !__linux__ */
 
   uv__queue_move(&loop->async_handles, &queue);
   while (!uv__queue_empty(&queue)) {
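
With the #ifndef __linux__ guards gone, the drain loop above now runs on every platform: on the pipe path it swallows however many wakeup bytes have piled up, and on Linux a single read() returns the 8-byte eventfd counter and resets it to zero. A sketch of the idiom in isolation, assuming fd is non-blocking:

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>

/* Swallow all pending wakeup bytes; only the fact that something was
 * written matters, never the contents. */
static void drain(int fd) {
  char buf[1024];
  ssize_t r;

  for (;;) {
    r = read(fd, buf, sizeof(buf));

    if (r == sizeof(buf))
      continue;  /* buffer filled: there may be more to read */

    if (r != -1)
      return;  /* short read: nothing left */

    if (errno == EAGAIN || errno == EWOULDBLOCK)
      return;  /* already empty */

    if (errno == EINTR)
      continue;  /* interrupted: retry */

    abort();  /* anything else is a fatal bug */
  }
}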
@@ -215,58 +179,34 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
 
 
 static void uv__async_send(uv_loop_t* loop) {
+  const void* buf;
+  ssize_t len;
   int fd;
-  ssize_t r;
-#ifdef __linux__
-  uint64_t val;
-
-  fd = loop->async_io_watcher.fd;  /* eventfd */
-  for (val = 1; /* empty */; val = 1) {
-    r = write(fd, &val, sizeof(uint64_t));
-    if (r < 0) {
-      /* On EAGAIN the eventfd counter has hit its maximum (unsigned 64-bit)
-       * value; we need to drain the eventfd first and then write again.
-       *
-       * See https://man7.org/linux/man-pages/man2/eventfd.2.html for details.
-       */
-      if (errno == EAGAIN) {
-        /* It's ready to retry. */
-        if (read(fd, &val, sizeof(uint64_t)) > 0 || errno == EAGAIN) {
-          continue;
-        }
-      }
-      /* An unknown error occurred. */
-      break;
-    }
-    return;
-  }
-#else
-#if UV__KQUEUE_EVFILT_USER
-  struct kevent ev;
-
-  if (kqueue_evfilt_user_support) {
-    fd = loop->async_io_watcher.fd;  /* magic number for EVFILT_USER */
-    EV_SET(&ev, fd, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0);
-    r = kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL);
-    if (r == 0)
-      return;
-    else
-      abort();
+  int r;
+
+  buf = "";
+  len = 1;
+  fd = loop->async_wfd;
+
+#if defined(__linux__)
+  if (fd == -1) {
+    static const uint64_t val = 1;
+    buf = &val;
+    len = sizeof(val);
+    fd = loop->async_io_watcher.fd;  /* eventfd */
   }
 #endif
 
-  fd = loop->async_wfd;  /* write end of the pipe */
   do
-    r = write(fd, "x", 1);
+    r = write(fd, buf, len);
   while (r == -1 && errno == EINTR);
 
-  if (r == 1)
+  if (r == len)
     return;
 
   if (r == -1)
     if (errno == EAGAIN || errno == EWOULDBLOCK)
       return;
-#endif
 
   abort();
 }
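
The rewritten uv__async_send() relies on eventfd counter semantics on Linux: each write() adds to a 64-bit kernel counter, and one read() returns the accumulated total while resetting it to zero, so any number of back-to-back sends collapses into a single wakeup. A short demo of those semantics:

#include <assert.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void) {
  uint64_t val;
  ssize_t r;
  int fd;

  fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
  assert(fd != -1);

  val = 1;
  r = write(fd, &val, sizeof(val));  /* counter becomes 1 */
  assert(r == sizeof(val));
  r = write(fd, &val, sizeof(val));  /* counter becomes 2: still one wakeup */
  assert(r == sizeof(val));

  r = read(fd, &val, sizeof(val));   /* returns the total, resets it to 0 */
  assert(r == sizeof(val));
  assert(val == 2);

  close(fd);
  return 0;
}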
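
The new tail also drops the old drain-and-retry dance: EAGAIN/EWOULDBLOCK is now simply treated as success, because a full pipe (or a saturated eventfd counter) already guarantees the poll thread a pending wakeup. A demo of the pipe case, filling a non-blocking pipe until write() fails:

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int main(void) {
  char buf[1024];
  int pipefd[2];
  ssize_t r;

  assert(pipe(pipefd) == 0);
  assert(fcntl(pipefd[1], F_SETFL,
               fcntl(pipefd[1], F_GETFL) | O_NONBLOCK) == 0);

  /* Fill the pipe one wakeup byte at a time, as uv__async_send() would. */
  do
    r = write(pipefd[1], "", 1);
  while (r == 1);
  assert(r == -1 && errno == EAGAIN);  /* pipe is full... */

  r = read(pipefd[0], buf, sizeof(buf));
  assert(r > 0);  /* ...so a reader polling this end is certain to wake up */

  close(pipefd[0]);
  close(pipefd[1]);
  return 0;
}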
@@ -275,9 +215,6 @@ static void uv__async_send(uv_loop_t* loop) {
 static int uv__async_start(uv_loop_t* loop) {
   int pipefd[2];
   int err;
-#if UV__KQUEUE_EVFILT_USER
-  struct kevent ev;
-#endif
 
   if (loop->async_io_watcher.fd != -1)
     return 0;
@@ -289,36 +226,6 @@ static int uv__async_start(uv_loop_t* loop) {
 
   pipefd[0] = err;
   pipefd[1] = -1;
-#elif UV__KQUEUE_EVFILT_USER
-  uv_once(&kqueue_runtime_detection_guard, uv__kqueue_runtime_detection);
-  if (kqueue_evfilt_user_support) {
-    /* In order not to break the generic pattern of I/O polling, a valid
-     * file descriptor is required to occupy a slot in loop->watchers, so
-     * we create one just for that.  The fd is never actually used; it is
-     * merely a placeholder and magic number that gets closed during
-     * cleanup, like the other fds. */
-    err = uv__open_cloexec("/dev/null", O_RDONLY);
-    if (err < 0)
-      return err;
-
-    pipefd[0] = err;
-    pipefd[1] = -1;
-
-    /* When using an EVFILT_USER event to wake up the kqueue, the event must
-     * be registered beforehand; issuing an unregistered EVFILT_USER event
-     * via kevent() fails with ENOENT.  Since uv__async_send() may run on
-     * another thread before uv__io_poll(), we cannot defer the registration
-     * of the EVFILT_USER event as we do for other events, but must perform
-     * it right away. */
-    EV_SET(&ev, err, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, 0);
-    err = kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL);
-    if (err < 0)
-      return UV__ERR(errno);
-  } else {
-    err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
-    if (err < 0)
-      return err;
-  }
 #else
 err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
 if (err < 0)
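
The second removed comment documents a kqueue quirk worth illustrating: NOTE_TRIGGER on an ident that was never registered with EV_ADD fails with ENOENT, which is why the registration could not be deferred to uv__io_poll(). A sketch of that ordering constraint, using an arbitrary ident of 42:

#include <assert.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/event.h>
#include <unistd.h>

int main(void) {
  struct kevent ev;
  int kq;
  int r;

  kq = kqueue();
  assert(kq != -1);

  EV_SET(&ev, 42, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0);
  r = kevent(kq, &ev, 1, NULL, 0, NULL);
  assert(r == -1 && errno == ENOENT);  /* triggering before registering fails */

  EV_SET(&ev, 42, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, 0);
  r = kevent(kq, &ev, 1, NULL, 0, NULL);
  assert(r == 0);                      /* register first... */

  EV_SET(&ev, 42, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0);
  r = kevent(kq, &ev, 1, NULL, 0, NULL);
  assert(r == 0);                      /* ...then triggering succeeds */

  close(kq);
  return 0;
}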
@@ -329,13 +236,6 @@ static int uv__async_start(uv_loop_t* loop) {
   uv__io_start(loop, &loop->async_io_watcher, POLLIN);
   loop->async_wfd = pipefd[1];
 
-#if UV__KQUEUE_EVFILT_USER
-  /* Prevent the EVFILT_USER event from being added to kqueue redundantly
-   * and mistakenly later in uv__io_poll(). */
-  if (kqueue_evfilt_user_support)
-    loop->async_io_watcher.events = loop->async_io_watcher.pevents;
-#endif
-
   return 0;
 }
 
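Net effect of the change: non-Linux platforms are back to the plain self-pipe pattern, where uv__async_send() writes a byte and uv__async_io() drains it, while Linux keeps eventfd but shares the same write()/read() code path. A condensed sketch of the wait side (the helper name here is hypothetical, not libuv API):

#include <poll.h>
#include <unistd.h>

/* Block until some thread performs a self-pipe wakeup, then drain it. */
static void wait_for_wakeup(int rfd) {
  struct pollfd pfd;
  char buf[1024];

  pfd.fd = rfd;
  pfd.events = POLLIN;

  poll(&pfd, 1, -1);  /* sleeps until a wakeup byte arrives */

  while (read(rfd, buf, sizeof(buf)) == sizeof(buf))
    ;  /* drain everything, then go dispatch the pending handles */
}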