/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* This file contains both the uv__async internal infrastructure and the
 * user-facing uv_async_t functions.
 */

#include "uv.h"
#include "internal.h"

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>  /* snprintf() */
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sched.h>  /* sched_yield() */

#ifdef __linux__
#include <sys/eventfd.h>
#endif

#if UV__KQUEUE_EVFILT_USER
static uv_once_t kqueue_runtime_detection_guard = UV_ONCE_INIT;
static int kqueue_evfilt_user_support = 1;


static void uv__kqueue_runtime_detection(void) {
  int kq;
  struct kevent ev[2];
  struct timespec timeout = {0, 0};

  /* Perform the runtime detection to ensure that kqueue with
   * EVFILT_USER actually works. */
  kq = kqueue();
  EV_SET(ev, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER,
         EV_ADD | EV_CLEAR, 0, 0, 0);
  EV_SET(ev + 1, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER,
         0, NOTE_TRIGGER, 0, 0);
  if (kevent(kq, ev, 2, ev, 1, &timeout) < 1 ||
      ev[0].filter != EVFILT_USER ||
      ev[0].ident != UV__KQUEUE_EVFILT_USER_IDENT ||
      ev[0].flags & EV_ERROR)
    /* If we wind up here, we can assume that EVFILT_USER is defined but
     * broken on the current system. */
    kqueue_evfilt_user_support = 0;
  uv__close(kq);
}
#endif

static void uv__async_send(uv_loop_t* loop);
static int uv__async_start(uv_loop_t* loop);
static void uv__cpu_relax(void);


int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
  int err;

  err = uv__async_start(loop);
  if (err)
    return err;

  uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC);
  handle->async_cb = async_cb;
  handle->pending = 0;
  handle->u.fd = 0; /* This will be used as a busy flag. */

  uv__queue_insert_tail(&loop->async_handles, &handle->queue);
  uv__handle_start(handle);

  return 0;
}

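/* uv_async_send() may be called from any thread. handle->pending flags that a
 * wakeup has been requested; handle->u.fd is reused as a busy counter so that
 * uv__async_spin() can wait for concurrent senders to drain before the handle
 * is closed. */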
int uv_async_send(uv_async_t* handle) {
  _Atomic int* pending;
  _Atomic int* busy;

  pending = (_Atomic int*) &handle->pending;
  busy = (_Atomic int*) &handle->u.fd;

  /* Do a cheap read first. */
  if (atomic_load_explicit(pending, memory_order_relaxed) != 0)
    return 0;

  /* Set the loop to busy. */
  atomic_fetch_add(busy, 1);

  /* Wake up the other thread's event loop. */
  if (atomic_exchange(pending, 1) == 0)
    uv__async_send(handle->loop);

  /* Set the loop to not-busy. */
  atomic_fetch_add(busy, -1);

  return 0;
}
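
/* Illustrative only -- not part of this translation unit. A minimal sketch of
 * how the user-facing API above is typically driven from another thread; the
 * names worker() and wakeup_cb() are hypothetical:
 *
 *     static void wakeup_cb(uv_async_t* handle) {
 *       printf("woken up on the loop thread\n");
 *     }
 *
 *     static void worker(void* arg) {
 *       uv_async_send((uv_async_t*) arg);   // safe from any thread
 *     }
 *
 *     uv_async_t async;
 *     uv_thread_t tid;
 *     uv_async_init(uv_default_loop(), &async, wakeup_cb);
 *     uv_thread_create(&tid, worker, &async);
 *     uv_run(uv_default_loop(), UV_RUN_DEFAULT);
 */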


/* Wait for the busy flag to clear before closing.
 * Only call this from the event loop thread. */
static void uv__async_spin(uv_async_t* handle) {
  _Atomic int* pending;
  _Atomic int* busy;
  int i;

  pending = (_Atomic int*) &handle->pending;
  busy = (_Atomic int*) &handle->u.fd;

  /* Set the pending flag first, so no new events will be added by other
   * threads after this function returns. */
  atomic_store(pending, 1);

  for (;;) {
    /* 997 is not completely chosen at random. It's a prime number, acyclic by
     * nature, and should therefore hopefully dampen sympathetic resonance.
     */
    for (i = 0; i < 997; i++) {
      if (atomic_load(busy) == 0)
        return;

      /* Other thread is busy with this handle, spin until it's done. */
      uv__cpu_relax();
    }

    /* Yield the CPU. We may have preempted the other thread while it's
     * inside the critical section and if it's running on the same CPU
     * as us, we'll just burn CPU cycles until the end of our time slice.
     */
    sched_yield();
  }
}

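/* Called on the loop thread when the handle is closed with uv_close(): wait
 * for any in-flight uv_async_send() calls to finish, then detach the handle
 * from the loop. */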
void uv__async_close(uv_async_t* handle) {
  uv__async_spin(handle);
  uv__queue_remove(&handle->queue);
  uv__handle_stop(handle);
}

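/* I/O callback for the loop's internal wakeup watcher: drain the wakeup fd
 * (pipe or eventfd; not needed with EVFILT_USER), then run the callback of
 * every async handle whose pending flag is set. */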
static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  char buf[1024];
  ssize_t r;
  struct uv__queue queue;
  struct uv__queue* q;
  uv_async_t* h;
  _Atomic int *pending;

  assert(w == &loop->async_io_watcher);

#if UV__KQUEUE_EVFILT_USER
  for (;!kqueue_evfilt_user_support;) {
#else
  for (;;) {
#endif
    r = read(w->fd, buf, sizeof(buf));

    if (r == sizeof(buf))
      continue;

    if (r != -1)
      break;

    if (errno == EAGAIN || errno == EWOULDBLOCK)
      break;

    if (errno == EINTR)
      continue;

    abort();
  }

  uv__queue_move(&loop->async_handles, &queue);
  while (!uv__queue_empty(&queue)) {
    q = uv__queue_head(&queue);
    h = uv__queue_data(q, uv_async_t, queue);

    uv__queue_remove(q);
    uv__queue_insert_tail(&loop->async_handles, q);

    /* Atomically fetch and clear pending flag */
    pending = (_Atomic int*) &h->pending;
    if (atomic_exchange(pending, 0) == 0)
      continue;

    if (h->async_cb == NULL)
      continue;

    h->async_cb(h);
  }
}

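/* Wake up the event loop that owns `loop` from another thread: write a byte
 * to the wakeup pipe, write an 8-byte counter increment to the eventfd on
 * Linux, or trigger the pre-registered EVFILT_USER event where supported. */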
static void uv__async_send(uv_loop_t* loop) {
  const void* buf;
  ssize_t len;
  int fd;
  int r;

  buf = "";
  len = 1;
  fd = loop->async_wfd;

#if defined(__linux__)
  if (fd == -1) {
    static const uint64_t val = 1;
    buf = &val;
    len = sizeof(val);
    fd = loop->async_io_watcher.fd;  /* eventfd */
  }
#elif UV__KQUEUE_EVFILT_USER
  struct kevent ev;

  if (kqueue_evfilt_user_support) {
    fd = loop->async_io_watcher.fd;  /* magic number for EVFILT_USER */
    EV_SET(&ev, fd, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0);
    r = kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL);
    if (r == 0)
      return;
    abort();
  }
#endif

  do
    r = write(fd, buf, len);
  while (r == -1 && errno == EINTR);

  if (r == len)
    return;

  if (r == -1)
    if (errno == EAGAIN || errno == EWOULDBLOCK)
      return;

  abort();
}

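/* Lazily set up the loop's wakeup mechanism the first time an async handle is
 * initialized: an eventfd on Linux, an EVFILT_USER registration on kqueue
 * systems that pass the runtime check, or a non-blocking pipe otherwise. */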
static int uv__async_start(uv_loop_t* loop) {
  int pipefd[2];
  int err;
#if UV__KQUEUE_EVFILT_USER
  struct kevent ev;
#endif

  if (loop->async_io_watcher.fd != -1)
    return 0;

#ifdef __linux__
  err = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
  if (err < 0)
    return UV__ERR(errno);

  pipefd[0] = err;
  pipefd[1] = -1;
#elif UV__KQUEUE_EVFILT_USER
  uv_once(&kqueue_runtime_detection_guard, uv__kqueue_runtime_detection);
  if (kqueue_evfilt_user_support) {
    /* In order not to break the generic pattern of I/O polling, a valid
     * file descriptor is required to occupy a slot in loop->watchers, so we
     * create one here. The fd is never actually used; it is just a
     * placeholder and magic number that gets closed during cleanup like any
     * other fd. */
    err = uv__open_cloexec("/", O_RDONLY);
    if (err < 0)
      return err;

    pipefd[0] = err;
    pipefd[1] = -1;

    /* When using an EVFILT_USER event to wake up the kqueue, the event must
     * be registered beforehand; issuing an unregistered EVFILT_USER event
     * with kevent() fails with ENOENT. Because uv__async_send() may run on
     * another thread before uv__io_poll() does, we cannot defer this
     * registration as we do for other events and must perform it right
     * away. */
    EV_SET(&ev, err, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, 0);
    err = kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL);
    if (err < 0)
      return UV__ERR(errno);
  } else {
    err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
    if (err < 0)
      return err;
  }
#else
  err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
  if (err < 0)
    return err;
#endif

  uv__io_init(&loop->async_io_watcher, uv__async_io, pipefd[0]);
  uv__io_start(loop, &loop->async_io_watcher, POLLIN);
  loop->async_wfd = pipefd[1];

#if UV__KQUEUE_EVFILT_USER
  /* Prevent the EVFILT_USER event from being added to kqueue redundantly
   * and mistakenly later in uv__io_poll(). */
  if (kqueue_evfilt_user_support)
    loop->async_io_watcher.events = loop->async_io_watcher.pevents;
#endif

  return 0;
}

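/* Tear down the wakeup mechanism during loop close; spins on every async
 * handle first so no other thread is still using the fds. */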
void uv__async_stop(uv_loop_t* loop) {
  struct uv__queue queue;
  struct uv__queue* q;
  uv_async_t* h;

  if (loop->async_io_watcher.fd == -1)
    return;

  /* Make sure no other thread is accessing the async handle fd after the loop
   * cleanup.
   */
  uv__queue_move(&loop->async_handles, &queue);
  while (!uv__queue_empty(&queue)) {
    q = uv__queue_head(&queue);
    h = uv__queue_data(q, uv_async_t, queue);

    uv__queue_remove(q);
    uv__queue_insert_tail(&loop->async_handles, q);

    uv__async_spin(h);
  }

  if (loop->async_wfd != -1) {
    if (loop->async_wfd != loop->async_io_watcher.fd)
      uv__close(loop->async_wfd);
    loop->async_wfd = -1;
  }

  uv__io_stop(loop, &loop->async_io_watcher, POLLIN);
  uv__close(loop->async_io_watcher.fd);
  loop->async_io_watcher.fd = -1;
}

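/* Called from uv_loop_fork() in the child process: reset per-handle state and
 * recreate the wakeup fds, since the inherited ones are still shared with the
 * parent. */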
int uv__async_fork(uv_loop_t* loop) {
  struct uv__queue queue;
  struct uv__queue* q;
  uv_async_t* h;

  if (loop->async_io_watcher.fd == -1) /* never started */
    return 0;

  uv__queue_move(&loop->async_handles, &queue);
  while (!uv__queue_empty(&queue)) {
    q = uv__queue_head(&queue);
    h = uv__queue_data(q, uv_async_t, queue);

    uv__queue_remove(q);
    uv__queue_insert_tail(&loop->async_handles, q);

    /* The state of any thread that set pending is now likely corrupt in this
     * child because the user called fork, so just clear these flags and move
     * on. Calling most libc functions after `fork` is declared to be undefined
     * behavior anyways, unless async-signal-safe, for multithreaded programs
     * like libuv, and nothing interesting in pthreads is async-signal-safe.
     */
    h->pending = 0;
    /* This is the busy flag, and we just abruptly lost all other threads. */
    h->u.fd = 0;
  }

  /* Recreate these, since they still exist, but belong to the wrong pid now. */
  if (loop->async_wfd != -1) {
    if (loop->async_wfd != loop->async_io_watcher.fd)
      uv__close(loop->async_wfd);
    loop->async_wfd = -1;
  }

  uv__io_stop(loop, &loop->async_io_watcher, POLLIN);
  uv__close(loop->async_io_watcher.fd);
  loop->async_io_watcher.fd = -1;

  return uv__async_start(loop);
}

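/* Spin-wait hint for uv__async_spin(): PAUSE on x86, YIELD on ARMv7+/AArch64,
 * and low-priority nops on 64-bit POWER; elsewhere this compiles to at most a
 * compiler barrier. */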
static void uv__cpu_relax(void) {
#if defined(__i386__) || defined(__x86_64__)
  __asm__ __volatile__ ("rep; nop" ::: "memory");  /* a.k.a. PAUSE */
#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
  __asm__ __volatile__ ("yield" ::: "memory");
#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
  __asm volatile ("" : : : "memory");
#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
  __asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
#endif
}