|
160
|
1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
|
|
|
2 * Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
3 * of this software and associated documentation files (the "Software"), to
|
|
|
4 * deal in the Software without restriction, including without limitation the
|
|
|
5 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
|
|
6 * sell copies of the Software, and to permit persons to whom the Software is
|
|
|
7 * furnished to do so, subject to the following conditions:
|
|
|
8 *
|
|
|
9 * The above copyright notice and this permission notice shall be included in
|
|
|
10 * all copies or substantial portions of the Software.
|
|
|
11 *
|
|
|
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
15 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
16 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
17 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
18 * IN THE SOFTWARE.
|
|
|
19 */
|
|
|
20
|
|
|
21 #include "uv.h"
|
|
|
22 #include "internal.h"
|
|
|
23 #include "strtok.h"
|
|
|
24
|
|
|
25 #include <stddef.h> /* NULL */
|
|
|
26 #include <stdio.h> /* printf */
|
|
|
27 #include <stdlib.h>
|
|
|
28 #include <string.h> /* strerror */
|
|
|
29 #include <errno.h>
|
|
|
30 #include <assert.h>
|
|
|
31 #include <unistd.h>
|
|
|
32 #include <sys/types.h>
|
|
|
33 #include <sys/stat.h>
|
|
|
34 #include <fcntl.h> /* O_CLOEXEC */
|
|
|
35 #include <sys/ioctl.h>
|
|
|
36 #include <sys/socket.h>
|
|
|
37 #include <sys/un.h>
|
|
|
38 #include <netinet/in.h>
|
|
|
39 #include <arpa/inet.h>
|
|
|
40 #include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
|
|
|
41 #include <sys/uio.h> /* writev */
|
|
|
42 #include <sys/resource.h> /* getrusage */
|
|
|
43 #include <pwd.h>
|
|
|
44 #include <grp.h>
|
|
|
45 #include <sys/utsname.h>
|
|
|
46 #include <sys/time.h>
|
|
|
47 #include <time.h> /* clock_gettime */
|
|
|
48
|
|
|
49 #ifdef __sun
|
|
|
50 # include <sys/filio.h>
|
|
|
51 # include <sys/wait.h>
|
|
|
52 #endif
|
|
|
53
|
|
|
54 #if defined(__APPLE__)
|
|
|
55 # include <mach/mach.h>
|
|
|
56 # include <mach/thread_info.h>
|
|
|
57 # include <sys/filio.h>
|
|
|
58 # include <sys/sysctl.h>
|
|
|
59 #endif /* defined(__APPLE__) */
|
|
|
60
|
|
|
61
|
|
|
62 #if defined(__APPLE__) && !TARGET_OS_IPHONE
|
|
|
63 # include <crt_externs.h>
|
|
|
64 # include <mach-o/dyld.h> /* _NSGetExecutablePath */
|
|
|
65 # define environ (*_NSGetEnviron())
|
|
|
66 #else /* defined(__APPLE__) && !TARGET_OS_IPHONE */
|
|
|
67 extern char** environ;
|
|
|
68 #endif /* !(defined(__APPLE__) && !TARGET_OS_IPHONE) */
|
|
|
69
|
|
|
70
|
|
|
71 #if defined(__DragonFly__) || \
|
|
|
72 defined(__FreeBSD__) || \
|
|
|
73 defined(__NetBSD__) || \
|
|
|
74 defined(__OpenBSD__)
|
|
|
75 # include <sys/sysctl.h>
|
|
|
76 # include <sys/filio.h>
|
|
|
77 # include <sys/wait.h>
|
|
|
78 # include <sys/param.h>
|
|
|
79 # if defined(__FreeBSD__)
|
|
|
80 # include <sys/cpuset.h>
|
|
|
81 # define uv__accept4 accept4
|
|
|
82 # endif
|
|
|
83 # if defined(__NetBSD__)
|
|
|
84 # define uv__accept4(a, b, c, d) paccept((a), (b), (c), NULL, (d))
|
|
|
85 # endif
|
|
|
86 #endif
|
|
|
87
|
|
|
88 #if defined(__MVS__)
|
|
|
89 # include <sys/ioctl.h>
|
|
|
90 # include "zos-sys-info.h"
|
|
|
91 #endif
|
|
|
92
|
|
|
93 #if defined(__linux__)
|
|
|
94 # include <sched.h>
|
|
|
95 # include <sys/syscall.h>
|
|
|
96 # define gettid() syscall(SYS_gettid)
|
|
|
97 # define uv__accept4 accept4
|
|
|
98 #endif
|
|
|
99
|
|
|
100 #if defined(__FreeBSD__)
|
|
|
101 # include <sys/param.h>
|
|
|
102 # include <sys/cpuset.h>
|
|
|
103 #endif
|
|
|
104
|
|
|
105 #if defined(__NetBSD__)
|
|
|
106 # include <sched.h>
|
|
|
107 #endif
|
|
|
108
|
|
|
109 #if defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
|
|
|
110 # include <sanitizer/linux_syscall_hooks.h>
|
|
|
111 #endif
|
|
|
112
|
|
|
static void uv__run_pending(uv_loop_t* loop);

/* Verify that uv_buf_t is ABI-compatible with struct iovec: same overall
 * size and matching member sizes/offsets, so an array of uv_buf_t can be
 * handed straight to readv()/writev() without conversion. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->base) ==
              sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->len) ==
              sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
|
|
|
123
|
|
|
124
|
|
|
125 /* https://github.com/libuv/libuv/issues/1674 */
|
|
|
126 int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts) {
|
|
|
127 struct timespec t;
|
|
|
128 int r;
|
|
|
129
|
|
|
130 if (ts == NULL)
|
|
|
131 return UV_EFAULT;
|
|
|
132
|
|
|
133 switch (clock_id) {
|
|
|
134 default:
|
|
|
135 return UV_EINVAL;
|
|
|
136 case UV_CLOCK_MONOTONIC:
|
|
|
137 r = clock_gettime(CLOCK_MONOTONIC, &t);
|
|
|
138 break;
|
|
|
139 case UV_CLOCK_REALTIME:
|
|
|
140 r = clock_gettime(CLOCK_REALTIME, &t);
|
|
|
141 break;
|
|
|
142 }
|
|
|
143
|
|
|
144 if (r)
|
|
|
145 return UV__ERR(errno);
|
|
|
146
|
|
|
147 ts->tv_sec = t.tv_sec;
|
|
|
148 ts->tv_nsec = t.tv_nsec;
|
|
|
149
|
|
|
150 return 0;
|
|
|
151 }
|
|
|
152
|
|
|
153
|
|
|
/* High-resolution timestamp in nanoseconds; thin forwarder to the
 * platform-specific uv__hrtime() implementation. */
uint64_t uv_hrtime(void) {
  return uv__hrtime(UV_CLOCK_PRECISE);
}
|
|
|
157
|
|
|
158
|
|
|
/* Begin closing |handle|: flag it as closing, store the user's callback and
 * run the type-specific teardown. Most types are then queued via
 * uv__make_close_pending() so finalization happens later in the loop;
 * UV_FS_POLL (and UV_FS_EVENT on some platforms) defer that queuing to
 * their backends.
 */
void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  assert(!uv__is_closing(handle));  /* Closing a handle twice is a caller bug. */

  handle->flags |= UV_HANDLE_CLOSING;
  handle->close_cb = close_cb;

  switch (handle->type) {
  case UV_NAMED_PIPE:
    uv__pipe_close((uv_pipe_t*)handle);
    break;

  case UV_TTY:
    uv__tty_close((uv_tty_t*)handle);
    break;

  case UV_TCP:
    uv__tcp_close((uv_tcp_t*)handle);
    break;

  case UV_UDP:
    uv__udp_close((uv_udp_t*)handle);
    break;

  case UV_PREPARE:
    uv__prepare_close((uv_prepare_t*)handle);
    break;

  case UV_CHECK:
    uv__check_close((uv_check_t*)handle);
    break;

  case UV_IDLE:
    uv__idle_close((uv_idle_t*)handle);
    break;

  case UV_ASYNC:
    uv__async_close((uv_async_t*)handle);
    break;

  case UV_TIMER:
    uv__timer_close((uv_timer_t*)handle);
    break;

  case UV_PROCESS:
    uv__process_close((uv_process_t*)handle);
    break;

  case UV_FS_EVENT:
    uv__fs_event_close((uv_fs_event_t*)handle);
#if defined(__sun) || defined(__MVS__)
    /*
     * On Solaris, illumos, and z/OS we will not be able to dissociate the
     * watcher for an event which is pending delivery, so we cannot always call
     * uv__make_close_pending() straight away. The backend will call the
     * function once the event has cleared.
     */
    return;
#endif
    break;

  case UV_POLL:
    uv__poll_close((uv_poll_t*)handle);
    break;

  case UV_FS_POLL:
    uv__fs_poll_close((uv_fs_poll_t*)handle);
    /* Poll handles use file system requests, and one of them may still be
     * running. The poll code will call uv__make_close_pending() for us. */
    return;

  case UV_SIGNAL:
    uv__signal_close((uv_signal_t*) handle);
    break;

  default:
    assert(0);
  }

  uv__make_close_pending(handle);
}
|
|
|
239
|
|
|
/* Get or set a SOL_SOCKET-level option on the fd that backs |handle|.
 * Quirky in/out contract: when |*value| is 0 the current option value is
 * read into |*value| (getsockopt); otherwise |*value| is written as the new
 * option value (setsockopt). Only TCP, pipe and UDP handles are supported.
 */
int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
  int r;
  int fd;
  socklen_t len;

  if (handle == NULL || value == NULL)
    return UV_EINVAL;

  if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
    fd = uv__stream_fd((uv_stream_t*) handle);
  else if (handle->type == UV_UDP)
    fd = ((uv_udp_t *) handle)->io_watcher.fd;
  else
    return UV_ENOTSUP;

  len = sizeof(*value);

  if (*value == 0)
    r = getsockopt(fd, SOL_SOCKET, optname, value, &len);
  else
    r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len);

  if (r < 0)
    return UV__ERR(errno);

  return 0;
}
|
|
|
267
|
|
|
/* Push |handle| onto its loop's singly-linked closing_handles list so that
 * uv__run_closing_handles() finalizes it. The handle must already be
 * flagged closing but must not be fully closed yet.
 */
void uv__make_close_pending(uv_handle_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->next_closing = handle->loop->closing_handles;
  handle->loop->closing_handles = handle;
}
|
|
|
274
|
|
|
/* Maximum number of iovec entries a single readv()/writev() call accepts
 * on this platform; falls back to 1024 when nothing is advertised. */
int uv__getiovmax(void) {
#if defined(IOV_MAX)
  return IOV_MAX;
#elif defined(_SC_IOV_MAX)
  /* Cache the sysconf() result. Relaxed atomics suffice: racing threads
   * all compute and store the same value. */
  static _Atomic int iovmax_cached = -1;
  int iovmax;

  iovmax = atomic_load_explicit(&iovmax_cached, memory_order_relaxed);
  if (iovmax != -1)
    return iovmax;

  /* On some embedded devices (arm-linux-uclibc based ip camera),
   * sysconf(_SC_IOV_MAX) can not get the correct value. The return
   * value is -1 and the errno is EINPROGRESS. Degrade the value to 1.
   */
  iovmax = sysconf(_SC_IOV_MAX);
  if (iovmax == -1)
    iovmax = 1;

  atomic_store_explicit(&iovmax_cached, iovmax, memory_order_relaxed);

  return iovmax;
#else
  return 1024;
#endif
}
|
|
|
301
|
|
|
302
|
|
|
/* Finalize a closing handle: mark it closed, release type-specific
 * resources, unref it, unlink it from the loop's handle list and invoke the
 * user's close callback (if one was supplied to uv_close()).
 */
static void uv__finish_close(uv_handle_t* handle) {
  uv_signal_t* sh;

  /* Note: while the handle is in the UV_HANDLE_CLOSING state now, it's still
   * possible for it to be active in the sense that uv__is_active() returns
   * true.
   *
   * A good example is when the user calls uv_shutdown(), immediately followed
   * by uv_close(). The handle is considered active at this point because the
   * completion of the shutdown req is still pending.
   */
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->flags |= UV_HANDLE_CLOSED;

  switch (handle->type) {
  case UV_PREPARE:
  case UV_CHECK:
  case UV_IDLE:
  case UV_ASYNC:
  case UV_TIMER:
  case UV_PROCESS:
  case UV_FS_EVENT:
  case UV_FS_POLL:
  case UV_POLL:
    break;

  case UV_SIGNAL:
    /* If there are any caught signals "trapped" in the signal pipe,
     * we can't call the close callback yet. Reinserting the handle
     * into the closing queue makes the event loop spin but that's
     * okay because we only need to deliver the pending events.
     */
    sh = (uv_signal_t*) handle;
    if (sh->caught_signals > sh->dispatched_signals) {
      handle->flags ^= UV_HANDLE_CLOSED;  /* Undo: not closed after all. */
      uv__make_close_pending(handle);  /* Back into the queue. */
      return;
    }
    break;

  case UV_NAMED_PIPE:
  case UV_TCP:
  case UV_TTY:
    uv__stream_destroy((uv_stream_t*)handle);
    break;

  case UV_UDP:
    uv__udp_finish_close((uv_udp_t*)handle);
    break;

  default:
    assert(0);
    break;
  }

  uv__handle_unref(handle);
  uv__queue_remove(&handle->handle_queue);

  if (handle->close_cb) {
    handle->close_cb(handle);
  }
}
|
|
|
366
|
|
|
367
|
|
|
368 static void uv__run_closing_handles(uv_loop_t* loop) {
|
|
|
369 uv_handle_t* p;
|
|
|
370 uv_handle_t* q;
|
|
|
371
|
|
|
372 p = loop->closing_handles;
|
|
|
373 loop->closing_handles = NULL;
|
|
|
374
|
|
|
375 while (p) {
|
|
|
376 q = p->next_closing;
|
|
|
377 uv__finish_close(p);
|
|
|
378 p = q;
|
|
|
379 }
|
|
|
380 }
|
|
|
381
|
|
|
382
|
|
|
/* Public wrapper around the internal uv__is_closing() flag check. */
int uv_is_closing(const uv_handle_t* handle) {
  return uv__is_closing(handle);
}
|
|
|
386
|
|
|
387
|
|
|
/* File descriptor of the loop's polling backend (epoll/kqueue/etc.). */
int uv_backend_fd(const uv_loop_t* loop) {
  return loop->backend_fd;
}
|
|
|
391
|
|
|
392
|
|
|
/* A loop is "alive" while it has active handles or requests, queued
 * pending callbacks, or handles still waiting to finish closing. */
static int uv__loop_alive(const uv_loop_t* loop) {
  return uv__has_active_handles(loop) ||
         uv__has_active_reqs(loop) ||
         !uv__queue_empty(&loop->pending_queue) ||
         loop->closing_handles != NULL;
}
|
|
|
399
|
|
|
400
|
|
|
/* Poll timeout in milliseconds: 0 whenever there is work that must run
 * without blocking (stopped loop, pending/idle callbacks, children to
 * reap, closing handles, or nothing active at all); otherwise defer to
 * uv__next_timeout() for the timer-driven value. */
static int uv__backend_timeout(const uv_loop_t* loop) {
  if (loop->stop_flag == 0 &&
      /* uv__loop_alive(loop) && */
      (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
      uv__queue_empty(&loop->pending_queue) &&
      uv__queue_empty(&loop->idle_handles) &&
      (loop->flags & UV_LOOP_REAP_CHILDREN) == 0 &&
      loop->closing_handles == NULL)
    return uv__next_timeout(loop);
  return 0;
}
|
|
|
412
|
|
|
413
|
|
|
/* Public variant of uv__backend_timeout(): returns 0 while watcher changes
 * still need to be flushed to the backend by a uv_run() pass. */
int uv_backend_timeout(const uv_loop_t* loop) {
  if (uv__queue_empty(&loop->watcher_queue))
    return uv__backend_timeout(loop);
  /* Need to call uv_run to update the backend fd state. */
  return 0;
}
|
|
|
420
|
|
|
421
|
|
|
/* Public wrapper around the internal liveness check. */
int uv_loop_alive(const uv_loop_t* loop) {
  return uv__loop_alive(loop);
}
|
|
|
425
|
|
|
426
|
|
|
/* Drive the event loop in the requested |mode|. Returns nonzero when the
 * loop is still alive on exit (the caller may run it again), 0 otherwise.
 * The per-iteration phase order below is the documented loop contract.
 */
int uv_run(uv_loop_t* loop, uv_run_mode mode) {
  int timeout;
  int r;
  int can_sleep;

  r = uv__loop_alive(loop);
  if (!r)
    uv__update_time(loop);

  /* Maintain backwards compatibility by processing timers before entering the
   * while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed
   * once, which should be done after polling in order to maintain proper
   * execution order of the conceptual event loop. */
  if (mode == UV_RUN_DEFAULT && r != 0 && loop->stop_flag == 0) {
    uv__update_time(loop);
    uv__run_timers(loop);
  }

  while (r != 0 && loop->stop_flag == 0) {
    /* Only allow the poll call to block when no pending or idle callbacks
     * were already queued before this iteration ran them. */
    can_sleep =
        uv__queue_empty(&loop->pending_queue) &&
        uv__queue_empty(&loop->idle_handles);

    uv__run_pending(loop);
    uv__run_idle(loop);
    uv__run_prepare(loop);

    timeout = 0;
    if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
      timeout = uv__backend_timeout(loop);

    uv__metrics_inc_loop_count(loop);

    uv__io_poll(loop, timeout);

    /* Process immediate callbacks (e.g. write_cb) a small fixed number of
     * times to avoid loop starvation.*/
    for (r = 0; r < 8 && !uv__queue_empty(&loop->pending_queue); r++)
      uv__run_pending(loop);

    /* Run one final update on the provider_idle_time in case uv__io_poll
     * returned because the timeout expired, but no events were received. This
     * call will be ignored if the provider_entry_time was either never set (if
     * the timeout == 0) or was already updated b/c an event was received.
     */
    uv__metrics_update_idle_time(loop);

    uv__run_check(loop);
    uv__run_closing_handles(loop);

    uv__update_time(loop);
    uv__run_timers(loop);

    r = uv__loop_alive(loop);
    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
      break;
  }

  /* The if statement lets gcc compile it to a conditional store. Avoids
   * dirtying a cache line.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}
|
|
|
493
|
|
|
494
|
|
|
/* Public forwarder to the internal loop-time update. */
void uv_update_time(uv_loop_t* loop) {
  uv__update_time(loop);
}
|
|
|
498
|
|
|
499
|
|
|
/* Public wrapper around the internal active-flag check. */
int uv_is_active(const uv_handle_t* handle) {
  return uv__is_active(handle);
}
|
|
|
503
|
|
|
504
|
|
|
/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
int uv__socket(int domain, int type, int protocol) {
  int sockfd;
  int err;

#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  /* Fast path: ask the kernel to set both flags atomically at creation. */
  sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
  if (sockfd != -1)
    return sockfd;

  /* EINVAL means the type flags were not understood; fall through and set
   * them with separate fcntl/ioctl calls. Any other error is final. */
  if (errno != EINVAL)
    return UV__ERR(errno);
#endif

  sockfd = socket(domain, type, protocol);
  if (sockfd == -1)
    return UV__ERR(errno);

  err = uv__nonblock(sockfd, 1);
  if (err == 0)
    err = uv__cloexec(sockfd, 1);

  if (err) {
    uv__close(sockfd);
    return err;
  }

#if defined(SO_NOSIGPIPE)
  /* Best effort: suppress SIGPIPE on write; errors deliberately ignored. */
  {
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
  }
#endif

  return sockfd;
}
|
|
|
541
|
|
|
/* Open |path| read-only with close-on-exec set and wrap the descriptor in
 * a stdio stream. Returns NULL on failure; the descriptor is never leaked.
 */
FILE* uv__open_file(const char* path) {
  FILE* stream;
  int fd;

  fd = uv__open_cloexec(path, O_RDONLY);
  if (fd < 0)
    return NULL;

  stream = fdopen(fd, "r");
  if (stream != NULL)
    return stream;

  uv__close(fd);  /* fdopen() failed; don't leak the descriptor. */
  return NULL;
}
|
|
|
557
|
|
|
558
|
|
|
/* Accept a connection on |sockfd| and return the peer descriptor with the
 * non-blocking and close-on-exec flags set — atomically via accept4/paccept
 * where available, otherwise patched up afterwards. Returns a negative
 * UV_E* code on failure.
 */
int uv__accept(int sockfd) {
  int peerfd;
  int err;

  (void) &err;  /* Unused on platforms that define uv__accept4. */
  assert(sockfd >= 0);

  do
#ifdef uv__accept4
    peerfd = uv__accept4(sockfd, NULL, NULL, SOCK_NONBLOCK|SOCK_CLOEXEC);
#else
    peerfd = accept(sockfd, NULL, NULL);
#endif
  while (peerfd == -1 && errno == EINTR);

  if (peerfd == -1)
    return UV__ERR(errno);

#ifndef uv__accept4
  /* No atomic variant: set the flags with separate calls and close the
   * peer socket if either fails. */
  err = uv__cloexec(peerfd, 1);
  if (err == 0)
    err = uv__nonblock(peerfd, 1);

  if (err != 0) {
    uv__close(peerfd);
    return err;
  }
#endif

  return peerfd;
}
|
|
|
590
|
|
|
591
|
|
|
/* close() on macos has the "interesting" quirk that it fails with EINTR
 * without closing the file descriptor when a thread is in the cancel state.
 * That's why libuv calls close$NOCANCEL() instead.
 *
 * glibc on linux has a similar issue: close() is a cancellation point and
 * will unwind the thread when it's in the cancel state. Work around that
 * by making the system call directly. Musl libc is unaffected.
 */
int uv__close_nocancel(int fd) {
#if defined(__APPLE__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdollar-in-identifier-extension"
#if defined(__LP64__) || TARGET_OS_IPHONE
  extern int close$NOCANCEL(int);
  return close$NOCANCEL(fd);
#else
  extern int close$NOCANCEL$UNIX2003(int);
  return close$NOCANCEL$UNIX2003(fd);
#endif
#pragma GCC diagnostic pop
#elif defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
  /* Route the raw syscall through TSan's hooks so it can track the close. */
  long rc;
  __sanitizer_syscall_pre_close(fd);
  rc = syscall(SYS_close, fd);
  __sanitizer_syscall_post_close(rc, fd);
  return rc;
#elif defined(__linux__) && !defined(__SANITIZE_THREAD__)
  return syscall(SYS_close, fd);
#else
  return close(fd);
#endif
}
|
|
|
624
|
|
|
625
|
|
|
/* Close |fd| without asserting that it isn't a stdio descriptor.
 * Preserves the caller's errno and maps "close already in progress"
 * conditions to success. Returns 0 or a negative UV_E* code.
 */
int uv__close_nocheckstdio(int fd) {
  int saved_errno;
  int rc;

  assert(fd > -1);  /* Catch uninitialized io_watcher.fd bugs. */

  saved_errno = errno;
  rc = uv__close_nocancel(fd);
  if (rc == -1) {
    rc = UV__ERR(errno);
    if (rc == UV_EINTR || rc == UV__ERR(EINPROGRESS))
      rc = 0;  /* The close is in progress, not an error. */
    errno = saved_errno;
  }

  return rc;
}
|
|
|
643
|
|
|
644
|
|
|
/* Close |fd|, asserting that it is not one of the stdio descriptors. */
int uv__close(int fd) {
  assert(fd > STDERR_FILENO);  /* Catch stdio close bugs. */
#if defined(__MVS__)
  /* z/OS: drop any epoll bookkeeping for this fd first; errno preserved. */
  SAVE_ERRNO(epoll_file_close(fd));
#endif
  return uv__close_nocheckstdio(fd);
}
|
|
|
652
|
|
|
#if UV__NONBLOCK_IS_IOCTL
/* Set (|set| != 0) or clear non-blocking mode on |fd| using the FIONBIO
 * ioctl; compiled only on platforms where libuv prefers it over fcntl(). */
int uv__nonblock_ioctl(int fd, int set) {
  int r;

  do
    r = ioctl(fd, FIONBIO, &set);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}
#endif
|
|
|
667
|
|
|
668
|
|
|
/* Set (|set| != 0) or clear the O_NONBLOCK flag on |fd| via fcntl().
 * Returns 0 on success or a negative UV_E* code. No syscall is issued
 * when the flag already has the requested value.
 */
int uv__nonblock_fcntl(int fd, int set) {
  int oldflags;
  int newflags;
  int rc;

  do
    oldflags = fcntl(fd, F_GETFL);
  while (oldflags == -1 && errno == EINTR);

  if (oldflags == -1)
    return UV__ERR(errno);

  if (set)
    newflags = oldflags | O_NONBLOCK;
  else
    newflags = oldflags & ~O_NONBLOCK;

  /* Bail out now if already set/clear. */
  if (newflags == oldflags)
    return 0;

  do
    rc = fcntl(fd, F_SETFL, newflags);
  while (rc == -1 && errno == EINTR);

  if (rc == -1)
    return UV__ERR(errno);

  return 0;
}
|
|
|
698
|
|
|
699
|
|
|
/* Set (|set| != 0) or clear the FD_CLOEXEC flag on |fd|.
 * Returns 0 on success or a negative UV_E* code.
 */
int uv__cloexec(int fd, int set) {
  int fdflags;
  int rc;

  fdflags = set ? FD_CLOEXEC : 0;

  do
    rc = fcntl(fd, F_SETFD, fdflags);
  while (rc == -1 && errno == EINTR);

  return rc == 0 ? 0 : UV__ERR(errno);
}
|
|
|
717
|
|
|
718
|
|
|
/* recvmsg() wrapper that marks any file descriptors received via SCM_RIGHTS
 * close-on-exec. Platforms with MSG_CMSG_CLOEXEC do it atomically in the
 * kernel; elsewhere each received fd is patched up afterwards.
 */
ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
#if defined(__ANDROID__) || \
    defined(__DragonFly__) || \
    defined(__FreeBSD__) || \
    defined(__NetBSD__) || \
    defined(__OpenBSD__) || \
    defined(__linux__)
  ssize_t rc;
  rc = recvmsg(fd, msg, flags | MSG_CMSG_CLOEXEC);
  if (rc == -1)
    return UV__ERR(errno);
  return rc;
#else
  struct cmsghdr* cmsg;
  int* pfd;
  int* end;
  ssize_t rc;
  rc = recvmsg(fd, msg, flags);
  if (rc == -1)
    return UV__ERR(errno);
  if (msg->msg_controllen == 0)
    return rc;
  /* Walk every control message and set FD_CLOEXEC on each fd carried in an
   * SCM_RIGHTS payload. Note there is an unavoidable window between
   * recvmsg() returning and the flag being set. */
  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
    if (cmsg->cmsg_type == SCM_RIGHTS)
      for (pfd = (int*) CMSG_DATA(cmsg),
           end = (int*) ((char*) cmsg + cmsg->cmsg_len);
           pfd < end;
           pfd += 1)
        uv__cloexec(*pfd, 1);
  return rc;
#endif
}
|
|
|
751
|
|
|
752
|
|
|
/* Copy the current working directory into |buffer| (capacity in |*size|).
 * On success *size is set to the string length, with a trailing slash
 * stripped except for the root directory. When the buffer is too small,
 * *size is set to the required capacity (length + NUL) and UV_ENOBUFS is
 * returned.
 */
int uv_cwd(char* buffer, size_t* size) {
  char scratch[1 + UV__PATH_MAX];

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  /* Try to read directly into the user's buffer first... */
  if (getcwd(buffer, *size) != NULL)
    goto fixup;

  if (errno != ERANGE)
    return UV__ERR(errno);

  /* ...or into scratch space if the user's buffer is too small
   * so we can report how much space to provide on the next try.
   */
  if (getcwd(scratch, sizeof(scratch)) == NULL)
    return UV__ERR(errno);

  buffer = scratch;

fixup:

  *size = strlen(buffer);

  /* Strip a trailing slash, but never reduce "/" to an empty string. */
  if (*size > 1 && buffer[*size - 1] == '/') {
    *size -= 1;
    buffer[*size] = '\0';
  }

  if (buffer == scratch) {
    *size += 1;  /* Report the full capacity needed, including the NUL. */
    return UV_ENOBUFS;
  }

  return 0;
}
|
|
|
790
|
|
|
791
|
|
|
/* Change the process working directory to |dir|.
 * Returns 0 on success or a negative UV_E* code.
 */
int uv_chdir(const char* dir) {
  if (chdir(dir) == 0)
    return 0;

  return UV__ERR(errno);
}
|
|
|
798
|
|
|
799
|
|
|
void uv_disable_stdio_inheritance(void) {
  int fd;

  /* Set the CLOEXEC flag on all open descriptors. The first 16 descriptors
   * are tried unconditionally; after that, stop at the first failure.
   */
  fd = 0;
  for (;;) {
    if (uv__cloexec(fd, 1) != 0 && fd > 15)
      break;
    fd++;
  }
}
|
|
|
810
|
|
|
811
|
|
|
/* Retrieve the OS file descriptor backing |handle| into |*fd|. Only stream,
 * UDP and poll handles carry one; returns UV_EINVAL for other handle types
 * and UV_EBADF when the handle is closing or has no descriptor.
 */
int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
  int fd_out;

  switch (handle->type) {
  case UV_TCP:
  case UV_NAMED_PIPE:
  case UV_TTY:
    fd_out = uv__stream_fd((uv_stream_t*) handle);
    break;

  case UV_UDP:
    fd_out = ((uv_udp_t *) handle)->io_watcher.fd;
    break;

  case UV_POLL:
    fd_out = ((uv_poll_t *) handle)->io_watcher.fd;
    break;

  default:
    return UV_EINVAL;
  }

  if (uv__is_closing(handle) || fd_out == -1)
    return UV_EBADF;

  *fd = fd_out;
  return 0;
}
|
|
|
840
|
|
|
841
|
|
|
842 static void uv__run_pending(uv_loop_t* loop) {
|
|
|
843 struct uv__queue* q;
|
|
|
844 struct uv__queue pq;
|
|
|
845 uv__io_t* w;
|
|
|
846
|
|
|
847 uv__queue_move(&loop->pending_queue, &pq);
|
|
|
848
|
|
|
849 while (!uv__queue_empty(&pq)) {
|
|
|
850 q = uv__queue_head(&pq);
|
|
|
851 uv__queue_remove(q);
|
|
|
852 uv__queue_init(q);
|
|
|
853 w = uv__queue_data(q, uv__io_t, pending_queue);
|
|
|
854 w->cb(loop, w, POLLOUT);
|
|
|
855 }
|
|
|
856 }
|
|
|
857
|
|
|
858
|
|
|
/* Round |val| up to the next power of two; values that already are a power
 * of two come back unchanged. Folds the high bit down across all 32 bits by
 * OR-ing progressively larger shifts, then adds one.
 */
static unsigned int next_power_of_two(unsigned int val) {
  unsigned int shift;

  val -= 1;
  for (shift = 1; shift < 32; shift *= 2)
    val |= val >> shift;
  val += 1;

  return val;
}
|
|
|
869
|
|
|
/* Grow loop->watchers so it can index at least |len| file descriptors.
 * Two extra slots past nwatchers hold a fake watcher list and count; these
 * are preserved across resizes. Returns 0 on success or UV_ENOMEM.
 */
static int maybe_resize(uv_loop_t* loop, unsigned int len) {
  uv__io_t** watchers;
  void* fake_watcher_list;
  void* fake_watcher_count;
  unsigned int nwatchers;
  unsigned int i;

  if (len <= loop->nwatchers)
    return 0;

  /* Preserve fake watcher list and count at the end of the watchers */
  if (loop->watchers != NULL) {
    fake_watcher_list = loop->watchers[loop->nwatchers];
    fake_watcher_count = loop->watchers[loop->nwatchers + 1];
  } else {
    fake_watcher_list = NULL;
    fake_watcher_count = NULL;
  }

  /* Grow geometrically; subtract the two bookkeeping slots so the full
   * allocation (nwatchers + 2) stays a power of two. */
  nwatchers = next_power_of_two(len + 2) - 2;
  watchers = uv__reallocf(loop->watchers,
                          (nwatchers + 2) * sizeof(loop->watchers[0]));

  if (watchers == NULL)
    return UV_ENOMEM;
  for (i = loop->nwatchers; i < nwatchers; i++)
    watchers[i] = NULL;
  watchers[nwatchers] = fake_watcher_list;
  watchers[nwatchers + 1] = fake_watcher_count;

  loop->watchers = watchers;
  loop->nwatchers = nwatchers;
  return 0;
}
|
|
|
904
|
|
|
905
|
|
|
906 void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
|
|
|
907 assert(fd >= -1);
|
|
|
908 uv__queue_init(&w->pending_queue);
|
|
|
909 uv__queue_init(&w->watcher_queue);
|
|
|
910 w->cb = cb;
|
|
|
911 w->fd = fd;
|
|
|
912 w->events = 0;
|
|
|
913 w->pevents = 0;
|
|
|
914 }
|
|
|
915
|
|
|
916
|
|
|
/* Register interest in |events| for watcher |w| on |loop|. May grow the
 * loop's watcher array; returns UV_ENOMEM when that fails, 0 otherwise.
 */
int uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  int err;

  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  assert(w->fd >= 0);
  assert(w->fd < INT_MAX);

  w->pevents |= events;
  err = maybe_resize(loop, w->fd + 1);
  if (err)
    return err;

#if !defined(__sun)
  /* The event ports backend needs to rearm all file descriptors on each and
   * every tick of the event loop but the other backends allow us to
   * short-circuit here if the event mask is unchanged.
   */
  if (w->events == w->pevents)
    return 0;
#endif

  /* Queue the watcher so the backend picks up the new event mask. */
  if (uv__queue_empty(&w->watcher_queue))
    uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);

  if (loop->watchers[w->fd] == NULL) {
    loop->watchers[w->fd] = w;
    loop->nfds++;
  }

  return 0;
}
|
|
|
949
|
|
|
950
|
|
|
951 int uv__io_init_start(uv_loop_t* loop,
|
|
|
952 uv__io_t* w,
|
|
|
953 uv__io_cb cb,
|
|
|
954 int fd,
|
|
|
955 unsigned int events) {
|
|
|
956 int err;
|
|
|
957
|
|
|
958 assert(cb != NULL);
|
|
|
959 assert(fd > -1);
|
|
|
960 uv__io_init(w, cb, fd);
|
|
|
961 err = uv__io_start(loop, w, events);
|
|
|
962 if (err)
|
|
|
963 uv__io_init(w, NULL, -1);
|
|
|
964 return err;
|
|
|
965 }
|
|
|
966
|
|
|
967
|
|
|
/* Remove interest in |events| for watcher |w|. When no events remain, the
 * watcher is fully detached from the loop; otherwise it is (re)queued so
 * the backend picks up the reduced event mask.
 */
void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);

  if (w->fd == -1)
    return;

  assert(w->fd >= 0);

  /* Happens when uv__io_stop() is called on a handle that was never started. */
  if ((unsigned) w->fd >= loop->nwatchers)
    return;

  w->pevents &= ~events;

  if (w->pevents == 0) {
    uv__queue_remove(&w->watcher_queue);
    uv__queue_init(&w->watcher_queue);
    w->events = 0;

    if (w == loop->watchers[w->fd]) {
      assert(loop->nfds > 0);
      loop->watchers[w->fd] = NULL;
      loop->nfds--;
    }
  }
  else if (uv__queue_empty(&w->watcher_queue))
    uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
}
|
|
|
997
|
|
|
998
|
|
|
/* Fully tear down watcher |w|: stop all events, drop any pending callback
 * and purge stale backend events for its descriptor. */
void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
  uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
  uv__queue_remove(&w->pending_queue);

  /* Remove stale events for this file descriptor */
  if (w->fd != -1)
    uv__platform_invalidate_fd(loop, w->fd);
}
|
|
|
1007
|
|
|
1008
|
|
|
1009 void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
|
|
|
1010 if (uv__queue_empty(&w->pending_queue))
|
|
|
1011 uv__queue_insert_tail(&loop->pending_queue, &w->pending_queue);
|
|
|
1012 }
|
|
|
1013
|
|
|
1014
|
|
|
/* Nonzero when watcher |w| is registered for any of |events|. */
int uv__io_active(const uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  return 0 != (w->pevents & events);
}
|
|
|
1020
|
|
|
1021
|
|
|
/* Nonzero when |fd| currently has a registered watcher in |loop|.
 * The unsigned cast also rejects negative descriptors. */
int uv__fd_exists(uv_loop_t* loop, int fd) {
  return (unsigned) fd < loop->nwatchers && loop->watchers[fd] != NULL;
}
|
|
|
1025
|
|
|
1026
|
|
|
/* Fetch resource usage for `who` (e.g. RUSAGE_SELF) via getrusage() and
 * copy it field-by-field into libuv's uv_rusage_t. Returns 0 on success
 * or a negative errno-derived libuv error code. */
static int uv__getrusage(int who, uv_rusage_t* rusage) {
  struct rusage usage;

  if (getrusage(who, &usage))
    return UV__ERR(errno);

  rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
  rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;

  rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
  rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;

/* These fields are not copied on z/OS and Haiku. */
#if !defined(__MVS__) && !defined(__HAIKU__)
  rusage->ru_maxrss = usage.ru_maxrss;
  rusage->ru_ixrss = usage.ru_ixrss;
  rusage->ru_idrss = usage.ru_idrss;
  rusage->ru_isrss = usage.ru_isrss;
  rusage->ru_minflt = usage.ru_minflt;
  rusage->ru_majflt = usage.ru_majflt;
  rusage->ru_nswap = usage.ru_nswap;
  rusage->ru_inblock = usage.ru_inblock;
  rusage->ru_oublock = usage.ru_oublock;
  rusage->ru_msgsnd = usage.ru_msgsnd;
  rusage->ru_msgrcv = usage.ru_msgrcv;
  rusage->ru_nsignals = usage.ru_nsignals;
  rusage->ru_nvcsw = usage.ru_nvcsw;
  rusage->ru_nivcsw = usage.ru_nivcsw;
#endif

  /* Most platforms report ru_maxrss in kilobytes; macOS and Solaris are
   * the outliers because of course they are.
   */
#if defined(__APPLE__)
  rusage->ru_maxrss /= 1024; /* macOS and iOS report bytes. */
#elif defined(__sun)
  rusage->ru_maxrss *= getpagesize() / 1024; /* Solaris reports pages. */
#endif

  return 0;
}
|
|
|
1067
|
|
|
1068
|
|
|
/* Process-wide resource usage; thin wrapper over uv__getrusage(). */
int uv_getrusage(uv_rusage_t* rusage) {
  return uv__getrusage(RUSAGE_SELF, rusage);
}
|
|
|
1072
|
|
|
1073
|
|
|
/* Per-thread resource usage. On macOS only user/system CPU time is
 * reported (via Mach thread_info); elsewhere RUSAGE_LWP (Solaris-style)
 * or RUSAGE_THREAD (Linux-style) is used when available, otherwise
 * UV_ENOTSUP. */
int uv_getrusage_thread(uv_rusage_t* rusage) {
#if defined(__APPLE__)
  mach_msg_type_number_t count;
  thread_basic_info_data_t info;
  kern_return_t kr;
  thread_t thread;

  thread = mach_thread_self();
  count = THREAD_BASIC_INFO_COUNT;
  kr = thread_info(thread,
                   THREAD_BASIC_INFO,
                   (thread_info_t)&info,
                   &count);

  if (kr != KERN_SUCCESS) {
    /* mach_thread_self() returned a port reference we must release. */
    mach_port_deallocate(mach_task_self(), thread);
    return UV_EINVAL;
  }

  /* Only the CPU-time fields are filled in; zero the rest. */
  memset(rusage, 0, sizeof(*rusage));

  rusage->ru_utime.tv_sec = info.user_time.seconds;
  rusage->ru_utime.tv_usec = info.user_time.microseconds;
  rusage->ru_stime.tv_sec = info.system_time.seconds;
  rusage->ru_stime.tv_usec = info.system_time.microseconds;

  mach_port_deallocate(mach_task_self(), thread);

  return 0;

#elif defined(RUSAGE_LWP)
  return uv__getrusage(RUSAGE_LWP, rusage);
#elif defined(RUSAGE_THREAD)
  return uv__getrusage(RUSAGE_THREAD, rusage);
#endif  /* defined(__APPLE__) */
  /* Reached only when none of the branches above compiled in. */
  return UV_ENOTSUP;
}
|
|
|
1111
|
|
|
1112
|
|
|
/* Open `path` with close-on-exec set. Uses O_CLOEXEC atomically where
 * available; otherwise falls back to open() followed by uv__cloexec(),
 * which could leave a brief window where the fd lacks FD_CLOEXEC.
 * Returns the fd on success or a negative libuv error code. */
int uv__open_cloexec(const char* path, int flags) {
#if defined(O_CLOEXEC)
  int fd;

  fd = open(path, flags | O_CLOEXEC);
  if (fd == -1)
    return UV__ERR(errno);

  return fd;
#else  /* O_CLOEXEC */
  int err;
  int fd;

  fd = open(path, flags);
  if (fd == -1)
    return UV__ERR(errno);

  err = uv__cloexec(fd, 1);
  if (err) {
    /* Could not set FD_CLOEXEC; do not hand out a leaky fd. */
    uv__close(fd);
    return err;
  }

  return fd;
#endif  /* O_CLOEXEC */
}
|
|
|
1139
|
|
|
1140
|
|
|
/* Read up to len-1 bytes of `filename` into `buf` and NUL-terminate it.
 * A single read() is issued (retried only on EINTR), so the result may be
 * a partial file. Returns 0 on success or a negative libuv error code. */
int uv__slurp(const char* filename, char* buf, size_t len) {
  ssize_t n;
  int fd;

  assert(len > 0);

  fd = uv__open_cloexec(filename, O_RDONLY);
  if (fd < 0)
    return fd;

  do
    n = read(fd, buf, len - 1);
  while (n == -1 && errno == EINTR);

  /* A failing close here indicates fd-table corruption; crash loudly. */
  if (uv__close_nocheckstdio(fd))
    abort();

  if (n < 0)
    return UV__ERR(errno);

  buf[n] = '\0';

  return 0;
}
|
|
|
1165
|
|
|
1166
|
|
|
/* Duplicate `oldfd` onto `newfd` with close-on-exec set on the new fd.
 * Uses dup3(O_CLOEXEC) atomically on platforms that have it; otherwise
 * dup2() followed by uv__cloexec(). Returns newfd on success or a
 * negative libuv error code. */
int uv__dup2_cloexec(int oldfd, int newfd) {
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__linux__)
  int r;

  r = dup3(oldfd, newfd, O_CLOEXEC);
  if (r == -1)
    return UV__ERR(errno);

  return r;
#else
  int err;
  int r;

  r = dup2(oldfd, newfd);  /* Never retry. */
  if (r == -1)
    return UV__ERR(errno);

  err = uv__cloexec(newfd, 1);
  if (err != 0) {
    uv__close(newfd);
    return err;
  }

  return r;
#endif
}
|
|
|
1193
|
|
|
1194
|
|
|
/* Copy the current user's home directory into `buffer`. $HOME wins; the
 * passwd database is the fallback. On UV_ENOBUFS, *size is updated to the
 * required length including the terminating NUL. */
int uv_os_homedir(char* buffer, size_t* size) {
  uv_passwd_t pwd;
  size_t len;
  int r;

  /* Check if the HOME environment variable is set first. The task of
     performing input validation on buffer and size is taken care of by
     uv_os_getenv(). */
  r = uv_os_getenv("HOME", buffer, size);

  /* Any result other than "not set" (success, ENOBUFS, EINVAL) is final. */
  if (r != UV_ENOENT)
    return r;

  /* HOME is not set, so call uv_os_get_passwd() */
  r = uv_os_get_passwd(&pwd);

  if (r != 0) {
    return r;
  }

  len = strlen(pwd.homedir);

  if (len >= *size) {
    *size = len + 1;
    uv_os_free_passwd(&pwd);
    return UV_ENOBUFS;
  }

  memcpy(buffer, pwd.homedir, len + 1);
  *size = len;
  uv_os_free_passwd(&pwd);

  return 0;
}
|
|
|
1229
|
|
|
1230
|
|
|
1231 int uv_os_tmpdir(char* buffer, size_t* size) {
|
|
|
1232 const char* buf;
|
|
|
1233 size_t len;
|
|
|
1234
|
|
|
1235 if (buffer == NULL || size == NULL || *size == 0)
|
|
|
1236 return UV_EINVAL;
|
|
|
1237
|
|
|
1238 #define CHECK_ENV_VAR(name) \
|
|
|
1239 do { \
|
|
|
1240 buf = getenv(name); \
|
|
|
1241 if (buf != NULL) \
|
|
|
1242 goto return_buffer; \
|
|
|
1243 } \
|
|
|
1244 while (0)
|
|
|
1245
|
|
|
1246 /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */
|
|
|
1247 CHECK_ENV_VAR("TMPDIR");
|
|
|
1248 CHECK_ENV_VAR("TMP");
|
|
|
1249 CHECK_ENV_VAR("TEMP");
|
|
|
1250 CHECK_ENV_VAR("TEMPDIR");
|
|
|
1251
|
|
|
1252 #undef CHECK_ENV_VAR
|
|
|
1253
|
|
|
1254 /* No temp environment variables defined */
|
|
|
1255 #if defined(__ANDROID__)
|
|
|
1256 buf = "/data/local/tmp";
|
|
|
1257 #else
|
|
|
1258 buf = "/tmp";
|
|
|
1259 #endif
|
|
|
1260
|
|
|
1261 return_buffer:
|
|
|
1262 len = strlen(buf);
|
|
|
1263
|
|
|
1264 if (len >= *size) {
|
|
|
1265 *size = len + 1;
|
|
|
1266 return UV_ENOBUFS;
|
|
|
1267 }
|
|
|
1268
|
|
|
1269 /* The returned directory should not have a trailing slash. */
|
|
|
1270 if (len > 1 && buf[len - 1] == '/') {
|
|
|
1271 len--;
|
|
|
1272 }
|
|
|
1273
|
|
|
1274 memcpy(buffer, buf, len + 1);
|
|
|
1275 buffer[len] = '\0';
|
|
|
1276 *size = len;
|
|
|
1277
|
|
|
1278 return 0;
|
|
|
1279 }
|
|
|
1280
|
|
|
1281
|
|
|
/* Look up `uid` in the passwd database and fill `pwd`. The username, home
 * directory and shell strings all live in one allocation anchored at
 * pwd->username (laid out username|homedir|shell). Returns 0, UV_ENOENT
 * when the uid is unknown, or a negative errno value. */
static int uv__getpwuid_r(uv_passwd_t *pwd, uid_t uid) {
  struct passwd pw;
  struct passwd* result;
  char* buf;
  size_t bufsize;
  size_t name_size;
  size_t homedir_size;
  size_t shell_size;
  int r;

  if (pwd == NULL)
    return UV_EINVAL;

  /* Calling sysconf(_SC_GETPW_R_SIZE_MAX) would get the suggested size, but it
   * is frequently 1024 or 4096, so we can just use that directly. The pwent
   * will not usually be large. */
  for (bufsize = 2000;; bufsize *= 2) {
    buf = uv__malloc(bufsize);

    if (buf == NULL)
      return UV_ENOMEM;

    do
      r = getpwuid_r(uid, &pw, buf, bufsize, &result);
    while (r == EINTR);

    /* On failure or no match the scratch buffer is useless; free it now.
     * On ERANGE the loop retries with a doubled buffer. */
    if (r != 0 || result == NULL)
      uv__free(buf);

    if (r != ERANGE)
      break;
  }

  if (r != 0)
    return UV__ERR(r);

  if (result == NULL)
    return UV_ENOENT;

  /* Allocate memory for the username, shell, and home directory */
  name_size = strlen(pw.pw_name) + 1;
  homedir_size = strlen(pw.pw_dir) + 1;
  shell_size = strlen(pw.pw_shell) + 1;
  pwd->username = uv__malloc(name_size + homedir_size + shell_size);

  if (pwd->username == NULL) {
    uv__free(buf);
    return UV_ENOMEM;
  }

  /* Copy the username */
  memcpy(pwd->username, pw.pw_name, name_size);

  /* Copy the home directory */
  pwd->homedir = pwd->username + name_size;
  memcpy(pwd->homedir, pw.pw_dir, homedir_size);

  /* Copy the shell */
  pwd->shell = pwd->homedir + homedir_size;
  memcpy(pwd->shell, pw.pw_shell, shell_size);

  /* Copy the uid and gid */
  pwd->uid = pw.pw_uid;
  pwd->gid = pw.pw_gid;

  uv__free(buf);

  return 0;
}
|
|
|
1351
|
|
|
1352
|
|
|
/* Look up `gid` in the group database and fill `grp`. The member-pointer
 * array, member strings and group name are packed into one allocation
 * starting at grp->members. Returns 0, UV_ENOENT when the gid is unknown,
 * or a negative errno value. */
int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) {
#if defined(__ANDROID__) && __ANDROID_API__ < 24
  /* This function getgrgid_r() was added in Android N (level 24) */
  return UV_ENOSYS;
#else
  struct group gp;
  struct group* result;
  char* buf;
  char* gr_mem;
  size_t bufsize;
  size_t name_size;
  long members;
  size_t mem_size;
  int r;

  if (grp == NULL)
    return UV_EINVAL;

  /* Calling sysconf(_SC_GETGR_R_SIZE_MAX) would get the suggested size, but it
   * is frequently 1024 or 4096, so we can just use that directly. The pwent
   * will not usually be large. */
  for (bufsize = 2000;; bufsize *= 2) {
    buf = uv__malloc(bufsize);

    if (buf == NULL)
      return UV_ENOMEM;

    do
      r = getgrgid_r(gid, &gp, buf, bufsize, &result);
    while (r == EINTR);

    /* Free the scratch buffer on failure or no match; ERANGE retries
     * with a doubled buffer. */
    if (r != 0 || result == NULL)
      uv__free(buf);

    if (r != ERANGE)
      break;
  }

  if (r != 0)
    return UV__ERR(r);

  if (result == NULL)
    return UV_ENOENT;

  /* Allocate memory for the groupname and members. */
  name_size = strlen(gp.gr_name) + 1;
  members = 0;
  mem_size = sizeof(char*);  /* Room for the NULL terminator slot. */
  for (r = 0; gp.gr_mem[r] != NULL; r++) {
    mem_size += strlen(gp.gr_mem[r]) + 1 + sizeof(char*);
    members++;
  }

  gr_mem = uv__malloc(name_size + mem_size);
  if (gr_mem == NULL) {
    uv__free(buf);
    return UV_ENOMEM;
  }

  /* Copy the members */
  grp->members = (char**) gr_mem;
  grp->members[members] = NULL;
  gr_mem = (char*) &grp->members[members + 1];  /* Strings follow the array. */
  for (r = 0; r < members; r++) {
    grp->members[r] = gr_mem;
    strcpy(gr_mem, gp.gr_mem[r]);
    gr_mem += strlen(gr_mem) + 1;
  }
  assert(gr_mem == (char*)grp->members + mem_size);

  /* Copy the groupname */
  grp->groupname = gr_mem;
  memcpy(grp->groupname, gp.gr_name, name_size);
  gr_mem += name_size;

  /* Copy the gid */
  grp->gid = gp.gr_gid;

  uv__free(buf);

  return 0;
#endif
}
|
|
|
1436
|
|
|
1437
|
|
|
/* Passwd entry for the effective user of this process. */
int uv_os_get_passwd(uv_passwd_t* pwd) {
  return uv__getpwuid_r(pwd, geteuid());
}
|
|
|
1441
|
|
|
1442
|
|
|
/* Passwd entry for an arbitrary uid. */
int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid) {
  return uv__getpwuid_r(pwd, uid);
}
|
|
|
1446
|
|
|
1447
|
|
|
/* Map a system errno value into libuv's negative error space. Values that
 * are already <= 0 are assumed to be libuv codes and pass through. */
int uv_translate_sys_error(int sys_errno) {
  if (sys_errno > 0)
    return -sys_errno;

  /* If < 0 then it's already a libuv error. */
  return sys_errno;
}
|
|
|
1452
|
|
|
1453
|
|
|
1454 int uv_os_environ(uv_env_item_t** envitems, int* count) {
|
|
|
1455 int i, j, cnt;
|
|
|
1456 uv_env_item_t* envitem;
|
|
|
1457
|
|
|
1458 *envitems = NULL;
|
|
|
1459 *count = 0;
|
|
|
1460
|
|
|
1461 for (i = 0; environ[i] != NULL; i++);
|
|
|
1462
|
|
|
1463 *envitems = uv__calloc(i, sizeof(**envitems));
|
|
|
1464
|
|
|
1465 if (*envitems == NULL)
|
|
|
1466 return UV_ENOMEM;
|
|
|
1467
|
|
|
1468 for (j = 0, cnt = 0; j < i; j++) {
|
|
|
1469 char* buf;
|
|
|
1470 char* ptr;
|
|
|
1471
|
|
|
1472 if (environ[j] == NULL)
|
|
|
1473 break;
|
|
|
1474
|
|
|
1475 buf = uv__strdup(environ[j]);
|
|
|
1476 if (buf == NULL)
|
|
|
1477 goto fail;
|
|
|
1478
|
|
|
1479 ptr = strchr(buf, '=');
|
|
|
1480 if (ptr == NULL) {
|
|
|
1481 uv__free(buf);
|
|
|
1482 continue;
|
|
|
1483 }
|
|
|
1484
|
|
|
1485 *ptr = '\0';
|
|
|
1486
|
|
|
1487 envitem = &(*envitems)[cnt];
|
|
|
1488 envitem->name = buf;
|
|
|
1489 envitem->value = ptr + 1;
|
|
|
1490
|
|
|
1491 cnt++;
|
|
|
1492 }
|
|
|
1493
|
|
|
1494 *count = cnt;
|
|
|
1495 return 0;
|
|
|
1496
|
|
|
1497 fail:
|
|
|
1498 for (i = 0; i < cnt; i++) {
|
|
|
1499 envitem = &(*envitems)[cnt];
|
|
|
1500 uv__free(envitem->name);
|
|
|
1501 }
|
|
|
1502 uv__free(*envitems);
|
|
|
1503
|
|
|
1504 *envitems = NULL;
|
|
|
1505 *count = 0;
|
|
|
1506 return UV_ENOMEM;
|
|
|
1507 }
|
|
|
1508
|
|
|
1509
|
|
|
1510 int uv_os_getenv(const char* name, char* buffer, size_t* size) {
|
|
|
1511 char* var;
|
|
|
1512 size_t len;
|
|
|
1513
|
|
|
1514 if (name == NULL || buffer == NULL || size == NULL || *size == 0)
|
|
|
1515 return UV_EINVAL;
|
|
|
1516
|
|
|
1517 var = getenv(name);
|
|
|
1518
|
|
|
1519 if (var == NULL)
|
|
|
1520 return UV_ENOENT;
|
|
|
1521
|
|
|
1522 len = strlen(var);
|
|
|
1523
|
|
|
1524 if (len >= *size) {
|
|
|
1525 *size = len + 1;
|
|
|
1526 return UV_ENOBUFS;
|
|
|
1527 }
|
|
|
1528
|
|
|
1529 memcpy(buffer, var, len + 1);
|
|
|
1530 *size = len;
|
|
|
1531
|
|
|
1532 return 0;
|
|
|
1533 }
|
|
|
1534
|
|
|
1535
|
|
|
1536 int uv_os_setenv(const char* name, const char* value) {
|
|
|
1537 if (name == NULL || value == NULL)
|
|
|
1538 return UV_EINVAL;
|
|
|
1539
|
|
|
1540 if (setenv(name, value, 1) != 0)
|
|
|
1541 return UV__ERR(errno);
|
|
|
1542
|
|
|
1543 return 0;
|
|
|
1544 }
|
|
|
1545
|
|
|
1546
|
|
|
1547 int uv_os_unsetenv(const char* name) {
|
|
|
1548 if (name == NULL)
|
|
|
1549 return UV_EINVAL;
|
|
|
1550
|
|
|
1551 if (unsetenv(name) != 0)
|
|
|
1552 return UV__ERR(errno);
|
|
|
1553
|
|
|
1554 return 0;
|
|
|
1555 }
|
|
|
1556
|
|
|
1557
|
|
|
/* Copy the host name into `buffer`. On UV_ENOBUFS, *size is set to the
 * required length including the terminating NUL. */
int uv_os_gethostname(char* buffer, size_t* size) {
  /*
    On some platforms, if the input buffer is not large enough, gethostname()
    succeeds, but truncates the result. libuv can detect this and return ENOBUFS
    instead by creating a large enough buffer and comparing the hostname length
    to the size input.
  */
  char buf[UV_MAXHOSTNAMESIZE];
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  if (gethostname(buf, sizeof(buf)) != 0)
    return UV__ERR(errno);

  buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. */
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, buf, len + 1);
  *size = len;
  return 0;
}
|
|
|
1586
|
|
|
1587
|
|
|
/* On Unix an OS handle and an fd are the same thing; identity mapping.
 * (The Windows implementation does a real CRT-fd -> HANDLE translation.) */
uv_os_fd_t uv_get_osfhandle(int fd) {
  return fd;
}
|
|
|
1591
|
|
|
/* Inverse of uv_get_osfhandle(); identity mapping on Unix. */
int uv_open_osfhandle(uv_os_fd_t os_fd) {
  return os_fd;
}
|
|
|
1595
|
|
|
/* Current process id. */
uv_pid_t uv_os_getpid(void) {
  return getpid();
}
|
|
|
1599
|
|
|
1600
|
|
|
/* Parent process id. */
uv_pid_t uv_os_getppid(void) {
  return getppid();
}
|
|
|
1604
|
|
|
/* Size (in bits) of the CPU affinity mask used by this platform, or
 * UV_ENOTSUP where CPU affinity is unsupported. */
int uv_cpumask_size(void) {
#if UV__CPU_AFFINITY_SUPPORTED
  return CPU_SETSIZE;
#else
  return UV_ENOTSUP;
#endif
}
|
|
|
1612
|
|
|
1613 int uv_os_getpriority(uv_pid_t pid, int* priority) {
|
|
|
1614 int r;
|
|
|
1615
|
|
|
1616 if (priority == NULL)
|
|
|
1617 return UV_EINVAL;
|
|
|
1618
|
|
|
1619 errno = 0;
|
|
|
1620 r = getpriority(PRIO_PROCESS, (int) pid);
|
|
|
1621
|
|
|
1622 if (r == -1 && errno != 0)
|
|
|
1623 return UV__ERR(errno);
|
|
|
1624
|
|
|
1625 *priority = r;
|
|
|
1626 return 0;
|
|
|
1627 }
|
|
|
1628
|
|
|
1629
|
|
|
1630 int uv_os_setpriority(uv_pid_t pid, int priority) {
|
|
|
1631 if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW)
|
|
|
1632 return UV_EINVAL;
|
|
|
1633
|
|
|
1634 if (setpriority(PRIO_PROCESS, (int) pid, priority) != 0)
|
|
|
1635 return UV__ERR(errno);
|
|
|
1636
|
|
|
1637 return 0;
|
|
|
1638 }
|
|
|
1639
|
|
|
1640 /**
|
|
|
1641 * If the function succeeds, the return value is 0.
|
|
|
1642 * If the function fails, the return value is non-zero.
|
|
|
1643 * for Linux, when schedule policy is SCHED_OTHER (default), priority is 0.
|
|
|
1644 * So the output parameter priority is actually the nice value.
|
|
|
1645 */
|
|
|
int uv_thread_getpriority(uv_thread_t tid, int* priority) {
  int r;
  int policy;
  struct sched_param param;
#ifdef __linux__
  pid_t pid = gettid();
#endif

  if (priority == NULL)
    return UV_EINVAL;

  r = pthread_getschedparam(tid, &policy, &param);
  if (r != 0)
    return UV__ERR(errno);

#ifdef __linux__
  /* Under SCHED_OTHER the sched_priority is not meaningful; report the
   * nice value instead. This only works for the calling thread because
   * getpriority() takes a tid, not a pthread_t. */
  if (SCHED_OTHER == policy && pthread_equal(tid, pthread_self())) {
    /* getpriority() may legitimately return -1; errno disambiguates. */
    errno = 0;
    r = getpriority(PRIO_PROCESS, pid);
    if (r == -1 && errno != 0)
      return UV__ERR(errno);
    *priority = r;
    return 0;
  }
#endif

  *priority = param.sched_priority;
  return 0;
}
|
|
|
1675
|
|
|
1676 #ifdef __linux__
|
|
|
/* Map a libuv thread priority onto a nice value (nice = -2 * priority)
 * and apply it to the calling thread via setpriority() on its tid.
 * Returns 0 or a negative libuv error code. */
static int set_nice_for_calling_thread(int priority) {
  int r;
  int nice;

  if (priority < UV_THREAD_PRIORITY_LOWEST || priority > UV_THREAD_PRIORITY_HIGHEST)
    return UV_EINVAL;

  pid_t pid = gettid();
  nice = 0 - priority * 2;
  r = setpriority(PRIO_PROCESS, pid, nice);
  if (r != 0)
    return UV__ERR(errno);
  return 0;
}
|
|
|
1691 #endif
|
|
|
1692
|
|
|
1693 /**
|
|
|
1694 * If the function succeeds, the return value is 0.
|
|
|
1695 * If the function fails, the return value is non-zero.
|
|
|
1696 */
|
|
|
int uv_thread_setpriority(uv_thread_t tid, int priority) {
#if !defined(__GNU__)
  int r;
  int min;
  int max;
  int range;
  int prio;
  int policy;
  struct sched_param param;

  if (priority < UV_THREAD_PRIORITY_LOWEST || priority > UV_THREAD_PRIORITY_HIGHEST)
    return UV_EINVAL;

  r = pthread_getschedparam(tid, &policy, &param);
  if (r != 0)
    return UV__ERR(errno);

#ifdef __linux__
/**
 * for Linux, when schedule policy is SCHED_OTHER (default), priority must be 0,
 * we should set the nice value in this case.
 */
  if (SCHED_OTHER == policy && pthread_equal(tid, pthread_self()))
    return set_nice_for_calling_thread(priority);
#endif

#ifdef __PASE__
  /* IBM i: fixed priority range instead of sched_get_priority_*(). */
  min = 1;
  max = 127;
#else
  min = sched_get_priority_min(policy);
  max = sched_get_priority_max(policy);
#endif

  if (min == -1 || max == -1)
    return UV__ERR(errno);

  range = max - min;

  /* Map libuv's five priority levels onto the policy's native range. */
  switch (priority) {
    case UV_THREAD_PRIORITY_HIGHEST:
      prio = max;
      break;
    case UV_THREAD_PRIORITY_ABOVE_NORMAL:
      prio = min + range * 3 / 4;
      break;
    case UV_THREAD_PRIORITY_NORMAL:
      prio = min + range / 2;
      break;
    case UV_THREAD_PRIORITY_BELOW_NORMAL:
      prio = min + range / 4;
      break;
    case UV_THREAD_PRIORITY_LOWEST:
      prio = min;
      break;
    default:
      return 0;
  }

  /* Skip the syscall when the thread already has the target priority. */
  if (param.sched_priority != prio) {
    param.sched_priority = prio;
    r = pthread_setschedparam(tid, policy, &param);
    if (r != 0)
      return UV__ERR(errno);
  }

  return 0;
#else  /* !defined(__GNU__) */
  /* Simulate success on systems where thread priority is not implemented. */
  return 0;
#endif  /* !defined(__GNU__) */
}
|
|
|
1769
|
|
|
/* Fill `buffer` with uname() data (sysname/release/version/machine).
 * On any error every field is reset to the empty string and a negative
 * libuv error code is returned. */
int uv_os_uname(uv_utsname_t* buffer) {
  struct utsname buf;
  int r;

  if (buffer == NULL)
    return UV_EINVAL;

  if (uname(&buf) == -1) {
    r = UV__ERR(errno);
    goto error;
  }

  r = uv__strscpy(buffer->sysname, buf.sysname, sizeof(buffer->sysname));
  if (r == UV_E2BIG)
    goto error;

#ifdef _AIX
  /* AIX splits the OS level across version ("7") and release ("2");
   * join them into the conventional "7.2" form. */
  r = snprintf(buffer->release,
               sizeof(buffer->release),
               "%s.%s",
               buf.version,
               buf.release);
  if (r >= sizeof(buffer->release)) {
    r = UV_E2BIG;
    goto error;
  }
#else
  r = uv__strscpy(buffer->release, buf.release, sizeof(buffer->release));
  if (r == UV_E2BIG)
    goto error;
#endif

  r = uv__strscpy(buffer->version, buf.version, sizeof(buffer->version));
  if (r == UV_E2BIG)
    goto error;

#if defined(_AIX) || defined(__PASE__)
  r = uv__strscpy(buffer->machine, "ppc64", sizeof(buffer->machine));
#else
  r = uv__strscpy(buffer->machine, buf.machine, sizeof(buffer->machine));
#endif

  if (r == UV_E2BIG)
    goto error;

  return 0;

error:
  buffer->sysname[0] = '\0';
  buffer->release[0] = '\0';
  buffer->version[0] = '\0';
  buffer->machine[0] = '\0';
  return r;
}
|
|
|
1824
|
|
|
/* Shared implementation of uv_*_getsockname()/getpeername(): resolve the
 * handle's fd and invoke `func` (getsockname or getpeername), converting
 * between int and socklen_t for the caller. */
int uv__getsockpeername(const uv_handle_t* handle,
                        uv__peersockfunc func,
                        struct sockaddr* name,
                        int* namelen) {
  socklen_t socklen;
  uv_os_fd_t fd;
  int r;

  r = uv_fileno(handle, &fd);
  if (r < 0)
    return r;

  /* sizeof(socklen_t) != sizeof(int) on some systems. */
  socklen = (socklen_t) *namelen;

  if (func(fd, name, &socklen))
    return UV__ERR(errno);

  *namelen = (int) socklen;
  return 0;
}
|
|
|
1846
|
|
|
1847 int uv_gettimeofday(uv_timeval64_t* tv) {
|
|
|
1848 struct timeval time;
|
|
|
1849
|
|
|
1850 if (tv == NULL)
|
|
|
1851 return UV_EINVAL;
|
|
|
1852
|
|
|
1853 if (gettimeofday(&time, NULL) != 0)
|
|
|
1854 return UV__ERR(errno);
|
|
|
1855
|
|
|
1856 tv->tv_sec = (int64_t) time.tv_sec;
|
|
|
1857 tv->tv_usec = (int32_t) time.tv_usec;
|
|
|
1858 return 0;
|
|
|
1859 }
|
|
|
1860
|
|
|
/* Sleep for `msec` milliseconds, transparently resuming after signal
 * interruptions. nanosleep() writes the remaining time back into the
 * struct, so retrying with the same struct continues where the
 * interrupted sleep left off. */
void uv_sleep(unsigned int msec) {
  struct timespec remaining;
  int rc;

  remaining.tv_sec = msec / 1000;
  remaining.tv_nsec = (msec % 1000) * 1000 * 1000;

  for (;;) {
    rc = nanosleep(&remaining, &remaining);
    if (rc == 0 || errno != EINTR)
      break;
  }

  assert(rc == 0);
}
|
|
|
1874
|
|
|
/* Resolve `prog` to an absolute path in `buf`. Paths containing '/' are
 * resolved directly with realpath(); bare names are searched for in each
 * $PATH entry and must pass an access(X_OK) check. On success *buflen is
 * set to the stored length (the result may be truncated to fit). */
int uv__search_path(const char* prog, char* buf, size_t* buflen) {
  char abspath[UV__PATH_MAX];
  size_t abspath_size;
  char trypath[UV__PATH_MAX];
  char* cloned_path;
  char* path_env;
  char* token;
  char* itr;

  if (buf == NULL || buflen == NULL || *buflen == 0)
    return UV_EINVAL;

  /*
   * Possibilities for prog:
   * i) an absolute path such as: /home/user/myprojects/nodejs/node
   * ii) a relative path such as: ./node or ../myprojects/nodejs/node
   * iii) a bare filename such as "node", after exporting PATH variable
   *     to its location.
   */

  /* Case i) and ii) absolute or relative paths */
  if (strchr(prog, '/') != NULL) {
    if (realpath(prog, abspath) != abspath)
      return UV__ERR(errno);

    abspath_size = strlen(abspath);

    /* Reserve one byte for the NUL, then clamp to the result length. */
    *buflen -= 1;
    if (*buflen > abspath_size)
      *buflen = abspath_size;

    memcpy(buf, abspath, *buflen);
    buf[*buflen] = '\0';

    return 0;
  }

  /* Case iii). Search PATH environment variable */
  cloned_path = NULL;
  token = NULL;
  path_env = getenv("PATH");

  if (path_env == NULL)
    return UV_EINVAL;

  /* uv__strtok() mutates its input, so work on a private copy of $PATH. */
  cloned_path = uv__strdup(path_env);
  if (cloned_path == NULL)
    return UV_ENOMEM;

  token = uv__strtok(cloned_path, ":", &itr);
  while (token != NULL) {
    snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, prog);
    if (realpath(trypath, abspath) == abspath) {
      /* Check the match is executable */
      if (access(abspath, X_OK) == 0) {
        abspath_size = strlen(abspath);

        *buflen -= 1;
        if (*buflen > abspath_size)
          *buflen = abspath_size;

        memcpy(buf, abspath, *buflen);
        buf[*buflen] = '\0';

        uv__free(cloned_path);
        return 0;
      }
    }
    token = uv__strtok(NULL, ":", &itr);
  }
  uv__free(cloned_path);

  /* Out of tokens (path entries), and no match found */
  return UV_EINVAL;
}
|
|
|
1950
|
|
|
/* Count the CPUs in an affinity set. Linux/FreeBSD provide CPU_COUNT();
 * NetBSD does not, so probe cpu ids until cpuset_isset() reports an
 * out-of-range error. */
#if defined(__linux__) || defined (__FreeBSD__)
# define uv__cpu_count(cpuset) CPU_COUNT(cpuset)
#elif defined(__NetBSD__)
static int uv__cpu_count(cpuset_t* set) {
  int rc;
  cpuid_t i;

  rc = 0;
  for (i = 0;; i++) {
    int r = cpuset_isset(i, set);
    if (r < 0)
      break;  /* id is past the end of the set */
    if (r)
      rc++;
  }

  return rc;
}
#endif /* __NetBSD__ */
|
|
|
1970
|
|
|
/* Best-effort count of CPUs usable by this process: affinity masks on
 * Linux/FreeBSD/NetBSD, sysctls on the Apple/OpenBSD family, with a
 * sysconf(_SC_NPROCESSORS_ONLN) fallback. On Linux the result is further
 * capped by the cgroup CPU quota. Always returns at least 1. */
unsigned int uv_available_parallelism(void) {
  long rc = -1;

#ifdef __linux__
  cpu_set_t set;

  memset(&set, 0, sizeof(set));

  /* sysconf(_SC_NPROCESSORS_ONLN) in musl calls sched_getaffinity() but in
   * glibc it's... complicated... so for consistency try sched_getaffinity()
   * before falling back to sysconf(_SC_NPROCESSORS_ONLN).
   */
  if (0 == sched_getaffinity(0, sizeof(set), &set))
    rc = uv__cpu_count(&set);
#elif defined(__MVS__)
  rc = __get_num_online_cpus();
  if (rc < 1)
    rc = 1;

  return (unsigned) rc;
#elif defined(__FreeBSD__)
  cpuset_t set;

  memset(&set, 0, sizeof(set));

  if (0 == cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, sizeof(set), &set))
    rc = uv__cpu_count(&set);
#elif defined(__NetBSD__)
  cpuset_t* set = cpuset_create();
  if (set != NULL) {
    /* Fix: `set` is already a cpuset_t*. The previous code passed `&set`
     * (a cpuset_t**) where sched_getaffinity_np() and uv__cpu_count()
     * expect a cpuset_t*, and sizeof(set) — the size of a pointer — where
     * the size of the set object is required. Use cpuset_size() and the
     * set itself. */
    if (0 == sched_getaffinity_np(getpid(), cpuset_size(set), set))
      rc = uv__cpu_count(set);
    cpuset_destroy(set);
  }
#elif defined(__APPLE__)
  int nprocs;
  size_t i;
  size_t len = sizeof(nprocs);
  /* Try the most restrictive count first; fall through to broader ones. */
  static const char *mib[] = {
    "hw.activecpu",
    "hw.logicalcpu",
    "hw.ncpu"
  };

  for (i = 0; i < ARRAY_SIZE(mib); i++) {
    if (0 == sysctlbyname(mib[i], &nprocs, &len, NULL, 0) &&
        len == sizeof(nprocs) &&
        nprocs > 0) {
      rc = nprocs;
      break;
    }
  }
#elif defined(__OpenBSD__)
  int nprocs;
  size_t i;
  size_t len = sizeof(nprocs);
  static int mib[][2] = {
# ifdef HW_NCPUONLINE
    { CTL_HW, HW_NCPUONLINE },
# endif
    { CTL_HW, HW_NCPU }
  };

  for (i = 0; i < ARRAY_SIZE(mib); i++) {
    if (0 == sysctl(mib[i], ARRAY_SIZE(mib[i]), &nprocs, &len, NULL, 0) &&
        len == sizeof(nprocs) &&
        nprocs > 0) {
      rc = nprocs;
      break;
    }
  }
#endif /* __linux__ */

  if (rc < 0)
    rc = sysconf(_SC_NPROCESSORS_ONLN);

#ifdef __linux__
  {
    long long quota = 0;

    /* Respect cgroup CPU quotas (e.g. containers limited to 2 CPUs). */
    if (uv__get_constrained_cpu(&quota) == 0)
      if (quota > 0 && quota < rc)
        rc = quota;
  }
#endif /* __linux__ */

  if (rc < 1)
    rc = 1;

  return (unsigned) rc;
}
|
|
|
2062
|
|
|
/* Enable load-balanced port sharing on `fd` where the platform's
 * SO_REUSEPORT(_LB) actually distributes work across listeners; return
 * UV_ENOTSUP elsewhere rather than enabling a semantically different
 * option. Returns 0 or a negative libuv error code. */
int uv__sock_reuseport(int fd) {
  int on = 1;
#if defined(__FreeBSD__) && __FreeBSD__ >= 12 && defined(SO_REUSEPORT_LB)
  /* FreeBSD 12 introduced a new socket option named SO_REUSEPORT_LB
   * with the capability of load balancing, it's the substitution of
   * the SO_REUSEPORTs on Linux and DragonFlyBSD. */
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT_LB, &on, sizeof(on)))
    return UV__ERR(errno);
#elif (defined(__linux__) || \
       defined(_AIX73) || \
       (defined(__DragonFly__) && __DragonFly_version >= 300600) || \
       (defined(UV__SOLARIS_11_4) && UV__SOLARIS_11_4)) && \
      defined(SO_REUSEPORT)
  /* On Linux 3.9+, the SO_REUSEPORT implementation distributes connections
   * evenly across all of the threads (or processes) that are blocked in
   * accept() on the same port. As with TCP, SO_REUSEPORT distributes datagrams
   * evenly across all of the receiving threads (or process).
   *
   * DragonFlyBSD 3.6.0 extended SO_REUSEPORT to distribute workload to
   * available sockets, which made it the equivalent of Linux's SO_REUSEPORT.
   *
   * AIX 7.2.5 added the feature that would add the capability to distribute
   * incoming connections or datagrams across all listening ports for SO_REUSEPORT.
   *
   * Solaris 11 supported SO_REUSEPORT, but it's implemented only for
   * binding to the same address and port, without load balancing.
   * Solaris 11.4 extended SO_REUSEPORT with the capability of load balancing.
   */
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on)))
    return UV__ERR(errno);
#else
  (void) (fd);
  (void) (on);
  /* SO_REUSEPORTs do not have the capability of load balancing on platforms
   * other than those mentioned above. The semantics are completely different,
   * therefore we shouldn't enable it, but fail this operation to indicate that
   * UV_[TCP/UDP]_REUSEPORT is not supported on these platforms. */
  return UV_ENOTSUP;
#endif

  return 0;
}
|