|
160
|
1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
|
|
|
2 *
|
|
|
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
4 * of this software and associated documentation files (the "Software"), to
|
|
|
5 * deal in the Software without restriction, including without limitation the
|
|
|
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
|
|
7 * sell copies of the Software, and to permit persons to whom the Software is
|
|
|
8 * furnished to do so, subject to the following conditions:
|
|
|
9 *
|
|
|
10 * The above copyright notice and this permission notice shall be included in
|
|
|
11 * all copies or substantial portions of the Software.
|
|
|
12 *
|
|
|
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
19 * IN THE SOFTWARE.
|
|
|
20 */
|
|
|
21
|
|
|
22 #include "uv.h"
|
|
|
23 #include "uv-common.h"
|
|
|
24
|
|
|
25 #include <assert.h>
|
|
|
26 #include <errno.h>
|
|
|
27 #include <stdarg.h>
|
|
|
28 #include <stddef.h> /* NULL */
|
|
|
29 #include <stdio.h>
|
|
|
30 #include <stdlib.h> /* malloc */
|
|
|
31 #include <string.h> /* memset */
|
|
|
32
|
|
|
33 #if defined(_WIN32)
|
|
|
34 # include <malloc.h> /* malloc */
|
|
|
35 #else
|
|
|
36 # include <net/if.h> /* if_nametoindex */
|
|
|
37 # include <sys/un.h> /* AF_UNIX, sockaddr_un */
|
|
|
38 #endif
|
|
|
39
|
|
|
40
|
|
|
/* Function-pointer table for the allocator libuv uses internally.
 * Replaced wholesale by uv_replace_allocator(); defaults to the C
 * runtime allocator below.
 */
typedef struct {
  uv_malloc_func local_malloc;
  uv_realloc_func local_realloc;
  uv_calloc_func local_calloc;
  uv_free_func local_free;
} uv__allocator_t;

/* Default allocator: the standard C library functions. */
static uv__allocator_t uv__allocator = {
  malloc,
  realloc,
  calloc,
  free,
};
|
|
|
54
|
|
|
/* Duplicate the NUL-terminated string s using the libuv allocator.
 * Returns NULL on allocation failure; caller releases with uv__free().
 */
char* uv__strdup(const char* s) {
  size_t size;
  char* copy;

  size = strlen(s) + 1;  /* include the terminator */
  copy = uv__malloc(size);
  if (copy == NULL)
    return NULL;

  return memcpy(copy, s, size);
}
|
|
|
62
|
|
|
/* Duplicate at most n bytes of s, always NUL-terminating the copy.
 * Returns NULL on allocation failure; caller releases with uv__free().
 */
char* uv__strndup(const char* s, size_t n) {
  size_t copy_len;
  char* copy;

  copy_len = strlen(s);
  if (copy_len > n)
    copy_len = n;

  copy = uv__malloc(copy_len + 1);
  if (copy == NULL)
    return NULL;

  memcpy(copy, s, copy_len);
  copy[copy_len] = '\0';
  return copy;
}
|
|
|
74
|
|
|
75 void* uv__malloc(size_t size) {
|
|
|
76 if (size > 0)
|
|
|
77 return uv__allocator.local_malloc(size);
|
|
|
78 return NULL;
|
|
|
79 }
|
|
|
80
|
|
|
81 void uv__free(void* ptr) {
|
|
|
82 int saved_errno;
|
|
|
83
|
|
|
84 /* Libuv expects that free() does not clobber errno. The system allocator
|
|
|
85 * honors that assumption but custom allocators may not be so careful.
|
|
|
86 */
|
|
|
87 saved_errno = errno;
|
|
|
88 uv__allocator.local_free(ptr);
|
|
|
89 errno = saved_errno;
|
|
|
90 }
|
|
|
91
|
|
|
/* Allocate a zero-initialized array via the active allocator. Unlike
 * uv__malloc(), a zero count/size is passed straight through to the
 * allocator.
 */
void* uv__calloc(size_t count, size_t size) {
  return uv__allocator.local_calloc(count, size);
}
|
|
|
95
|
|
|
96 void* uv__realloc(void* ptr, size_t size) {
|
|
|
97 if (size > 0)
|
|
|
98 return uv__allocator.local_realloc(ptr, size);
|
|
|
99 uv__free(ptr);
|
|
|
100 return NULL;
|
|
|
101 }
|
|
|
102
|
|
|
/* Like uv__realloc() but frees the original buffer when the resize
 * fails (the BSD reallocf() contract), so callers cannot leak it.
 * Note uv__realloc() already freed ptr itself when size == 0.
 */
void* uv__reallocf(void* ptr, size_t size) {
  void* resized;

  resized = uv__realloc(ptr, size);
  if (resized == NULL && size > 0)
    uv__free(ptr);

  return resized;
}
|
|
|
113
|
|
|
114 int uv_replace_allocator(uv_malloc_func malloc_func,
|
|
|
115 uv_realloc_func realloc_func,
|
|
|
116 uv_calloc_func calloc_func,
|
|
|
117 uv_free_func free_func) {
|
|
|
118 if (malloc_func == NULL || realloc_func == NULL ||
|
|
|
119 calloc_func == NULL || free_func == NULL) {
|
|
|
120 return UV_EINVAL;
|
|
|
121 }
|
|
|
122
|
|
|
123 uv__allocator.local_malloc = malloc_func;
|
|
|
124 uv__allocator.local_realloc = realloc_func;
|
|
|
125 uv__allocator.local_calloc = calloc_func;
|
|
|
126 uv__allocator.local_free = free_func;
|
|
|
127
|
|
|
128 return 0;
|
|
|
129 }
|
|
|
130
|
|
|
131
|
|
|
132 void uv_os_free_passwd(uv_passwd_t* pwd) {
|
|
|
133 if (pwd == NULL)
|
|
|
134 return;
|
|
|
135
|
|
|
136 /* On unix, the memory for name, shell, and homedir are allocated in a single
|
|
|
137 * uv__malloc() call. The base of the pointer is stored in pwd->username, so
|
|
|
138 * that is the field that needs to be freed.
|
|
|
139 */
|
|
|
140 uv__free(pwd->username);
|
|
|
141 #ifdef _WIN32
|
|
|
142 uv__free(pwd->homedir);
|
|
|
143 #endif
|
|
|
144 pwd->username = NULL;
|
|
|
145 pwd->shell = NULL;
|
|
|
146 pwd->homedir = NULL;
|
|
|
147 }
|
|
|
148
|
|
|
149
|
|
|
150 void uv_os_free_group(uv_group_t *grp) {
|
|
|
151 if (grp == NULL)
|
|
|
152 return;
|
|
|
153
|
|
|
154 /* The memory for is allocated in a single uv__malloc() call. The base of the
|
|
|
155 * pointer is stored in grp->members, so that is the only field that needs to
|
|
|
156 * be freed.
|
|
|
157 */
|
|
|
158 uv__free(grp->members);
|
|
|
159 grp->members = NULL;
|
|
|
160 grp->groupname = NULL;
|
|
|
161 }
|
|
|
162
|
|
|
163
|
|
|
/* Expand to one switch case per handle/req type, mapping the type enum
 * to the sizeof of the matching struct.
 */
#define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t);

/* Size in bytes of the struct backing a handle type; (size_t)-1 for an
 * unrecognized type.
 */
size_t uv_handle_size(uv_handle_type type) {
  switch (type) {
    UV_HANDLE_TYPE_MAP(XX)
    default:
      return -1;
  }
}

/* Size in bytes of the struct backing a request type; (size_t)-1 for an
 * unrecognized type.
 */
size_t uv_req_size(uv_req_type type) {
  switch(type) {
    UV_REQ_TYPE_MAP(XX)
    default:
      return -1;
  }
}

#undef XX
|
|
|
183
|
|
|
184
|
|
|
/* Size of uv_loop_t, for embedders that allocate loop storage
 * themselves. */
size_t uv_loop_size(void) {
  return sizeof(uv_loop_t);
}
|
|
|
188
|
|
|
189
|
|
|
190 uv_buf_t uv_buf_init(char* base, unsigned int len) {
|
|
|
191 uv_buf_t buf;
|
|
|
192 buf.base = base;
|
|
|
193 buf.len = len;
|
|
|
194 return buf;
|
|
|
195 }
|
|
|
196
|
|
|
197
|
|
|
/* Format a fallback message for an error code missing from
 * UV_ERRNO_MAP. The heap copy is never freed here: callers of
 * uv_err_name()/uv_strerror() treat the result as a static string.
 * Falls back to a literal when the allocation itself fails.
 */
static const char* uv__unknown_err_code(int err) {
  char buf[32];
  char* copy;

  snprintf(buf, sizeof(buf), "Unknown system error %d", err);
  copy = uv__strdup(buf);

  return copy != NULL ? copy : "Unknown system error";
}
|
|
|
207
|
|
|
/* Expand to a case that copies the error constant's name into buf. */
#define UV_ERR_NAME_GEN_R(name, _) \
case UV_## name: \
  uv__strscpy(buf, #name, buflen); break;
/* Reentrant variant of uv_err_name(): writes the error name (or a
 * fallback message for unknown codes) into the caller-supplied buffer
 * and returns buf. Never allocates.
 */
char* uv_err_name_r(int err, char* buf, size_t buflen) {
  switch (err) {
    UV_ERRNO_MAP(UV_ERR_NAME_GEN_R)
    default: snprintf(buf, buflen, "Unknown system error %d", err);
  }
  return buf;
}
#undef UV_ERR_NAME_GEN_R
|
|
|
219
|
|
|
220
|
|
|
/* Expand to a case returning the error constant's name literal. */
#define UV_ERR_NAME_GEN(name, _) case UV_ ## name: return #name;
/* Name of the error constant for err, e.g. "EINVAL". Unknown codes fall
 * through to uv__unknown_err_code(), which allocates a copy that is
 * never freed.
 */
const char* uv_err_name(int err) {
  switch (err) {
    UV_ERRNO_MAP(UV_ERR_NAME_GEN)
  }
  return uv__unknown_err_code(err);
}
#undef UV_ERR_NAME_GEN
|
|
|
229
|
|
|
230
|
|
|
/* Expand to a case that copies the error's message text into buf. */
#define UV_STRERROR_GEN_R(name, msg) \
case UV_ ## name: \
  snprintf(buf, buflen, "%s", msg); break;
/* Reentrant variant of uv_strerror(): writes the error message (or a
 * fallback for unknown codes) into the caller-supplied buffer and
 * returns buf. Never allocates.
 */
char* uv_strerror_r(int err, char* buf, size_t buflen) {
  switch (err) {
    UV_ERRNO_MAP(UV_STRERROR_GEN_R)
    default: snprintf(buf, buflen, "Unknown system error %d", err);
  }
  return buf;
}
#undef UV_STRERROR_GEN_R
|
|
|
242
|
|
|
243
|
|
|
/* Expand to a case returning the error's message literal. */
#define UV_STRERROR_GEN(name, msg) case UV_ ## name: return msg;
/* Message text for err. Unknown codes get a fallback string from
 * uv__unknown_err_code(), which allocates a copy that is never freed.
 */
const char* uv_strerror(int err) {
  switch (err) {
    UV_ERRNO_MAP(UV_STRERROR_GEN)
  }
  return uv__unknown_err_code(err);
}
#undef UV_STRERROR_GEN
|
|
|
252
|
|
|
253
|
|
|
/* Fill addr with an IPv4 address parsed from the string ip and a
 * host-byte-order port. Returns 0 on success or the uv_inet_pton()
 * error for a malformed address string.
 */
int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr) {
  memset(addr, 0, sizeof(*addr));
  addr->sin_family = AF_INET;
  addr->sin_port = htons(port);
  /* Some BSD-derived stacks carry a length byte inside the sockaddr. */
#ifdef SIN6_LEN
  addr->sin_len = sizeof(*addr);
#endif
  return uv_inet_pton(AF_INET, ip, &(addr->sin_addr.s_addr));
}
|
|
|
263
|
|
|
264
|
|
|
/* Fill addr with an IPv6 address parsed from the string ip and a
 * host-byte-order port. A "%zone" suffix (e.g. "fe80::1%eth0") is
 * stripped from the address and converted to a scope id — by interface
 * name on unix, by numeric id on Windows. Returns 0 on success or the
 * uv_inet_pton() error for a malformed address string.
 */
int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr) {
  char address_part[40];  /* longest textual IPv6 address is 39 chars */
  size_t address_part_size;
  const char* zone_index;

  memset(addr, 0, sizeof(*addr));
  addr->sin6_family = AF_INET6;
  addr->sin6_port = htons(port);
  /* Some BSD-derived stacks carry a length byte inside the sockaddr. */
#ifdef SIN6_LEN
  addr->sin6_len = sizeof(*addr);
#endif

  zone_index = strchr(ip, '%');
  if (zone_index != NULL) {
    /* Copy the part left of '%' into a bounded local buffer; anything
     * longer than a valid IPv6 address is truncated and will then be
     * rejected by uv_inet_pton().
     */
    address_part_size = zone_index - ip;
    if (address_part_size >= sizeof(address_part))
      address_part_size = sizeof(address_part) - 1;

    memcpy(address_part, ip, address_part_size);
    address_part[address_part_size] = '\0';
    ip = address_part;

    zone_index++; /* skip '%' */
    /* NOTE: unknown interface (id=0) is silently ignored */
#ifdef _WIN32
    addr->sin6_scope_id = atoi(zone_index);
#else
    addr->sin6_scope_id = if_nametoindex(zone_index);
#endif
  }

  return uv_inet_pton(AF_INET6, ip, &addr->sin6_addr);
}
|
|
|
298
|
|
|
299
|
|
|
/* Render src's IPv4 address (no port) into dst as a dotted quad. */
int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size) {
  return uv_inet_ntop(AF_INET, &src->sin_addr, dst, size);
}


/* Render src's IPv6 address (no port or scope id) into dst. */
int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size) {
  return uv_inet_ntop(AF_INET6, &src->sin6_addr, dst, size);
}
|
|
|
308
|
|
|
309
|
|
|
310 int uv_ip_name(const struct sockaddr *src, char *dst, size_t size) {
|
|
|
311 switch (src->sa_family) {
|
|
|
312 case AF_INET:
|
|
|
313 return uv_inet_ntop(AF_INET, &((struct sockaddr_in *)src)->sin_addr,
|
|
|
314 dst, size);
|
|
|
315 case AF_INET6:
|
|
|
316 return uv_inet_ntop(AF_INET6, &((struct sockaddr_in6 *)src)->sin6_addr,
|
|
|
317 dst, size);
|
|
|
318 default:
|
|
|
319 return UV_EAFNOSUPPORT;
|
|
|
320 }
|
|
|
321 }
|
|
|
322
|
|
|
323
|
|
|
324 int uv_tcp_bind(uv_tcp_t* handle,
|
|
|
325 const struct sockaddr* addr,
|
|
|
326 unsigned int flags) {
|
|
|
327 unsigned int addrlen;
|
|
|
328
|
|
|
329 if (handle->type != UV_TCP)
|
|
|
330 return UV_EINVAL;
|
|
|
331 if (uv__is_closing(handle)) {
|
|
|
332 return UV_EINVAL;
|
|
|
333 }
|
|
|
334 if (addr->sa_family == AF_INET)
|
|
|
335 addrlen = sizeof(struct sockaddr_in);
|
|
|
336 else if (addr->sa_family == AF_INET6)
|
|
|
337 addrlen = sizeof(struct sockaddr_in6);
|
|
|
338 else
|
|
|
339 return UV_EINVAL;
|
|
|
340
|
|
|
341 return uv__tcp_bind(handle, addr, addrlen, flags);
|
|
|
342 }
|
|
|
343
|
|
|
344
|
|
|
345 int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned flags) {
|
|
|
346 unsigned extra_flags;
|
|
|
347 int domain;
|
|
|
348 int rc;
|
|
|
349
|
|
|
350 /* Use the lower 8 bits for the domain. */
|
|
|
351 domain = flags & 0xFF;
|
|
|
352 if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
|
|
|
353 return UV_EINVAL;
|
|
|
354
|
|
|
355 /* Use the higher bits for extra flags. */
|
|
|
356 extra_flags = flags & ~0xFF;
|
|
|
357 if (extra_flags & ~UV_UDP_RECVMMSG)
|
|
|
358 return UV_EINVAL;
|
|
|
359
|
|
|
360 rc = uv__udp_init_ex(loop, handle, flags, domain);
|
|
|
361
|
|
|
362 if (rc == 0)
|
|
|
363 if (extra_flags & UV_UDP_RECVMMSG)
|
|
|
364 handle->flags |= UV_HANDLE_UDP_RECVMMSG;
|
|
|
365
|
|
|
366 return rc;
|
|
|
367 }
|
|
|
368
|
|
|
369
|
|
|
/* Initialize a UDP handle; equivalent to uv_udp_init_ex() with an
 * unspecified domain and no extra flags.
 */
int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
  return uv_udp_init_ex(loop, handle, AF_UNSPEC);
}
|
|
|
373
|
|
|
374
|
|
|
375 int uv_udp_bind(uv_udp_t* handle,
|
|
|
376 const struct sockaddr* addr,
|
|
|
377 unsigned int flags) {
|
|
|
378 unsigned int addrlen;
|
|
|
379
|
|
|
380 if (handle->type != UV_UDP)
|
|
|
381 return UV_EINVAL;
|
|
|
382
|
|
|
383 if (addr->sa_family == AF_INET)
|
|
|
384 addrlen = sizeof(struct sockaddr_in);
|
|
|
385 else if (addr->sa_family == AF_INET6)
|
|
|
386 addrlen = sizeof(struct sockaddr_in6);
|
|
|
387 else
|
|
|
388 return UV_EINVAL;
|
|
|
389
|
|
|
390 return uv__udp_bind(handle, addr, addrlen, flags);
|
|
|
391 }
|
|
|
392
|
|
|
393
|
|
|
394 int uv_tcp_connect(uv_connect_t* req,
|
|
|
395 uv_tcp_t* handle,
|
|
|
396 const struct sockaddr* addr,
|
|
|
397 uv_connect_cb cb) {
|
|
|
398 unsigned int addrlen;
|
|
|
399
|
|
|
400 if (handle->type != UV_TCP)
|
|
|
401 return UV_EINVAL;
|
|
|
402
|
|
|
403 if (addr->sa_family == AF_INET)
|
|
|
404 addrlen = sizeof(struct sockaddr_in);
|
|
|
405 else if (addr->sa_family == AF_INET6)
|
|
|
406 addrlen = sizeof(struct sockaddr_in6);
|
|
|
407 else
|
|
|
408 return UV_EINVAL;
|
|
|
409
|
|
|
410 return uv__tcp_connect(req, handle, addr, addrlen, cb);
|
|
|
411 }
|
|
|
412
|
|
|
413
|
|
|
414 int uv_udp_connect(uv_udp_t* handle, const struct sockaddr* addr) {
|
|
|
415 unsigned int addrlen;
|
|
|
416
|
|
|
417 if (handle->type != UV_UDP)
|
|
|
418 return UV_EINVAL;
|
|
|
419
|
|
|
420 /* Disconnect the handle */
|
|
|
421 if (addr == NULL) {
|
|
|
422 if (!(handle->flags & UV_HANDLE_UDP_CONNECTED))
|
|
|
423 return UV_ENOTCONN;
|
|
|
424
|
|
|
425 return uv__udp_disconnect(handle);
|
|
|
426 }
|
|
|
427
|
|
|
428 if (addr->sa_family == AF_INET)
|
|
|
429 addrlen = sizeof(struct sockaddr_in);
|
|
|
430 else if (addr->sa_family == AF_INET6)
|
|
|
431 addrlen = sizeof(struct sockaddr_in6);
|
|
|
432 else
|
|
|
433 return UV_EINVAL;
|
|
|
434
|
|
|
435 if (handle->flags & UV_HANDLE_UDP_CONNECTED)
|
|
|
436 return UV_EISCONN;
|
|
|
437
|
|
|
438 return uv__udp_connect(handle, addr, addrlen);
|
|
|
439 }
|
|
|
440
|
|
|
441
|
|
|
442 int uv__udp_is_connected(uv_udp_t* handle) {
|
|
|
443 struct sockaddr_storage addr;
|
|
|
444 int addrlen;
|
|
|
445 if (handle->type != UV_UDP)
|
|
|
446 return 0;
|
|
|
447
|
|
|
448 addrlen = sizeof(addr);
|
|
|
449 if (uv_udp_getpeername(handle, (struct sockaddr*) &addr, &addrlen) != 0)
|
|
|
450 return 0;
|
|
|
451
|
|
|
452 return addrlen > 0;
|
|
|
453 }
|
|
|
454
|
|
|
455
|
|
|
456 int uv__udp_check_before_send(uv_udp_t* handle, const struct sockaddr* addr) {
|
|
|
457 unsigned int addrlen;
|
|
|
458
|
|
|
459 if (handle->type != UV_UDP)
|
|
|
460 return UV_EINVAL;
|
|
|
461
|
|
|
462 if (addr != NULL && (handle->flags & UV_HANDLE_UDP_CONNECTED))
|
|
|
463 return UV_EISCONN;
|
|
|
464
|
|
|
465 if (addr == NULL && !(handle->flags & UV_HANDLE_UDP_CONNECTED))
|
|
|
466 return UV_EDESTADDRREQ;
|
|
|
467
|
|
|
468 if (addr != NULL) {
|
|
|
469 if (addr->sa_family == AF_INET)
|
|
|
470 addrlen = sizeof(struct sockaddr_in);
|
|
|
471 else if (addr->sa_family == AF_INET6)
|
|
|
472 addrlen = sizeof(struct sockaddr_in6);
|
|
|
473 #if defined(AF_UNIX) && !defined(_WIN32)
|
|
|
474 else if (addr->sa_family == AF_UNIX)
|
|
|
475 addrlen = sizeof(struct sockaddr_un);
|
|
|
476 #endif
|
|
|
477 else
|
|
|
478 return UV_EINVAL;
|
|
|
479 } else {
|
|
|
480 addrlen = 0;
|
|
|
481 }
|
|
|
482
|
|
|
483 return addrlen;
|
|
|
484 }
|
|
|
485
|
|
|
486
|
|
|
/* Queue an asynchronous send of nbufs buffers as one datagram. addr
 * must be NULL on a connected handle and non-NULL otherwise; validation
 * and sockaddr length derivation are shared with uv_udp_try_send() via
 * uv__udp_check_before_send().
 */
int uv_udp_send(uv_udp_send_t* req,
                uv_udp_t* handle,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                const struct sockaddr* addr,
                uv_udp_send_cb send_cb) {
  int addrlen;

  addrlen = uv__udp_check_before_send(handle, addr);
  if (addrlen < 0)
    return addrlen;

  return uv__udp_send(req, handle, bufs, nbufs, addr, addrlen, send_cb);
}
|
|
|
501
|
|
|
502
|
|
|
/* Synchronous, best-effort counterpart of uv_udp_send(): same
 * validation via uv__udp_check_before_send(), then a single immediate
 * send attempt with no request or callback.
 */
int uv_udp_try_send(uv_udp_t* handle,
                    const uv_buf_t bufs[],
                    unsigned int nbufs,
                    const struct sockaddr* addr) {
  int addrlen;

  addrlen = uv__udp_check_before_send(handle, addr);
  if (addrlen < 0)
    return addrlen;

  return uv__udp_try_send(handle, bufs, nbufs, addr, addrlen);
}
|
|
|
515
|
|
|
516
|
|
|
517 int uv_udp_try_send2(uv_udp_t* handle,
|
|
|
518 unsigned int count,
|
|
|
519 uv_buf_t* bufs[/*count*/],
|
|
|
520 unsigned int nbufs[/*count*/],
|
|
|
521 struct sockaddr* addrs[/*count*/],
|
|
|
522 unsigned int flags) {
|
|
|
523 if (count < 1)
|
|
|
524 return UV_EINVAL;
|
|
|
525
|
|
|
526 if (flags != 0)
|
|
|
527 return UV_EINVAL;
|
|
|
528
|
|
|
529 if (handle->send_queue_count > 0)
|
|
|
530 return UV_EAGAIN;
|
|
|
531
|
|
|
532 return uv__udp_try_send2(handle, count, bufs, nbufs, addrs);
|
|
|
533 }
|
|
|
534
|
|
|
535
|
|
|
536 int uv_udp_recv_start(uv_udp_t* handle,
|
|
|
537 uv_alloc_cb alloc_cb,
|
|
|
538 uv_udp_recv_cb recv_cb) {
|
|
|
539 if (handle->type != UV_UDP || alloc_cb == NULL || recv_cb == NULL)
|
|
|
540 return UV_EINVAL;
|
|
|
541 else
|
|
|
542 return uv__udp_recv_start(handle, alloc_cb, recv_cb);
|
|
|
543 }
|
|
|
544
|
|
|
545
|
|
|
546 int uv_udp_recv_stop(uv_udp_t* handle) {
|
|
|
547 if (handle->type != UV_UDP)
|
|
|
548 return UV_EINVAL;
|
|
|
549 else
|
|
|
550 return uv__udp_recv_stop(handle);
|
|
|
551 }
|
|
|
552
|
|
|
553
|
|
|
/* Invoke walk_cb(handle, arg) once for every handle on the loop, except
 * internal ones.
 *
 * The handle queue is moved wholesale onto a local head first so the
 * walk iterates over a stable snapshot; each node is re-inserted at the
 * loop's tail before its callback runs, so the loop's queue stays
 * consistent whatever the callback does to the handle.
 */
void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
  struct uv__queue queue;
  struct uv__queue* q;
  uv_handle_t* h;

  uv__queue_move(&loop->handle_queue, &queue);
  while (!uv__queue_empty(&queue)) {
    q = uv__queue_head(&queue);
    h = uv__queue_data(q, uv_handle_t, handle_queue);

    /* Return the handle to the loop before calling out. */
    uv__queue_remove(q);
    uv__queue_insert_tail(&loop->handle_queue, q);

    if (h->flags & UV_HANDLE_INTERNAL) continue;
    walk_cb(h, arg);
  }
}
|
|
|
571
|
|
|
572
|
|
|
/* Debugging aid behind uv_print_*_handles(): write one line per handle
 * to stream (stderr when NULL) in the form "[RAI] type address", where
 * R/A/I flag referenced, active and internal handles and '-' marks a
 * cleared flag. loop == NULL means the default loop; only_active
 * restricts the listing to active handles.
 */
static void uv__print_handles(uv_loop_t* loop, int only_active, FILE* stream) {
  const char* type;
  struct uv__queue* q;
  uv_handle_t* h;

  if (loop == NULL)
    loop = uv_default_loop();

  if (stream == NULL)
    stream = stderr;

  uv__queue_foreach(q, &loop->handle_queue) {
    h = uv__queue_data(q, uv_handle_t, handle_queue);

    if (only_active && !uv__is_active(h))
      continue;

    /* Map the handle type enum to its lowercase name. */
    switch (h->type) {
#define X(uc, lc) case UV_##uc: type = #lc; break;
      UV_HANDLE_TYPE_MAP(X)
#undef X
      default: type = "<unknown>";
    }

    /* "R-"[cond] indexes a two-char string: letter when the flag is
     * set (cond is 0), '-' otherwise. */
    fprintf(stream,
            "[%c%c%c] %-8s %p\n",
            "R-"[!(h->flags & UV_HANDLE_REF)],
            "A-"[!(h->flags & UV_HANDLE_ACTIVE)],
            "I-"[!(h->flags & UV_HANDLE_INTERNAL)],
            type,
            (void*)h);
  }
}
|
|
|
606
|
|
|
607
|
|
|
/* Dump every handle on the loop to stream; see uv__print_handles(). */
void uv_print_all_handles(uv_loop_t* loop, FILE* stream) {
  uv__print_handles(loop, 0, stream);
}


/* Dump only the active handles on the loop to stream. */
void uv_print_active_handles(uv_loop_t* loop, FILE* stream) {
  uv__print_handles(loop, 1, stream);
}
|
|
|
616
|
|
|
617
|
|
|
/* Mark the handle as keeping the event loop alive. */
void uv_ref(uv_handle_t* handle) {
  uv__handle_ref(handle);
}


/* Allow the event loop to exit even while the handle is active. */
void uv_unref(uv_handle_t* handle) {
  uv__handle_unref(handle);
}


/* Nonzero when the handle currently holds a loop reference. */
int uv_has_ref(const uv_handle_t* handle) {
  return uv__has_ref(handle);
}


/* Ask uv_run() to return as soon as possible by raising the loop's
 * stop flag; safe to call from a callback on the loop thread.
 */
void uv_stop(uv_loop_t* loop) {
  loop->stop_flag = 1;
}


/* The loop's cached timestamp in milliseconds, updated once per loop
 * iteration — cheap, but not a real-time clock read.
 */
uint64_t uv_now(const uv_loop_t* loop) {
  return loop->time;
}
|
|
|
641
|
|
|
642
|
|
|
643
|
|
|
644 size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) {
|
|
|
645 unsigned int i;
|
|
|
646 size_t bytes;
|
|
|
647
|
|
|
648 bytes = 0;
|
|
|
649 for (i = 0; i < nbufs; i++)
|
|
|
650 bytes += (size_t) bufs[i].len;
|
|
|
651
|
|
|
652 return bytes;
|
|
|
653 }
|
|
|
654
|
|
|
/* Query or change the socket's kernel receive buffer size (SO_RCVBUF);
 * the get/set choice is made by uv__socket_sockopt() based on *value.
 */
int uv_recv_buffer_size(uv_handle_t* handle, int* value) {
  return uv__socket_sockopt(handle, SO_RCVBUF, value);
}

/* Query or change the socket's kernel send buffer size (SO_SNDBUF). */
int uv_send_buffer_size(uv_handle_t* handle, int *value) {
  return uv__socket_sockopt(handle, SO_SNDBUF, value);
}
|
|
|
662
|
|
|
663 int uv_fs_event_getpath(uv_fs_event_t* handle, char* buffer, size_t* size) {
|
|
|
664 size_t required_len;
|
|
|
665
|
|
|
666 if (buffer == NULL || size == NULL || *size == 0)
|
|
|
667 return UV_EINVAL;
|
|
|
668
|
|
|
669 if (!uv__is_active(handle)) {
|
|
|
670 *size = 0;
|
|
|
671 return UV_EINVAL;
|
|
|
672 }
|
|
|
673
|
|
|
674 required_len = strlen(handle->path);
|
|
|
675 if (required_len >= *size) {
|
|
|
676 *size = required_len + 1;
|
|
|
677 return UV_ENOBUFS;
|
|
|
678 }
|
|
|
679
|
|
|
680 memcpy(buffer, handle->path, required_len);
|
|
|
681 *size = required_len;
|
|
|
682 buffer[required_len] = '\0';
|
|
|
683
|
|
|
684 return 0;
|
|
|
685 }
|
|
|
686
|
|
|
/* The windows implementation does not have the same structure layout as
 * the unix implementation (nbufs is not directly inside req but is
 * contained in a nested union/struct) so this function locates it.
 * The returned pointer aliases req and is valid as long as req is.
 */
static unsigned int* uv__get_nbufs(uv_fs_t* req) {
#ifdef _WIN32
  return &req->fs.info.nbufs;
#else
  return &req->nbufs;
#endif
}
|
|
|
698
|
|
|
/* uv_fs_scandir() uses the system allocator to allocate memory on non-Windows
 * systems. So, the memory should be released using free(). On Windows,
 * uv__malloc() is used, so use uv__free() to free memory.
 */
#ifdef _WIN32
# define uv__fs_scandir_free uv__free
#else
# define uv__fs_scandir_free free
#endif

/* Release the dirent array attached to a completed scandir request,
 * including every entry uv_fs_scandir_next() has not yet consumed.
 * Entries before index *nbufs - 1 were already freed by the iterator;
 * the entry at *nbufs - 1 (the one most recently handed out) was not,
 * hence the starting index below.
 */
void uv__fs_scandir_cleanup(uv_fs_t* req) {
  uv__dirent_t** dents;
  unsigned int* nbufs;
  unsigned int i;
  unsigned int n;

  if (req->result >= 0) {
    dents = req->ptr;
    nbufs = uv__get_nbufs(req);

    /* Start from the last entry the iterator handed out (still live). */
    i = 0;
    if (*nbufs > 0)
      i = *nbufs - 1;

    n = (unsigned int) req->result;
    for (; i < n; i++)
      uv__fs_scandir_free(dents[i]);
  }

  uv__fs_scandir_free(req->ptr);
  req->ptr = NULL;
}
|
|
|
731
|
|
|
732
|
|
|
/* Pull the next directory entry from a completed uv_fs_scandir()
 * request into *ent. *nbufs doubles as the iteration cursor: each call
 * frees the previously returned entry, and the whole array is freed
 * once the end is reached. Returns 0, UV_EOF at the end, or the
 * request's own (negative) result on failure. Note ent->name borrows
 * the entry's storage and is invalidated by the next call.
 */
int uv_fs_scandir_next(uv_fs_t* req, uv_dirent_t* ent) {
  uv__dirent_t** dents;
  uv__dirent_t* dent;
  unsigned int* nbufs;

  /* Check to see if req passed */
  if (req->result < 0)
    return req->result;

  /* Ptr will be null if req was canceled or no files found */
  if (!req->ptr)
    return UV_EOF;

  nbufs = uv__get_nbufs(req);
  assert(nbufs);

  dents = req->ptr;

  /* Free previous entity */
  if (*nbufs > 0)
    uv__fs_scandir_free(dents[*nbufs - 1]);

  /* End was already reached */
  if (*nbufs == (unsigned int) req->result) {
    uv__fs_scandir_free(dents);
    req->ptr = NULL;
    return UV_EOF;
  }

  /* Post-increment consumes the current slot and advances the cursor. */
  dent = dents[(*nbufs)++];

  ent->name = dent->d_name;
  ent->type = uv__fs_get_dirent_type(dent);

  return 0;
}
|
|
|
769
|
|
|
/* Translate a platform dirent's d_type into a uv_dirent_type_t. Falls
 * back to UV_DIRENT_UNKNOWN when the platform does not report entry
 * types (no HAVE_DIRENT_TYPES) or the value is unrecognized.
 */
uv_dirent_type_t uv__fs_get_dirent_type(uv__dirent_t* dent) {
  uv_dirent_type_t type;

#ifdef HAVE_DIRENT_TYPES
  switch (dent->d_type) {
    case UV__DT_DIR:
      type = UV_DIRENT_DIR;
      break;
    case UV__DT_FILE:
      type = UV_DIRENT_FILE;
      break;
    case UV__DT_LINK:
      type = UV_DIRENT_LINK;
      break;
    case UV__DT_FIFO:
      type = UV_DIRENT_FIFO;
      break;
    case UV__DT_SOCKET:
      type = UV_DIRENT_SOCKET;
      break;
    case UV__DT_CHAR:
      type = UV_DIRENT_CHAR;
      break;
    case UV__DT_BLOCK:
      type = UV_DIRENT_BLOCK;
      break;
    default:
      type = UV_DIRENT_UNKNOWN;
  }
#else
  type = UV_DIRENT_UNKNOWN;
#endif

  return type;
}
|
|
|
805
|
|
|
806 void uv__fs_readdir_cleanup(uv_fs_t* req) {
|
|
|
807 uv_dir_t* dir;
|
|
|
808 uv_dirent_t* dirents;
|
|
|
809 int i;
|
|
|
810
|
|
|
811 if (req->ptr == NULL)
|
|
|
812 return;
|
|
|
813
|
|
|
814 dir = req->ptr;
|
|
|
815 dirents = dir->dirents;
|
|
|
816 req->ptr = NULL;
|
|
|
817
|
|
|
818 if (dirents == NULL)
|
|
|
819 return;
|
|
|
820
|
|
|
821 for (i = 0; i < req->result; ++i) {
|
|
|
822 uv__free((char*) dirents[i].name);
|
|
|
823 dirents[i].name = NULL;
|
|
|
824 }
|
|
|
825 }
|
|
|
826
|
|
|
827
|
|
|
/* Apply a configuration option to the loop. The variadic arguments are
 * forwarded as a va_list to the platform-specific uv__loop_configure(),
 * which knows which option takes which argument type.
 */
int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...) {
  va_list ap;
  int err;

  va_start(ap, option);
  /* Any platform-agnostic options should be handled here. */
  err = uv__loop_configure(loop, option, ap);
  va_end(ap);

  return err;
}
|
|
|
839
|
|
|
840
|
|
|
/* Static storage for the lazily-initialized default loop.
 * default_loop_ptr stays NULL until uv_default_loop() first succeeds
 * and is reset by uv_loop_close().
 */
static uv_loop_t default_loop_struct;
static uv_loop_t* default_loop_ptr;


/* Return the process-wide default loop, initializing it on first use.
 * Returns NULL when initialization fails.
 */
uv_loop_t* uv_default_loop(void) {
  if (default_loop_ptr != NULL)
    return default_loop_ptr;

  if (uv_loop_init(&default_loop_struct))
    return NULL;

  default_loop_ptr = &default_loop_struct;
  return default_loop_ptr;
}
|
|
|
855
|
|
|
856
|
|
|
857 uv_loop_t* uv_loop_new(void) {
|
|
|
858 uv_loop_t* loop;
|
|
|
859
|
|
|
860 loop = uv__malloc(sizeof(*loop));
|
|
|
861 if (loop == NULL)
|
|
|
862 return NULL;
|
|
|
863
|
|
|
864 if (uv_loop_init(loop)) {
|
|
|
865 uv__free(loop);
|
|
|
866 return NULL;
|
|
|
867 }
|
|
|
868
|
|
|
869 return loop;
|
|
|
870 }
|
|
|
871
|
|
|
872
|
|
|
/* Release a loop's resources. Fails with UV_EBUSY while any request is
 * in flight or any non-internal handle is still open. In debug builds
 * the loop memory is poisoned (except the caller-owned data pointer) to
 * surface use-after-close bugs. Closing the default loop clears
 * default_loop_ptr so uv_default_loop() can re-initialize it later.
 */
int uv_loop_close(uv_loop_t* loop) {
  struct uv__queue* q;
  uv_handle_t* h;
#ifndef NDEBUG
  void* saved_data;
#endif

  if (uv__has_active_reqs(loop))
    return UV_EBUSY;

  uv__queue_foreach(q, &loop->handle_queue) {
    h = uv__queue_data(q, uv_handle_t, handle_queue);
    if (!(h->flags & UV_HANDLE_INTERNAL))
      return UV_EBUSY;
  }

  uv__loop_close(loop);

#ifndef NDEBUG
  /* Poison the loop but preserve the user's data pointer. */
  saved_data = loop->data;
  memset(loop, -1, sizeof(*loop));
  loop->data = saved_data;
#endif
  if (loop == default_loop_ptr)
    default_loop_ptr = NULL;

  return 0;
}
|
|
|
901
|
|
|
902
|
|
|
903 void uv_loop_delete(uv_loop_t* loop) {
|
|
|
904 uv_loop_t* default_loop;
|
|
|
905 int err;
|
|
|
906
|
|
|
907 default_loop = default_loop_ptr;
|
|
|
908
|
|
|
909 err = uv_loop_close(loop);
|
|
|
910 (void) err; /* Squelch compiler warnings. */
|
|
|
911 assert(err == 0);
|
|
|
912 if (loop != default_loop)
|
|
|
913 uv__free(loop);
|
|
|
914 }
|
|
|
915
|
|
|
916
|
|
|
917 int uv_read_start(uv_stream_t* stream,
|
|
|
918 uv_alloc_cb alloc_cb,
|
|
|
919 uv_read_cb read_cb) {
|
|
|
920 if (stream == NULL || alloc_cb == NULL || read_cb == NULL)
|
|
|
921 return UV_EINVAL;
|
|
|
922
|
|
|
923 if (stream->flags & UV_HANDLE_CLOSING)
|
|
|
924 return UV_EINVAL;
|
|
|
925
|
|
|
926 if (stream->flags & UV_HANDLE_READING)
|
|
|
927 return UV_EALREADY;
|
|
|
928
|
|
|
929 if (!(stream->flags & UV_HANDLE_READABLE))
|
|
|
930 return UV_ENOTCONN;
|
|
|
931
|
|
|
932 return uv__read_start(stream, alloc_cb, read_cb);
|
|
|
933 }
|
|
|
934
|
|
|
935
|
|
|
936 void uv_os_free_environ(uv_env_item_t* envitems, int count) {
|
|
|
937 int i;
|
|
|
938
|
|
|
939 for (i = 0; i < count; i++) {
|
|
|
940 uv__free(envitems[i].name);
|
|
|
941 }
|
|
|
942
|
|
|
943 uv__free(envitems);
|
|
|
944 }
|
|
|
945
|
|
|
946
|
|
|
/* Free the array returned by uv_cpu_info(). On Linux only the array is
 * freed — the model strings are evidently not separate allocations
 * there — while elsewhere each entry's model string is freed first.
 */
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
#ifdef __linux__
  (void) &count;  /* unused on this branch */
  uv__free(cpu_infos);
#else
  int i;

  for (i = 0; i < count; i++)
    uv__free(cpu_infos[i].model);

  uv__free(cpu_infos);
#endif /* __linux__ */
}
|
|
|
960
|
|
|
961
|
|
|
/* Also covers __clang__ and __INTEL_COMPILER. Disabled on Windows because
 * threads have already been forcibly terminated by the operating system
 * by the time destructors run, ergo, it's not safe to try to clean them up.
 */
#if defined(__GNUC__) && !defined(_WIN32)
__attribute__((destructor))
#endif
/* One-shot global teardown: process title, signal handling and the
 * threadpool (or the z/OS cleanup instead). The relaxed atomic exchange
 * on was_shutdown makes repeated or concurrent calls no-ops after the
 * first.
 */
void uv_library_shutdown(void) {
  static int was_shutdown;

  if (uv__exchange_int_relaxed(&was_shutdown, 1))
    return;

  uv__process_title_cleanup();
  uv__signal_cleanup();
#ifdef __MVS__
  /* TODO(itodorov) - zos: revisit when Woz compiler is available. */
  uv__os390_cleanup();
#else
  uv__threadpool_cleanup();
#endif
}
|
|
|
984
|
|
|
985
|
|
|
/* Fold a just-finished provider (I/O poll) interval into the loop's
 * accumulated idle time. No-op unless the UV_METRICS_IDLE_TIME option
 * is set or when no entry timestamp is pending.
 */
void uv__metrics_update_idle_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t entry_time;
  uint64_t exit_time;

  if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
    return;

  loop_metrics = uv__get_loop_metrics(loop);

  /* The thread running uv__metrics_update_idle_time() is always the same
   * thread that sets provider_entry_time. So it's unnecessary to lock before
   * retrieving this value.
   */
  if (loop_metrics->provider_entry_time == 0)
    return;

  exit_time = uv_hrtime();

  uv_mutex_lock(&loop_metrics->lock);
  entry_time = loop_metrics->provider_entry_time;
  /* Zeroing marks the pending interval as consumed. */
  loop_metrics->provider_entry_time = 0;
  loop_metrics->provider_idle_time += exit_time - entry_time;
  uv_mutex_unlock(&loop_metrics->lock);
}
|
|
|
1011
|
|
|
1012
|
|
|
1013 void uv__metrics_set_provider_entry_time(uv_loop_t* loop) {
|
|
|
1014 uv__loop_metrics_t* loop_metrics;
|
|
|
1015 uint64_t now;
|
|
|
1016
|
|
|
1017 if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
|
|
|
1018 return;
|
|
|
1019
|
|
|
1020 now = uv_hrtime();
|
|
|
1021 loop_metrics = uv__get_loop_metrics(loop);
|
|
|
1022 uv_mutex_lock(&loop_metrics->lock);
|
|
|
1023 loop_metrics->provider_entry_time = now;
|
|
|
1024 uv_mutex_unlock(&loop_metrics->lock);
|
|
|
1025 }
|
|
|
1026
|
|
|
1027
|
|
|
1028 int uv_metrics_info(uv_loop_t* loop, uv_metrics_t* metrics) {
|
|
|
1029 memcpy(metrics,
|
|
|
1030 &uv__get_loop_metrics(loop)->metrics,
|
|
|
1031 sizeof(*metrics));
|
|
|
1032
|
|
|
1033 return 0;
|
|
|
1034 }
|
|
|
1035
|
|
|
1036
|
|
|
1037 uint64_t uv_metrics_idle_time(uv_loop_t* loop) {
|
|
|
1038 uv__loop_metrics_t* loop_metrics;
|
|
|
1039 uint64_t entry_time;
|
|
|
1040 uint64_t idle_time;
|
|
|
1041
|
|
|
1042 loop_metrics = uv__get_loop_metrics(loop);
|
|
|
1043 uv_mutex_lock(&loop_metrics->lock);
|
|
|
1044 idle_time = loop_metrics->provider_idle_time;
|
|
|
1045 entry_time = loop_metrics->provider_entry_time;
|
|
|
1046 uv_mutex_unlock(&loop_metrics->lock);
|
|
|
1047
|
|
|
1048 if (entry_time > 0)
|
|
|
1049 idle_time += uv_hrtime() - entry_time;
|
|
|
1050 return idle_time;
|
|
|
1051 }
|