comparison third_party/libuv/src/unix/fs.c @ 160:948de3f54cea

[ThirdParty] Added libuv
author June Park <parkjune1995@gmail.com>
date Wed, 14 Jan 2026 19:39:52 -0800
parents
children
comparison
equal deleted inserted replaced
159:05cf9467a1c3 160:948de3f54cea
1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22 /* Caveat emptor: this file deviates from the libuv convention of returning
23 * negated errno codes. Most uv_fs_*() functions map directly to the system
24 * call of the same name. For more complex wrappers, it's easier to just
25 * return -1 with errno set. The dispatcher in uv__fs_work() takes care of
26 * getting the errno to the right place (req->result or as the return value.)
27 */
28
29 #include "uv.h"
30 #include "internal.h"
31
32 #include <errno.h>
33 #include <dlfcn.h>
34 #include <stdatomic.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <string.h>
38 #include <limits.h> /* PATH_MAX */
39
40 #include <sys/types.h>
41 #include <sys/socket.h>
42 #include <sys/stat.h>
43 #include <sys/time.h>
44 #include <sys/uio.h>
45 #include <unistd.h>
46 #include <fcntl.h>
47 #include <poll.h>
48
49 #if defined(__linux__)
50 # include <sys/sendfile.h>
51 #endif
52
53 #if defined(__sun)
54 # include <sys/sendfile.h>
55 # include <sys/sysmacros.h>
56 #endif
57
58 #if defined(__APPLE__)
59 # include <sys/sysctl.h>
60 #elif defined(__linux__) && !defined(FICLONE)
61 # include <sys/ioctl.h>
62 # define FICLONE _IOW(0x94, 9, int)
63 #endif
64
65 #if defined(_AIX) && !defined(_AIX71)
66 # include <utime.h>
67 #endif
68
69 #if defined(__APPLE__) || \
70 defined(__DragonFly__) || \
71 defined(__FreeBSD__) || \
72 defined(__OpenBSD__) || \
73 defined(__NetBSD__)
74 # include <sys/param.h>
75 # include <sys/mount.h>
76 #elif defined(__sun) || \
77 defined(__MVS__) || \
78 defined(__NetBSD__) || \
79 defined(__HAIKU__) || \
80 defined(__QNX__)
81 # include <sys/statvfs.h>
82 #else
83 # include <sys/statfs.h>
84 #endif
85
86 #if defined(_AIX) && _XOPEN_SOURCE <= 600
87 extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
88 #endif
89
/* Common prologue for every uv_fs_*() entry point: validate `req`, stamp it
 * with the request subtype and reset every result-carrying field. Expands in
 * a scope where `req`, `loop` and `cb` are the caller's parameters.
 */
#define INIT(subtype)                                                         \
  do {                                                                        \
    if (req == NULL)                                                          \
      return UV_EINVAL;                                                       \
    UV_REQ_INIT(req, UV_FS);                                                  \
    req->fs_type = UV_FS_ ## subtype;                                         \
    req->result = 0;                                                          \
    req->ptr = NULL;                                                          \
    req->loop = loop;                                                         \
    req->path = NULL;                                                         \
    req->new_path = NULL;                                                     \
    req->bufs = NULL;                                                         \
    req->cb = cb;                                                             \
  }                                                                           \
  while (0)
105
/* Record `path` on the request. Synchronous calls (cb == NULL) borrow the
 * caller's string; asynchronous calls copy it because the caller's buffer may
 * be gone by the time the worker thread runs.
 */
#define PATH                                                                  \
  do {                                                                        \
    assert(path != NULL);                                                     \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
    } else {                                                                  \
      req->path = uv__strdup(path);                                           \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
118
/* Record `path` and `new_path` on the request (two-path ops: rename, link,
 * copyfile, ...). The async case packs both strings into one allocation so
 * cleanup only has to free req->path.
 */
#define PATH2                                                                 \
  do {                                                                        \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
      req->new_path = new_path;                                               \
    } else {                                                                  \
      size_t path_len;                                                        \
      size_t new_path_len;                                                    \
      path_len = strlen(path) + 1;                                            \
      new_path_len = strlen(new_path) + 1;                                    \
      req->path = uv__malloc(path_len + new_path_len);                        \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
      req->new_path = req->path + path_len;                                   \
      memcpy((void*) req->path, path, path_len);                              \
      memcpy((void*) req->new_path, new_path, new_path_len);                  \
    }                                                                         \
  }                                                                           \
  while (0)
138
/* Dispatch the prepared request: queue it on the thread pool when a callback
 * was supplied, otherwise run it inline on the calling thread and return the
 * operation's result directly.
 */
#define POST                                                                  \
  do {                                                                        \
    if (cb != NULL) {                                                         \
      uv__req_register(loop);                                                 \
      uv__work_submit(loop,                                                   \
                      &req->work_req,                                         \
                      UV__WORK_FAST_IO,                                       \
                      uv__fs_work,                                            \
                      uv__fs_done);                                           \
      return 0;                                                               \
    }                                                                         \
    else {                                                                    \
      uv__fs_work(&req->work_req);                                            \
      return req->result;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
156
157
/* close() wrapper that treats an interrupted or still-in-progress close as
 * success: the descriptor is gone either way, so there is nothing for the
 * caller to retry.
 */
static int uv__fs_close(int fd) {
  int r = uv__close_nocancel(fd);

  if (r == -1 && (errno == EINTR || errno == EINPROGRESS))
    r = 0;  /* The close is in progress, not an error. */

  return r;
}
168
169
/* Flush req->file to stable storage. On Apple platforms a plain fsync() does
 * not flush the drive's write cache, so stronger fcntl() commands are tried
 * first (see the comment below); elsewhere this is just fsync().
 */
static ssize_t uv__fs_fsync(uv_fs_t* req) {
#if defined(__APPLE__)
  /* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
   * to the drive platters. This is in contrast to Linux's fdatasync and fsync
   * which do, according to recent man pages. F_FULLFSYNC is Apple's equivalent
   * for flushing buffered data to permanent storage. If F_FULLFSYNC is not
   * supported by the file system we fall back to F_BARRIERFSYNC or fsync().
   * This is the same approach taken by sqlite, except sqlite does not issue
   * an F_BARRIERFSYNC call.
   */
  int r;

  r = fcntl(req->file, F_FULLFSYNC);
  if (r != 0)
    /* 85 is F_BARRIERFSYNC; the raw value is used because older SDKs may not
     * define the symbolic name.
     */
    r = fcntl(req->file, 85 /* F_BARRIERFSYNC */);  /* fsync + barrier */
  if (r != 0)
    r = fsync(req->file);
  return r;
#else
  return fsync(req->file);
#endif
}
192
193
/* Flush req->file's data (not necessarily metadata) to disk, using the
 * cheapest primitive the platform provides and falling back to fsync().
 */
static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
  return fdatasync(req->file);
#elif defined(__APPLE__)
  /* See the comment in uv__fs_fsync. */
  return uv__fs_fsync(req);
#else
  return fsync(req->file);
#endif
}
204
205
#if defined(__APPLE__)                                                        \
    || defined(_AIX71)                                                        \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__HAIKU__)                                                     \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)                                                   \
    || defined(__linux__)                                                     \
    || defined(__sun)
/* Convert a libuv timestamp (double, seconds since epoch) to a timespec.
 * Infinity maps to UTIME_NOW and NaN to UTIME_OMIT, the utimensat() magic
 * values for "now" and "leave unchanged".
 */
static struct timespec uv__fs_to_timespec(double time) {
  struct timespec ts;

  if (uv__isinf(time))
    return (struct timespec){UTIME_NOW, UTIME_NOW};
  if (uv__isnan(time))
    return (struct timespec){UTIME_OMIT, UTIME_OMIT};

  ts.tv_sec  = time;
  ts.tv_nsec = (time - ts.tv_sec) * 1e9;

  /* TODO(bnoordhuis) Remove this. utimesat() has nanosecond resolution but we
   * stick to microsecond resolution for the sake of consistency with other
   * platforms. I'm the original author of this compatibility hack but I'm
   * less convinced it's useful nowadays.
   */
  ts.tv_nsec -= ts.tv_nsec % 1000;

  /* For negative timestamps the truncated fraction can come out negative;
   * normalize so tv_nsec is in [0, 1e9). */
  if (ts.tv_nsec < 0) {
    ts.tv_nsec += 1e9;
    ts.tv_sec -= 1;
  }
  return ts;
}
#endif
240
241
/* Set access/modification times on the open descriptor req->file from
 * req->atime / req->mtime. Returns -1 with errno set (ENOSYS on platforms
 * with no suitable primitive).
 */
static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__APPLE__)                                                        \
    || defined(_AIX71)                                                        \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__HAIKU__)                                                     \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)                                                   \
    || defined(__linux__)                                                     \
    || defined(__sun)
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return futimens(req->file, ts);
#elif defined(__MVS__)
  /* z/OS has no futimens(); use the platform's attribute-change service. */
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __fchattr(req->file, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
269
270
271 static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
272 return mkdtemp((char*) req->path) ? 0 : -1;
273 }
274
275
/* Lazily-resolved pointer to mkostemp(); NULL when the libc doesn't have it. */
static int (*uv__mkostemp)(char*, int);


/* uv_once() initializer: look up mkostemp() at runtime so we keep working on
 * libcs that lack it (the symbol is optional; glibc >= 2.7).
 */
static void uv__mkostemp_initonce(void) {
  /* z/os doesn't have RTLD_DEFAULT but that's okay
   * because it doesn't have mkostemp(O_CLOEXEC) either.
   */
#ifdef RTLD_DEFAULT
  uv__mkostemp = (int (*)(char*, int)) dlsym(RTLD_DEFAULT, "mkostemp");

  /* We don't care about errors, but we do want to clean them up.
   * If there has been no error, then dlerror() will just return
   * NULL.
   */
  dlerror();
#endif  /* RTLD_DEFAULT */
}
293
294
/* Create and open a unique temporary file from the "...XXXXXX" template in
 * req->path. Prefers mkostemp(O_CLOEXEC) when available; otherwise falls back
 * to mkstemp() + uv__cloexec() under the loop's cloexec read lock to avoid
 * leaking the fd into a concurrently fork+exec'd child. Returns the fd, or
 * -1 with errno set; on failure the template string is clobbered to "".
 */
static int uv__fs_mkstemp(uv_fs_t* req) {
  static uv_once_t once = UV_ONCE_INIT;
  int r;
#ifdef O_CLOEXEC
  /* Sticky flag: set once mkostemp() is known to reject O_CLOEXEC. */
  static _Atomic int no_cloexec_support;
#endif
  static const char pattern[] = "XXXXXX";
  static const size_t pattern_size = sizeof(pattern) - 1;
  char* path;
  size_t path_length;

  path = (char*) req->path;
  path_length = strlen(path);

  /* EINVAL can be returned for 2 reasons:
      1. The template's last 6 characters were not XXXXXX
      2. open() didn't support O_CLOEXEC
     We want to avoid going to the fallback path in case
     of 1, so it's manually checked before. */
  if (path_length < pattern_size ||
      strcmp(path + path_length - pattern_size, pattern)) {
    errno = EINVAL;
    r = -1;
    goto clobber;
  }

  uv_once(&once, uv__mkostemp_initonce);

#ifdef O_CLOEXEC
  if (atomic_load_explicit(&no_cloexec_support, memory_order_relaxed) == 0 &&
      uv__mkostemp != NULL) {
    r = uv__mkostemp(path, O_CLOEXEC);

    if (r >= 0)
      return r;

    /* If mkostemp() returns EINVAL, it means the kernel doesn't
       support O_CLOEXEC, so we just fallback to mkstemp() below. */
    if (errno != EINVAL)
      goto clobber;

    /* We set the static variable so that next calls don't even
       try to use mkostemp. */
    atomic_store_explicit(&no_cloexec_support, 1, memory_order_relaxed);
  }
#endif  /* O_CLOEXEC */

  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = mkstemp(path);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

clobber:
  if (r < 0)
    path[0] = '\0';
  return r;
}
365
366
/* open() req->path with req->flags/req->mode, always ending up with a
 * close-on-exec descriptor. Without O_CLOEXEC support the fd is fixed up
 * after the fact under the loop's cloexec read lock so it cannot leak into
 * a concurrently fork+exec'd child.
 */
static ssize_t uv__fs_open(uv_fs_t* req) {
#ifdef O_CLOEXEC
  return open(req->path, req->flags | O_CLOEXEC, req->mode);
#else  /* O_CLOEXEC */
  int r;

  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = open(req->path, req->flags, req->mode);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

  return r;
#endif  /* O_CLOEXEC */
}
394
395
/* Emulate preadv()/pwritev() with a sequence of pread()/pwrite() calls, for
 * platforms or libcs without the vectored positional syscalls. Returns the
 * number of bytes transferred, which may be short if one of the underlying
 * calls transfers less than requested; returns -1 (errno set) only when
 * nothing at all was transferred.
 */
static ssize_t uv__preadv_or_pwritev_emul(int fd,
                                          const struct iovec* bufs,
                                          size_t nbufs,
                                          off_t off,
                                          int is_pread) {
  ssize_t transferred;
  size_t idx;

  transferred = 0;

  for (idx = 0; idx < nbufs; idx++) {
    void* base = bufs[idx].iov_base;
    size_t want = bufs[idx].iov_len;
    ssize_t got;

    /* Retry the syscall while it is interrupted by a signal. */
    for (;;) {
      got = is_pread ? pread(fd, base, want, off)
                     : pwrite(fd, base, want, off);
      if (got != -1 || errno != EINTR)
        break;
    }

    if (got == -1)
      return transferred > 0 ? transferred : -1;

    off += got;
    transferred += got;

    /* Short transfer: report what we have so far. */
    if ((size_t) got < want)
      break;
  }

  return transferred;
}
434
435
/* Iovec-count parameter type for preadv/pwritev: glibc declares it as int,
 * most other platforms use size_t.
 */
#ifdef __linux__
typedef int uv__iovcnt;
#else
typedef size_t uv__iovcnt;
#endif
441
442
/* preadv()-shaped adapter over the emulation loop (used as a fallback when
 * the real symbol can't be resolved at runtime).
 */
static ssize_t uv__preadv_emul(int fd,
                               const struct iovec* bufs,
                               uv__iovcnt nbufs,
                               off_t off) {
  return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/1);
}
449
450
/* pwritev()-shaped adapter over the emulation loop (fallback counterpart of
 * uv__preadv_emul).
 */
static ssize_t uv__pwritev_emul(int fd,
                                const struct iovec* bufs,
                                uv__iovcnt nbufs,
                                off_t off) {
  return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/0);
}
457
458
/* The function pointer cache is an uintptr_t because _Atomic void*
 * doesn't work on macos/ios/etc...
 */
/* Vectored positional read/write with runtime symbol resolution: the first
 * call probes for preadv64/pwritev64, then preadv/pwritev, and finally falls
 * back to the pread/pwrite emulation. The resolved pointer is cached in
 * `*cache` (relaxed atomics are fine: every thread resolves the same value).
 */
static ssize_t uv__preadv_or_pwritev(int fd,
                                     const struct iovec* bufs,
                                     size_t nbufs,
                                     off_t off,
                                     _Atomic uintptr_t* cache,
                                     int is_pread) {
  ssize_t (*f)(int, const struct iovec*, uv__iovcnt, off_t);
  void* p;

  p = (void*) atomic_load_explicit(cache, memory_order_relaxed);
  if (p == NULL) {
#ifdef RTLD_DEFAULT
    /* Try _LARGEFILE_SOURCE version of preadv/pwritev first,
     * then fall back to the plain version, for libcs like musl.
     */
    p = dlsym(RTLD_DEFAULT, is_pread ? "preadv64" : "pwritev64");
    if (p == NULL)
      p = dlsym(RTLD_DEFAULT, is_pread ? "preadv" : "pwritev");
    dlerror();  /* Clear errors. */
#endif  /* RTLD_DEFAULT */
    if (p == NULL)
      p = is_pread ? uv__preadv_emul : uv__pwritev_emul;
    atomic_store_explicit(cache, (uintptr_t) p, memory_order_relaxed);
  }

  f = p;
  return f(fd, bufs, nbufs, off);
}
490
491
/* preadv() with lazy symbol resolution; see uv__preadv_or_pwritev. */
static ssize_t uv__preadv(int fd,
                          const struct iovec* bufs,
                          size_t nbufs,
                          off_t off) {
  static _Atomic uintptr_t cache;  /* resolved function pointer, 0 = unset */
  return uv__preadv_or_pwritev(fd, bufs, nbufs, off, &cache, /*is_pread*/1);
}
499
500
/* pwritev() with lazy symbol resolution; see uv__preadv_or_pwritev. */
static ssize_t uv__pwritev(int fd,
                           const struct iovec* bufs,
                           size_t nbufs,
                           off_t off) {
  static _Atomic uintptr_t cache;  /* resolved function pointer, 0 = unset */
  return uv__preadv_or_pwritev(fd, bufs, nbufs, off, &cache, /*is_pread*/0);
}
508
509
/* Worker for uv_fs_read(): scatter-read into req->bufs at req->off (a
 * negative offset means "current file position"). The buffer count is capped
 * at the platform IOV_MAX. Returns bytes read or -1 with errno set; always
 * releases/clears the request's buffer list.
 */
static ssize_t uv__fs_read(uv_fs_t* req) {
  const struct iovec* bufs;
  unsigned int iovmax;
  size_t nbufs;
  ssize_t r;
  off_t off;
  int fd;

  fd = req->file;
  off = req->off;
  bufs = (const struct iovec*) req->bufs;
  nbufs = req->nbufs;

  iovmax = uv__getiovmax();
  if (nbufs > iovmax)
    nbufs = iovmax;

  r = 0;
  if (off < 0) {
    /* No offset: plain read()/readv() advancing the file position. */
    if (nbufs == 1)
      r = read(fd, bufs->iov_base, bufs->iov_len);
    else if (nbufs > 1)
      r = readv(fd, bufs, nbufs);
  } else {
    /* Positional read; does not move the file position. */
    if (nbufs == 1)
      r = pread(fd, bufs->iov_base, bufs->iov_len, off);
    else if (nbufs > 1)
      r = uv__preadv(fd, bufs, nbufs, off);
  }

#ifdef __PASE__
  /* PASE returns EOPNOTSUPP when reading a directory, convert to EISDIR */
  if (r == -1 && errno == EOPNOTSUPP) {
    struct stat buf;
    ssize_t rc;
    rc = uv__fstat(fd, &buf);
    if (rc == 0 && S_ISDIR(buf.st_mode)) {
      errno = EISDIR;
    }
  }
#endif

  /* We don't own the buffer list in the synchronous case. */
  if (req->cb != NULL)
    if (req->bufs != req->bufsml)
      uv__free(req->bufs);

  req->bufs = NULL;
  req->nbufs = 0;

  return r;
}
562
563
564 static int uv__fs_scandir_filter(const uv__dirent_t* dent) {
565 return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
566 }
567
568
569 static int uv__fs_scandir_sort(const uv__dirent_t** a, const uv__dirent_t** b) {
570 return strcmp((*a)->d_name, (*b)->d_name);
571 }
572
573
/* Worker for uv_fs_scandir(): read all entries of req->path, filtered and
 * sorted, into a system-allocated array stored on req->ptr. Returns the
 * entry count or -1 with errno set. req->nbufs is repurposed as the cursor
 * used later by uv_fs_scandir_next().
 */
static ssize_t uv__fs_scandir(uv_fs_t* req) {
  uv__dirent_t** dents;
  int n;

  dents = NULL;
  n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort);

  /* NOTE: We will use nbufs as an index field */
  req->nbufs = 0;

  if (n == 0) {
    /* OS X still needs to deallocate some memory.
     * Memory was allocated using the system allocator, so use free() here.
     */
    free(dents);
    dents = NULL;
  } else if (n == -1) {
    return n;
  }

  req->ptr = dents;

  return n;
}
598
599 static int uv__fs_opendir(uv_fs_t* req) {
600 uv_dir_t* dir;
601
602 dir = uv__malloc(sizeof(*dir));
603 if (dir == NULL)
604 goto error;
605
606 dir->dir = opendir(req->path);
607 if (dir->dir == NULL)
608 goto error;
609
610 req->ptr = dir;
611 return 0;
612
613 error:
614 uv__free(dir);
615 req->ptr = NULL;
616 return -1;
617 }
618
/* Worker for uv_fs_readdir(): fill up to dir->nentries slots of dir->dirents
 * with copies of the next directory entries, skipping "." and "..". Returns
 * the number of slots filled (0 at end of directory) or -1 with errno set;
 * on error any names duplicated so far are freed.
 */
static int uv__fs_readdir(uv_fs_t* req) {
  uv_dir_t* dir;
  uv_dirent_t* dirent;
  struct dirent* res;
  unsigned int dirent_idx;
  unsigned int i;

  dir = req->ptr;
  dirent_idx = 0;

  while (dirent_idx < dir->nentries) {
    /* readdir() returns NULL on end of directory, as well as on error. errno
       is used to differentiate between the two conditions. */
    errno = 0;
    res = readdir(dir->dir);

    if (res == NULL) {
      if (errno != 0)
        goto error;
      break;  /* end of directory */
    }

    /* Skip pseudo-entries without consuming an output slot. */
    if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
      continue;

    dirent = &dir->dirents[dirent_idx];
    dirent->name = uv__strdup(res->d_name);

    if (dirent->name == NULL)
      goto error;  /* ENOMEM from uv__strdup */

    dirent->type = uv__fs_get_dirent_type(res);
    ++dirent_idx;
  }

  return dirent_idx;

error:
  /* Unwind: release every name copied before the failure. */
  for (i = 0; i < dirent_idx; ++i) {
    uv__free((char*) dir->dirents[i].name);
    dir->dirents[i].name = NULL;
  }

  return -1;
}
664
665 static int uv__fs_closedir(uv_fs_t* req) {
666 uv_dir_t* dir;
667
668 dir = req->ptr;
669
670 if (dir->dir != NULL) {
671 closedir(dir->dir);
672 dir->dir = NULL;
673 }
674
675 uv__free(req->ptr);
676 req->ptr = NULL;
677 return 0;
678 }
679
/* Worker for uv_fs_statfs(): query filesystem statistics for req->path via
 * statvfs() or statfs() (whichever the platform provides) and copy them into
 * a freshly allocated uv_statfs_t on req->ptr. Returns 0, or -1 with errno
 * set.
 */
static int uv__fs_statfs(uv_fs_t* req) {
  uv_statfs_t* stat_fs;
#if defined(__sun)      || \
    defined(__MVS__)    || \
    defined(__NetBSD__) || \
    defined(__HAIKU__)  || \
    defined(__QNX__)
  struct statvfs buf;

  if (0 != statvfs(req->path, &buf))
#else
  struct statfs buf;

  if (0 != statfs(req->path, &buf))
#endif  /* defined(__sun) */
    return -1;

  stat_fs = uv__malloc(sizeof(*stat_fs));
  if (stat_fs == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__sun)        || \
    defined(__MVS__)      || \
    defined(__OpenBSD__)  || \
    defined(__NetBSD__)   || \
    defined(__HAIKU__)    || \
    defined(__QNX__)
  stat_fs->f_type = 0;  /* f_type is not supported. */
#else
  stat_fs->f_type = buf.f_type;
#endif
  stat_fs->f_bsize = buf.f_bsize;
  stat_fs->f_blocks = buf.f_blocks;
  stat_fs->f_bfree = buf.f_bfree;
  stat_fs->f_bavail = buf.f_bavail;
  stat_fs->f_files = buf.f_files;
  stat_fs->f_ffree = buf.f_ffree;
  req->ptr = stat_fs;
  return 0;
}
722
723 static ssize_t uv__fs_pathmax_size(const char* path) {
724 ssize_t pathmax;
725
726 pathmax = pathconf(path, _PC_PATH_MAX);
727
728 if (pathmax == -1)
729 pathmax = UV__PATH_MAX;
730
731 return pathmax;
732 }
733
/* Worker for uv_fs_readlink(): read the target of the symlink req->path into
 * a NUL-terminated heap buffer stored on req->ptr. The buffer is sized from
 * PATH_MAX when available, otherwise from lstat()'s st_size. Returns 0, or
 * -1 with errno set.
 */
static ssize_t uv__fs_readlink(uv_fs_t* req) {
  ssize_t maxlen;
  ssize_t len;
  char* buf;

#if defined(_POSIX_PATH_MAX) || defined(PATH_MAX)
  maxlen = uv__fs_pathmax_size(req->path);
#else
  /* We may not have a real PATH_MAX. Read size of link. */
  struct stat st;
  int ret;
  ret = uv__lstat(req->path, &st);
  if (ret != 0)
    return -1;
  if (!S_ISLNK(st.st_mode)) {
    errno = EINVAL;
    return -1;
  }

  maxlen = st.st_size;

  /* According to readlink(2) lstat can report st_size == 0
     for some symlinks, such as those in /proc or /sys. */
  if (maxlen == 0)
    maxlen = uv__fs_pathmax_size(req->path);
#endif

  buf = uv__malloc(maxlen);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__MVS__)
  len = os390_readlink(req->path, buf, maxlen);
#else
  len = readlink(req->path, buf, maxlen);
#endif

  if (len == -1) {
    uv__free(buf);
    return -1;
  }

  /* Uncommon case: resize to make room for the trailing nul byte.
   * uv__reallocf() frees the old buffer on failure, so no leak here. */
  if (len == maxlen) {
    buf = uv__reallocf(buf, len + 1);

    if (buf == NULL)
      return -1;
  }

  buf[len] = '\0';
  req->ptr = buf;

  return 0;
}
792
/* Worker for uv_fs_realpath(): canonicalize req->path into a heap buffer
 * stored on req->ptr. On POSIX.1-2008 systems realpath(path, NULL) allocates
 * for us (with the system allocator, hence the strdup into uv's allocator);
 * older systems get a caller-sized buffer. Returns 0, or -1 with errno set.
 */
static ssize_t uv__fs_realpath(uv_fs_t* req) {
  char* buf;
  char* tmp;

#if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
  tmp = realpath(req->path, NULL);
  if (tmp == NULL)
    return -1;
  buf = uv__strdup(tmp);
  free(tmp);  /* _Not_ uv__free. */
  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }
#else
  ssize_t len;

  (void)tmp;

  len = uv__fs_pathmax_size(req->path);
  buf = uv__malloc(len + 1);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

  if (realpath(req->path, buf) == NULL) {
    uv__free(buf);
    return -1;
  }
#endif

  req->ptr = buf;

  return 0;
}
830
/* Userspace sendfile() emulation: copy up to req->bufsml[0].len bytes from
 * req->flags (source fd) to req->file (destination fd) through an 8 KiB
 * stack buffer. Returns bytes sent, or -1 with errno set (see the rules in
 * the comment below). On success/partial success req->off is advanced.
 */
static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
  struct pollfd pfd;
  int use_pread;
  off_t offset;
  ssize_t nsent;
  ssize_t nread;
  ssize_t nwritten;
  size_t buflen;
  size_t len;
  ssize_t n;
  int in_fd;
  int out_fd;
  char buf[8192];

  len = req->bufsml[0].len;
  in_fd = req->flags;   /* source fd is smuggled through req->flags */
  out_fd = req->file;
  offset = req->off;
  use_pread = 1;

  /* Here are the rules regarding errors:
   *
   * 1. Read errors are reported only if nsent==0, otherwise we return nsent.
   *    The user needs to know that some data has already been sent, to stop
   *    them from sending it twice.
   *
   * 2. Write errors are always reported. Write errors are bad because they
   *    mean data loss: we've read data but now we can't write it out.
   *
   * We try to use pread() and fall back to regular read() if the source fd
   * doesn't support positional reads, for example when it's a pipe fd.
   *
   * If we get EAGAIN when writing to the target fd, we poll() on it until
   * it becomes writable again.
   *
   * FIXME: If we get a write error when use_pread==1, it should be safe to
   *        return the number of sent bytes instead of an error because pread()
   *        is, in theory, idempotent. However, special files in /dev or /proc
   *        may support pread() but not necessarily return the same data on
   *        successive reads.
   *
   * FIXME: There is no way now to signal that we managed to send *some* data
   *        before a write error.
   */
  for (nsent = 0; (size_t) nsent < len; ) {
    buflen = len - nsent;

    if (buflen > sizeof(buf))
      buflen = sizeof(buf);

    do
      if (use_pread)
        nread = pread(in_fd, buf, buflen, offset);
      else
        nread = read(in_fd, buf, buflen);
    while (nread == -1 && errno == EINTR);

    if (nread == 0)
      goto out;  /* EOF on the source */

    if (nread == -1) {
      /* Source doesn't support positional reads: retry with plain read(). */
      if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
        use_pread = 0;
        continue;
      }

      if (nsent == 0)
        nsent = -1;

      goto out;
    }

    /* Drain the chunk to the destination, polling through EAGAIN. */
    for (nwritten = 0; nwritten < nread; ) {
      do
        n = write(out_fd, buf + nwritten, nread - nwritten);
      while (n == -1 && errno == EINTR);

      if (n != -1) {
        nwritten += n;
        continue;
      }

      if (errno != EAGAIN && errno != EWOULDBLOCK) {
        nsent = -1;
        goto out;
      }

      pfd.fd = out_fd;
      pfd.events = POLLOUT;
      pfd.revents = 0;

      do
        n = poll(&pfd, 1, -1);
      while (n == -1 && errno == EINTR);

      /* Any revents bit other than POLLOUT means the fd is unusable. */
      if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
        errno = EIO;
        nsent = -1;
        goto out;
      }
    }

    offset += nread;
    nsent += nread;
  }

out:
  if (nsent != -1)
    req->off = offset;

  return nsent;
}
943
944
945 #ifdef __linux__
/* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command
 * in copy_file_range() when it shouldn't. There is no workaround except to
 * fall back to a regular copy. Returns 1 when `fd` lives on a CephFS mount
 * under such a kernel; 0 otherwise (including when fstatfs() fails).
 */
static int uv__is_buggy_cephfs(int fd) {
  struct statfs fsinfo;

  if (fstatfs(fd, &fsinfo) != 0)
    return 0;

  if (fsinfo.f_type != 0xC36400)  /* not the CephFS magic number */
    return 0;

  return uv__kernel_version() < 0x041400;  /* broken before 4.20.0 */
}
961
962
/* Report whether `fd` lives on a CIFS/SMB mount, identified by the
 * filesystem magic number. Returns 0 when fstatfs() fails.
 */
static int uv__is_cifs_or_smb(int fd) {
  struct statfs fsinfo;
  unsigned magic;

  if (fstatfs(fd, &fsinfo) == -1)
    return 0;

  magic = (unsigned) fsinfo.f_type;

  return magic == 0x0000517Bu ||  /* SMB  */
         magic == 0xFE534D42u ||  /* SMB2 */
         magic == 0xFF534D42u;    /* CIFS */
}
978
979
/* Attempt a kernel-side copy with copy_file_range(). On failure, rewrites
 * certain errno values to ENOSYS so the caller knows to use the sendfile /
 * read-write fallback; a genuine ENOSYS is remembered in a process-wide flag
 * so later calls skip the syscall entirely.
 */
static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
                                          int out_fd, size_t len) {
  static _Atomic int no_copy_file_range_support;
  ssize_t r;

  if (atomic_load_explicit(&no_copy_file_range_support, memory_order_relaxed)) {
    errno = ENOSYS;
    return -1;
  }

  r = uv__fs_copy_file_range(in_fd, off, out_fd, NULL, len, 0);

  if (r != -1)
    return r;

  switch (errno) {
  case EACCES:
    /* Pre-4.20 kernels have a bug where CephFS uses the RADOS
     * copy-from command when it shouldn't.
     */
    if (uv__is_buggy_cephfs(in_fd))
      errno = ENOSYS;  /* Use fallback. */
    break;
  case ENOSYS:
    atomic_store_explicit(&no_copy_file_range_support, 1, memory_order_relaxed);
    break;
  case EPERM:
    /* It's been reported that CIFS spuriously fails.
     * Consider it a transient error.
     */
    if (uv__is_cifs_or_smb(out_fd))
      errno = ENOSYS;  /* Use fallback. */
    break;
  case ENOTSUP:
  case EXDEV:
    /* ENOTSUP - it could work on another file system type.
     * EXDEV - it will not work when in_fd and out_fd are not on the same
     *         mounted filesystem (pre Linux 5.3)
     */
    errno = ENOSYS;  /* Use fallback. */
    break;
  }

  return -1;
}
1025
1026 #endif /* __linux__ */
1027
1028
/* Worker for uv_fs_sendfile(): copy req->bufsml[0].len bytes from the source
 * fd (req->flags) to the destination fd (req->file) starting at req->off,
 * using the best kernel primitive available (copy_file_range, sendfile) and
 * falling back to the userspace loop when the kernel path can't be used.
 * Advances req->off and returns the byte count, or -1 with errno set.
 */
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
  int in_fd;
  int out_fd;

  in_fd = req->flags;   /* source fd is smuggled through req->flags */
  out_fd = req->file;

#if defined(__linux__) || defined(__sun)
  {
    off_t off;
    ssize_t r;
    size_t len;
    int try_sendfile;

    off = req->off;
    len = req->bufsml[0].len;
    try_sendfile = 1;

#ifdef __linux__
    /* Prefer copy_file_range(); only fall through to sendfile() when it is
     * reported (or rewritten) as ENOSYS. */
    r = uv__fs_try_copy_file_range(in_fd, &off, out_fd, len);
    try_sendfile = (r == -1 && errno == ENOSYS);
#endif

    if (try_sendfile)
      r = sendfile(out_fd, in_fd, &off, len);

    /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
     * it still writes out data. Fortunately, we can detect it by checking if
     * the offset has been updated.
     */
    if (r != -1 || off > req->off) {
      r = off - req->off;
      req->off = off;
      return r;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
/* sendfile() on iOS(arm64) will throw SIGSYS signal cause crash. */
#elif (defined(__APPLE__) && !TARGET_OS_IPHONE) \
    || defined(__DragonFly__)                   \
    || defined(__FreeBSD__)
  {
    off_t len;
    ssize_t r;

    /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
     * non-blocking mode and not all data could be written. If a non-zero
     * number of bytes have been sent, we don't consider it an error.
     */

#if defined(__FreeBSD__) || defined(__DragonFly__)
#if defined(__FreeBSD__)
    off_t off;

    off = req->off;
    r = uv__fs_copy_file_range(in_fd, &off, out_fd, NULL, req->bufsml[0].len, 0);
    if (r >= 0) {
        r = off - req->off;
        req->off = off;
        return r;
    }
#endif
    len = 0;
    r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
#else
    /* The darwin sendfile takes len as an input for the length to send,
     * so make sure to initialize it with the caller's value. */
    len = req->bufsml[0].len;
    r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
#endif

    /*
     * The man page for sendfile(2) on DragonFly states that `len` contains
     * a meaningful value ONLY in case of EAGAIN and EINTR.
     * Nothing is said about it's value in case of other errors, so better
     * not depend on the potential wrong assumption that is was not modified
     * by the syscall.
     */
    if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
      req->off += len;
      return (ssize_t) len;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#else
  /* Squelch compiler warnings. */
  (void) &in_fd;
  (void) &out_fd;

  return uv__fs_sendfile_emul(req);
#endif
}
1139
1140
/* Set access/modification times on req->path (following symlinks) from
 * req->atime / req->mtime. Returns -1 with errno set (ENOSYS on platforms
 * with no suitable primitive).
 */
static ssize_t uv__fs_utime(uv_fs_t* req) {
#if defined(__APPLE__)                                                        \
    || defined(_AIX71)                                                        \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__HAIKU__)                                                     \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)                                                   \
    || defined(__linux__)                                                     \
    || defined(__sun)
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return utimensat(AT_FDCWD, req->path, ts, 0);
#elif defined(_AIX) && !defined(_AIX71)
  /* Old AIX: fall back to second-resolution utime(). */
  struct utimbuf buf;
  buf.actime = req->atime;
  buf.modtime = req->mtime;
  return utime(req->path, &buf);
#elif defined(__MVS__)
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __lchattr((char*) req->path, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
1173
1174
/* Like uv__fs_utime() but operates on the symlink itself rather than its
 * target (AT_SYMLINK_NOFOLLOW). ENOSYS where utimensat() is unavailable.
 */
static ssize_t uv__fs_lutime(uv_fs_t* req) {
#if defined(__APPLE__)                                                        \
    || defined(_AIX71)                                                        \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__HAIKU__)                                                     \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)                                                   \
    || defined(__linux__)                                                     \
    || defined(__sun)
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return utimensat(AT_FDCWD, req->path, ts, AT_SYMLINK_NOFOLLOW);
#else
  errno = ENOSYS;
  return -1;
#endif
}
1194
1195
/* Worker for uv_fs_write(): gather-write req->bufs to req->file at req->off
 * (a negative offset means "current file position"). Mirrors uv__fs_read()
 * but, unlike it, does not free the buffer list here. Returns bytes written
 * or -1 with errno set.
 */
static ssize_t uv__fs_write(uv_fs_t* req) {
  const struct iovec* bufs;
  size_t nbufs;
  ssize_t r;
  off_t off;
  int fd;

  fd = req->file;
  off = req->off;
  bufs = (const struct iovec*) req->bufs;
  nbufs = req->nbufs;

  r = 0;
  if (off < 0) {
    /* No offset: plain write()/writev() advancing the file position. */
    if (nbufs == 1)
      r = write(fd, bufs->iov_base, bufs->iov_len);
    else if (nbufs > 1)
      r = writev(fd, bufs, nbufs);
  } else {
    /* Positional write; does not move the file position. */
    if (nbufs == 1)
      r = pwrite(fd, bufs->iov_base, bufs->iov_len, off);
    else if (nbufs > 1)
      r = uv__pwritev(fd, bufs, nbufs, off);
  }

  return r;
}
1223
1224
/* Copy req->path to req->new_path, carrying over the source's mode,
 * ownership and timestamps.  When requested via UV_FS_COPYFILE_FICLONE*
 * an FICLONE ioctl (reflink) is attempted first; the data is otherwise
 * copied with a sendfile() loop.  Returns 0 on success or -1 with errno
 * set (see the caveat at the top of this file); on failure the partially
 * written destination file is removed.
 */
static ssize_t uv__fs_copyfile(uv_fs_t* req) {
  uv_fs_t fs_req;
  uv_file srcfd;
  uv_file dstfd;
  struct stat src_statsbuf;
  struct stat dst_statsbuf;
  struct timespec times[2];
  int dst_flags;
  int result;
  int err;
  off_t bytes_to_send;
  off_t in_offset;
  off_t bytes_written;
  size_t bytes_chunk;

  dstfd = -1;
  err = 0;

  /* Open the source file.  A synchronous uv_fs_open() returns a negative
   * libuv error code on failure, which we pass straight to the caller. */
  srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
  uv_fs_req_cleanup(&fs_req);

  if (srcfd < 0)
    return srcfd;

  /* Get the source file's mode. */
  if (uv__fstat(srcfd, &src_statsbuf)) {
    err = UV__ERR(errno);
    goto out;
  }

  dst_flags = O_WRONLY | O_CREAT;

  if (req->flags & UV_FS_COPYFILE_EXCL)
    dst_flags |= O_EXCL;

  /* Open the destination file, created with the source's mode. */
  dstfd = uv_fs_open(NULL,
                     &fs_req,
                     req->new_path,
                     dst_flags,
                     src_statsbuf.st_mode,
                     NULL);
  uv_fs_req_cleanup(&fs_req);

  if (dstfd < 0) {
    err = dstfd;
    goto out;
  }

  /* If the file is not being opened exclusively, verify that the source and
     destination are not the same file. If they are the same, bail out early. */
  if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
    /* Get the destination file's mode. */
    if (uv__fstat(dstfd, &dst_statsbuf)) {
      err = UV__ERR(errno);
      goto out;
    }

    /* Check if srcfd and dstfd refer to the same file */
    if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
        src_statsbuf.st_ino == dst_statsbuf.st_ino) {
      goto out;
    }

    /* Truncate the file in case the destination already existed. */
    if (ftruncate(dstfd, 0) != 0) {
      err = UV__ERR(errno);

      /* ftruncate() on ceph-fuse fails with EACCES when the file is created
       * with read only permissions. Since ftruncate() on a newly created
       * file is a meaningless operation anyway, detect that condition
       * and squelch the error.
       */
      if (err != UV_EACCES)
        goto out;

      if (dst_statsbuf.st_size > 0)
        goto out;

      err = 0;
    }
  }

  /**
   * Change the timestamps of the destination file to match the source file.
   */
#if defined(__APPLE__)
  times[0] = src_statsbuf.st_atimespec;
  times[1] = src_statsbuf.st_mtimespec;
#elif defined(_AIX)
  times[0].tv_sec = src_statsbuf.st_atime;
  times[0].tv_nsec = src_statsbuf.st_atime_n;
  times[1].tv_sec = src_statsbuf.st_mtime;
  times[1].tv_nsec = src_statsbuf.st_mtime_n;
#else
  times[0] = src_statsbuf.st_atim;
  times[1] = src_statsbuf.st_mtim;
#endif

  if (futimens(dstfd, times) == -1) {
    err = UV__ERR(errno);
    goto out;
  }

  /*
   * Change the ownership and permissions of the destination file to match the
   * source file.
   * `cp -p` does not care about errors here, so we don't either. Reuse the
   * `result` variable to silence a -Wunused-result warning.
   */
  result = fchown(dstfd, src_statsbuf.st_uid, src_statsbuf.st_gid);

  if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
    err = UV__ERR(errno);
#ifdef __linux__
    /* fchmod() on CIFS shares always fails with EPERM unless the share is
     * mounted with "noperm". As fchmod() is a meaningless operation on such
     * shares anyway, detect that condition and squelch the error.
     */
    if (err != UV_EPERM)
      goto out;

    if (!uv__is_cifs_or_smb(dstfd))
      goto out;

    err = 0;
#else  /* !__linux__ */
    goto out;
#endif  /* !__linux__ */
  }

#ifdef FICLONE
  if (req->flags & UV_FS_COPYFILE_FICLONE ||
      req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    if (ioctl(dstfd, FICLONE, srcfd) == 0) {
      /* ioctl() with FICLONE succeeded. */
      goto out;
    }
    /* If an error occurred and force was set, return the error to the caller;
     * fall back to sendfile() when force was not set. */
    if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
      err = UV__ERR(errno);
      goto out;
    }
  }
#else
  if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    err = UV_ENOSYS;
    goto out;
  }
#endif

  /* Copy the file contents with sendfile(), at most SSIZE_MAX bytes per
   * call.  A synchronous uv_fs_sendfile() leaves its (possibly negative)
   * result in fs_req.result. */
  bytes_to_send = src_statsbuf.st_size;
  in_offset = 0;
  while (bytes_to_send != 0) {
    bytes_chunk = SSIZE_MAX;
    if (bytes_to_send < (off_t) bytes_chunk)
      bytes_chunk = bytes_to_send;
    uv_fs_sendfile(NULL, &fs_req, dstfd, srcfd, in_offset, bytes_chunk, NULL);
    bytes_written = fs_req.result;
    uv_fs_req_cleanup(&fs_req);

    if (bytes_written < 0) {
      err = bytes_written;
      break;
    }

    bytes_to_send -= bytes_written;
    in_offset += bytes_written;
  }

out:
  if (err < 0)
    result = err;
  else
    result = 0;

  /* Close the source file. */
  err = uv__close_nocheckstdio(srcfd);

  /* Don't overwrite any existing errors. */
  if (err != 0 && result == 0)
    result = err;

  /* Close the destination file if it is open. */
  if (dstfd >= 0) {
    err = uv__close_nocheckstdio(dstfd);

    /* Don't overwrite any existing errors. */
    if (err != 0 && result == 0)
      result = err;

    /* Remove the destination file if something went wrong. */
    if (result != 0) {
      uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
      /* Ignore the unlink return value, as an error already happened. */
      uv_fs_req_cleanup(&fs_req);
    }
  }

  if (result == 0)
    return 0;

  errno = UV__ERR(result);
  return -1;
}
1432
/* Translate a platform struct stat into libuv's portable uv_stat_t.
 * The preprocessor ladder selects the per-platform spelling of the
 * nanosecond timestamp fields; platforms without nanosecond resolution
 * get tv_nsec == 0, and platforms without a real birth time reuse the
 * ctime value for st_birthtim.  st_flags/st_gen are only meaningful on
 * the BSD-derived platforms that provide them; elsewhere they are 0.
 */
static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
  dst->st_dev = src->st_dev;
  dst->st_mode = src->st_mode;
  dst->st_nlink = src->st_nlink;
  dst->st_uid = src->st_uid;
  dst->st_gid = src->st_gid;
  dst->st_rdev = src->st_rdev;
  dst->st_ino = src->st_ino;
  dst->st_size = src->st_size;
  dst->st_blksize = src->st_blksize;
  dst->st_blocks = src->st_blocks;

#if defined(__APPLE__)
  dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
  dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
  dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
#elif defined(__ANDROID__)
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = src->st_atimensec;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = src->st_mtimensec;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = src->st_ctimensec;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = src->st_ctimensec;
  dst->st_flags = 0;
  dst->st_gen = 0;
#elif !defined(_AIX) && \
    !defined(__MVS__) && ( \
    defined(__DragonFly__)   || \
    defined(__FreeBSD__)     || \
    defined(__OpenBSD__)     || \
    defined(__NetBSD__)      || \
    defined(_GNU_SOURCE)     || \
    defined(_BSD_SOURCE)     || \
    defined(_SVID_SOURCE)    || \
    defined(_XOPEN_SOURCE)   || \
    defined(_DEFAULT_SOURCE))
  dst->st_atim.tv_sec = src->st_atim.tv_sec;
  dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
# if defined(__FreeBSD__)    || \
    defined(__NetBSD__)
  dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
# else
  dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
  dst->st_flags = 0;
  dst->st_gen = 0;
# endif
#else
  /* Fallback: only whole-second timestamps are available here. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = 0;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = 0;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = 0;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = 0;
  dst->st_flags = 0;
  dst->st_gen = 0;
#endif
}
1509
1510
/* stat()/lstat()/fstat() by way of the Linux statx(2) system call, which
 * additionally reports the file birth time.  Returns 0 on success, -1
 * with errno set on a genuine failure, or UV_ENOSYS when statx is not
 * usable; unavailability is remembered in `no_statx` so the syscall is
 * only probed once per process.  On non-Linux builds this is always
 * UV_ENOSYS, which tells the callers to fall back to plain stat().
 */
static int uv__fs_statx(int fd,
                        const char* path,
                        int is_fstat,
                        int is_lstat,
                        uv_stat_t* buf) {
  STATIC_ASSERT(UV_ENOSYS != -1);
#ifdef __linux__
  static _Atomic int no_statx;
  struct uv__statx statxbuf;
  int dirfd;
  int flags;
  int mode;
  int rc;

  if (atomic_load_explicit(&no_statx, memory_order_relaxed))
    return UV_ENOSYS;

  dirfd = AT_FDCWD;
  flags = 0; /* AT_STATX_SYNC_AS_STAT */
  mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */

  if (is_fstat) {
    /* fstat: target the fd itself via an empty path + AT_EMPTY_PATH. */
    dirfd = fd;
    flags |= 0x1000; /* AT_EMPTY_PATH */
  }

  if (is_lstat)
    flags |= AT_SYMLINK_NOFOLLOW;

  rc = uv__statx(dirfd, path, flags, mode, &statxbuf);

  switch (rc) {
  case 0:
    break;
  case -1:
    /* EPERM happens when a seccomp filter rejects the system call.
     * Has been observed with libseccomp < 2.3.3 and docker < 18.04.
     * EOPNOTSUPP is used on DVS exported filesystems
     */
    if (errno != EINVAL && errno != EPERM && errno != ENOSYS && errno != EOPNOTSUPP)
      return -1;
    /* Fall through. */
  default:
    /* Normally on success, zero is returned and On error, -1 is returned.
     * Observed on S390 RHEL running in a docker container with statx not
     * implemented, rc might return 1 with 0 set as the error code in which
     * case we return ENOSYS.
     */
    atomic_store_explicit(&no_statx, 1, memory_order_relaxed);
    return UV_ENOSYS;
  }

  uv__statx_to_stat(&statxbuf, buf);

  return 0;
#else
  return UV_ENOSYS;
#endif /* __linux__ */
}
1570
1571
1572 static int uv__fs_stat(const char *path, uv_stat_t *buf) {
1573 struct stat pbuf;
1574 int ret;
1575
1576 ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf);
1577 if (ret != UV_ENOSYS)
1578 return ret;
1579
1580 ret = uv__stat(path, &pbuf);
1581 if (ret == 0)
1582 uv__to_stat(&pbuf, buf);
1583
1584 return ret;
1585 }
1586
1587
1588 static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
1589 struct stat pbuf;
1590 int ret;
1591
1592 ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf);
1593 if (ret != UV_ENOSYS)
1594 return ret;
1595
1596 ret = uv__lstat(path, &pbuf);
1597 if (ret == 0)
1598 uv__to_stat(&pbuf, buf);
1599
1600 return ret;
1601 }
1602
1603
1604 static int uv__fs_fstat(int fd, uv_stat_t *buf) {
1605 struct stat pbuf;
1606 int ret;
1607
1608 ret = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf);
1609 if (ret != UV_ENOSYS)
1610 return ret;
1611
1612 ret = uv__fstat(fd, &pbuf);
1613 if (ret == 0)
1614 uv__to_stat(&pbuf, buf);
1615
1616 return ret;
1617 }
1618
1619 static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
1620 size_t offset;
1621 /* Figure out which bufs are done */
1622 for (offset = 0; size > 0 && bufs[offset].len <= size; ++offset)
1623 size -= bufs[offset].len;
1624
1625 /* Fix a partial read/write */
1626 if (size > 0) {
1627 bufs[offset].base += size;
1628 bufs[offset].len -= size;
1629 }
1630 return offset;
1631 }
1632
/* Drain all of req->bufs through uv__fs_write(), limiting each call to
 * at most `iovmax` buffers (the platform's writev() limit).  Partial
 * writes are handled by advancing req->bufs/req->off through
 * uv__fs_buf_offset().  Returns the grand total of bytes written, or
 * the first error/zero result when nothing was written at all.  The
 * heap copy of the buffer array (if any) is freed before returning and
 * req->bufs/req->nbufs are reset.
 */
static ssize_t uv__fs_write_all(uv_fs_t* req) {
  unsigned int iovmax;
  unsigned int nbufs;
  uv_buf_t* bufs;
  ssize_t total;
  ssize_t result;

  iovmax = uv__getiovmax();
  nbufs = req->nbufs;
  bufs = req->bufs;
  total = 0;

  while (nbufs > 0) {
    /* Present at most iovmax buffers to uv__fs_write() per round. */
    req->nbufs = nbufs;
    if (req->nbufs > iovmax)
      req->nbufs = iovmax;

    /* Retry writes interrupted by a signal. */
    do
      result = uv__fs_write(req);
    while (result < 0 && errno == EINTR);

    if (result <= 0) {
      /* Report the error only if no data was written beforehand. */
      if (total == 0)
        total = result;
      break;
    }

    /* Only advance the offset for positional writes (off >= 0). */
    if (req->off >= 0)
      req->off += result;

    /* Skip fully-written buffers; fix up a partially-written one. */
    req->nbufs = uv__fs_buf_offset(req->bufs, result);
    req->bufs += req->nbufs;
    nbufs -= req->nbufs;
    total += result;
  }

  if (bufs != req->bufsml)
    uv__free(bufs);

  req->bufs = NULL;
  req->nbufs = 0;

  return total;
}
1677
1678
/* Threadpool entry point: dispatch on req->fs_type, run the operation,
 * and store the outcome in req->result (a negated errno on failure, per
 * the note at the top of this file).  Operations that fail with EINTR
 * are retried, except UV_FS_CLOSE and UV_FS_READ which are deliberately
 * excluded from the retry loop.  For the stat family, the stat buffer
 * is additionally exposed through req->ptr.
 */
static void uv__fs_work(struct uv__work* w) {
  int retry_on_eintr;
  uv_fs_t* req;
  ssize_t r;

  req = container_of(w, uv_fs_t, work_req);
  retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
                     req->fs_type == UV_FS_READ);

  do {
    errno = 0;

#define X(type, action)                                                       \
  case UV_FS_ ## type:                                                        \
    r = action;                                                               \
    break;

    switch (req->fs_type) {
    X(ACCESS, access(req->path, req->flags));
    X(CHMOD, chmod(req->path, req->mode));
    X(CHOWN, chown(req->path, req->uid, req->gid));
    X(CLOSE, uv__fs_close(req->file));
    X(COPYFILE, uv__fs_copyfile(req));
    X(FCHMOD, fchmod(req->file, req->mode));
    X(FCHOWN, fchown(req->file, req->uid, req->gid));
    X(LCHOWN, lchown(req->path, req->uid, req->gid));
    X(FDATASYNC, uv__fs_fdatasync(req));
    X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
    X(FSYNC, uv__fs_fsync(req));
    X(FTRUNCATE, ftruncate(req->file, req->off));
    X(FUTIME, uv__fs_futime(req));
    X(LUTIME, uv__fs_lutime(req));
    X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
    X(LINK, link(req->path, req->new_path));
    X(MKDIR, mkdir(req->path, req->mode));
    X(MKDTEMP, uv__fs_mkdtemp(req));
    X(MKSTEMP, uv__fs_mkstemp(req));
    X(OPEN, uv__fs_open(req));
    X(READ, uv__fs_read(req));
    X(SCANDIR, uv__fs_scandir(req));
    X(OPENDIR, uv__fs_opendir(req));
    X(READDIR, uv__fs_readdir(req));
    X(CLOSEDIR, uv__fs_closedir(req));
    X(READLINK, uv__fs_readlink(req));
    X(REALPATH, uv__fs_realpath(req));
    X(RENAME, rename(req->path, req->new_path));
    X(RMDIR, rmdir(req->path));
    X(SENDFILE, uv__fs_sendfile(req));
    X(STAT, uv__fs_stat(req->path, &req->statbuf));
    X(STATFS, uv__fs_statfs(req));
    X(SYMLINK, symlink(req->path, req->new_path));
    X(UNLINK, unlink(req->path));
    X(UTIME, uv__fs_utime(req));
    X(WRITE, uv__fs_write_all(req));
    default: abort();
    }
#undef X
  } while (r == -1 && errno == EINTR && retry_on_eintr);

  /* Map -1/errno from the syscall wrappers to a negated uv error code. */
  if (r == -1)
    req->result = UV__ERR(errno);
  else
    req->result = r;

  if (r == 0 && (req->fs_type == UV_FS_STAT ||
                 req->fs_type == UV_FS_FSTAT ||
                 req->fs_type == UV_FS_LSTAT)) {
    req->ptr = &req->statbuf;
  }
}
1749
1750
1751 static void uv__fs_done(struct uv__work* w, int status) {
1752 uv_fs_t* req;
1753
1754 req = container_of(w, uv_fs_t, work_req);
1755 uv__req_unregister(req->loop);
1756
1757 if (status == UV_ECANCELED) {
1758 assert(req->result == 0);
1759 req->result = UV_ECANCELED;
1760 }
1761
1762 req->cb(req);
1763 }
1764
1765
1766 void uv__fs_post(uv_loop_t* loop, uv_fs_t* req) {
1767 uv__req_register(loop);
1768 uv__work_submit(loop,
1769 &req->work_req,
1770 UV__WORK_FAST_IO,
1771 uv__fs_work,
1772 uv__fs_done);
1773 }
1774
1775
/* Check accessibility of `path` per `flags` (forwarded to access(2) by
 * uv__fs_work).  Synchronous when cb == NULL, threadpool otherwise.
 * INIT/PATH/POST are macros defined earlier in this file. */
int uv_fs_access(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 int flags,
                 uv_fs_cb cb) {
  INIT(ACCESS);
  PATH;
  req->flags = flags;
  POST;
}
1786
1787
/* chmod(2) on `path` with `mode`.  Sync when cb == NULL. */
int uv_fs_chmod(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(CHMOD);
  PATH;
  req->mode = mode;
  POST;
}
1798
1799
/* chown(2) on `path` with new owner `uid` and group `gid`.
 * Sync when cb == NULL. */
int uv_fs_chown(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                uv_uid_t uid,
                uv_gid_t gid,
                uv_fs_cb cb) {
  INIT(CHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1812
1813
/* Close `file`.  Async requests try io_uring first; when the request
 * cannot be queued there it falls back to the threadpool (POST). */
int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(CLOSE);
  req->file = file;
  if (cb != NULL)
    if (uv__iou_fs_close(loop, req))
      return 0;
  POST;
}
1822
1823
/* fchmod(2) on the open descriptor `file`.  Sync when cb == NULL. */
int uv_fs_fchmod(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 int mode,
                 uv_fs_cb cb) {
  INIT(FCHMOD);
  req->file = file;
  req->mode = mode;
  POST;
}
1834
1835
/* fchown(2) on the open descriptor `file`.  Sync when cb == NULL. */
int uv_fs_fchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(FCHOWN);
  req->file = file;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1848
1849
/* lchown(2) on `path` (does not follow a trailing symlink).
 * Sync when cb == NULL. */
int uv_fs_lchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(LCHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1862
1863
/* fdatasync(2) on `file`.  Async requests try an io_uring fsync SQE
 * with the DATASYNC flag first; otherwise the threadpool is used. */
int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FDATASYNC);
  req->file = file;
  if (cb != NULL)
    if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* IORING_FSYNC_DATASYNC */ 1))
      return 0;
  POST;
}
1872
1873
/* fstat `file` into req->statbuf.  Async requests try io_uring statx
 * first; otherwise uv__fs_fstat() runs on the threadpool. */
int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSTAT);
  req->file = file;
  if (cb != NULL)
    if (uv__iou_fs_statx(loop, req, /* is_fstat */ 1, /* is_lstat */ 0))
      return 0;
  POST;
}
1882
1883
/* fsync(2) on `file`.  Async requests try an io_uring fsync SQE first;
 * otherwise the threadpool is used. */
int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSYNC);
  req->file = file;
  if (cb != NULL)
    if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* no flags */ 0))
      return 0;
  POST;
}
1892
1893
/* ftruncate(2) `file` to length `off`.  Async requests try io_uring
 * first; otherwise the threadpool is used. */
int uv_fs_ftruncate(uv_loop_t* loop,
                    uv_fs_t* req,
                    uv_file file,
                    int64_t off,
                    uv_fs_cb cb) {
  INIT(FTRUNCATE);
  req->file = file;
  req->off = off;
  if (cb != NULL)
    if (uv__iou_fs_ftruncate(loop, req))
      return 0;
  POST;
}
1907
1908
/* Update access/modification times of `file`; atime/mtime are in
 * seconds (fractions allowed).  Handled by uv__fs_futime() on the
 * threadpool.  Sync when cb == NULL. */
int uv_fs_futime(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(FUTIME);
  req->file = file;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
1921
/* Update access/modification times of `path` without following a
 * trailing symlink; handled by uv__fs_lutime() on the threadpool.
 * Sync when cb == NULL. */
int uv_fs_lutime(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(LUTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
1934
1935
/* lstat `path` into req->statbuf.  Async requests try io_uring statx
 * first; otherwise uv__fs_lstat() runs on the threadpool. */
int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(LSTAT);
  PATH;
  if (cb != NULL)
    if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 1))
      return 0;
  POST;
}
1944
1945
/* link(2): create hard link `new_path` pointing to `path`.  Async
 * requests try io_uring first; otherwise the threadpool is used. */
int uv_fs_link(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               const char* new_path,
               uv_fs_cb cb) {
  INIT(LINK);
  PATH2;
  if (cb != NULL)
    if (uv__iou_fs_link(loop, req))
      return 0;
  POST;
}
1958
1959
/* mkdir(2) `path` with `mode`.  Async requests try io_uring first;
 * otherwise the threadpool is used. */
int uv_fs_mkdir(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(MKDIR);
  PATH;
  req->mode = mode;
  if (cb != NULL)
    if (uv__iou_fs_mkdir(loop, req))
      return 0;
  POST;
}
1973
1974
/* Create a unique temporary directory from template `tpl`.  The
 * template is always copied (even for sync requests) because
 * uv__fs_mkdtemp() mutates it in place; uv_fs_req_cleanup() frees
 * the copy (see its comment). */
int uv_fs_mkdtemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKDTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}
1985
1986
/* Create and open a unique temporary file from template `tpl`.  The
 * template is always copied, mirroring uv_fs_mkdtemp();
 * uv_fs_req_cleanup() frees the copy. */
int uv_fs_mkstemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKSTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}
1997
1998
1999 int uv_fs_open(uv_loop_t* loop,
2000 uv_fs_t* req,
2001 const char* path,
2002 int flags,
2003 int mode,
2004 uv_fs_cb cb) {
2005 INIT(OPEN);
2006 PATH;
2007 req->flags = flags;
2008 req->mode = mode;
2009 if (cb != NULL)
2010 if (uv__iou_fs_open(loop, req))
2011 return 0;
2012 POST;
2013 }
2014
2015
2016 int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
2017 uv_file file,
2018 const uv_buf_t bufs[],
2019 unsigned int nbufs,
2020 int64_t off,
2021 uv_fs_cb cb) {
2022 INIT(READ);
2023
2024 if (bufs == NULL || nbufs == 0)
2025 return UV_EINVAL;
2026
2027 req->off = off;
2028 req->file = file;
2029 req->bufs = (uv_buf_t*) bufs; /* Safe, doesn't mutate |bufs| */
2030 req->nbufs = nbufs;
2031
2032 if (cb == NULL)
2033 goto post;
2034
2035 req->bufs = req->bufsml;
2036 if (nbufs > ARRAY_SIZE(req->bufsml))
2037 req->bufs = uv__malloc(nbufs * sizeof(*bufs));
2038
2039 if (req->bufs == NULL)
2040 return UV_ENOMEM;
2041
2042 memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
2043
2044 if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 1))
2045 return 0;
2046
2047 post:
2048 POST;
2049 }
2050
2051
/* Enumerate directory `path` via uv__fs_scandir() on the threadpool;
 * the result hanging off req->ptr is released by uv_fs_req_cleanup(). */
int uv_fs_scandir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SCANDIR);
  PATH;
  req->flags = flags;
  POST;
}
2062
/* Open directory `path` for iteration with uv_fs_readdir();
 * handled by uv__fs_opendir() on the threadpool. */
int uv_fs_opendir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  uv_fs_cb cb) {
  INIT(OPENDIR);
  PATH;
  POST;
}
2071
/* Read the next batch of entries from an opened `dir` into its
 * caller-provided dirents array; the dir handle rides in req->ptr.
 * Rejects a dir without an open handle or dirents storage. */
int uv_fs_readdir(uv_loop_t* loop,
                  uv_fs_t* req,
                  uv_dir_t* dir,
                  uv_fs_cb cb) {
  INIT(READDIR);

  if (dir == NULL || dir->dir == NULL || dir->dirents == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}
2084
/* Close a directory handle previously opened with uv_fs_opendir();
 * the dir handle rides in req->ptr. */
int uv_fs_closedir(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_dir_t* dir,
                   uv_fs_cb cb) {
  INIT(CLOSEDIR);

  if (dir == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}
2097
/* Read the target of symlink `path` via uv__fs_readlink() on the
 * threadpool; the result string in req->ptr is freed by
 * uv_fs_req_cleanup(). */
int uv_fs_readlink(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   uv_fs_cb cb) {
  INIT(READLINK);
  PATH;
  POST;
}
2106
2107
/* Canonicalize `path` via uv__fs_realpath() on the threadpool; the
 * result string in req->ptr is freed by uv_fs_req_cleanup(). */
int uv_fs_realpath(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char * path,
                   uv_fs_cb cb) {
  INIT(REALPATH);
  PATH;
  POST;
}
2116
2117
/* rename(2) `path` to `new_path`.  Async requests try io_uring first;
 * otherwise the threadpool is used. */
int uv_fs_rename(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 const char* new_path,
                 uv_fs_cb cb) {
  INIT(RENAME);
  PATH2;
  if (cb != NULL)
    if (uv__iou_fs_rename(loop, req))
      return 0;
  POST;
}
2130
2131
/* rmdir(2) `path`.  Sync when cb == NULL. */
int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(RMDIR);
  PATH;
  POST;
}
2137
2138
/* Copy `len` bytes from in_fd to out_fd starting at offset `off`;
 * dispatched to uv__fs_sendfile() on the threadpool.  Note how the
 * request fields are repurposed to carry both descriptors and the
 * length. */
int uv_fs_sendfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_file out_fd,
                   uv_file in_fd,
                   int64_t off,
                   size_t len,
                   uv_fs_cb cb) {
  INIT(SENDFILE);
  req->flags = in_fd; /* hack: smuggle the source fd in the flags field */
  req->file = out_fd;
  req->off = off;
  req->bufsml[0].len = len;
  POST;
}
2153
2154
/* stat `path` into req->statbuf.  Async requests try io_uring statx
 * first; otherwise uv__fs_stat() runs on the threadpool. */
int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(STAT);
  PATH;
  if (cb != NULL)
    if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 0))
      return 0;
  POST;
}
2163
2164
/* symlink(2): create `new_path` pointing at `path`.  `flags` is stored
 * for the Windows implementation and ignored by symlink(2) here.  Async
 * requests try io_uring first; otherwise the threadpool is used. */
int uv_fs_symlink(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  const char* new_path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SYMLINK);
  PATH2;
  req->flags = flags;
  if (cb != NULL)
    if (uv__iou_fs_symlink(loop, req))
      return 0;
  POST;
}
2179
2180
/* unlink(2) `path`.  Async requests try io_uring first; otherwise the
 * threadpool is used. */
int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(UNLINK);
  PATH;
  if (cb != NULL)
    if (uv__iou_fs_unlink(loop, req))
      return 0;
  POST;
}
2189
2190
/* Update access/modification times of `path`; atime/mtime are in
 * seconds (fractions allowed).  Handled by uv__fs_utime() on the
 * threadpool.  Sync when cb == NULL. */
int uv_fs_utime(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                double atime,
                double mtime,
                uv_fs_cb cb) {
  INIT(UTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
2203
2204
/* Write `bufs` to `file` at offset `off`.  Unlike uv_fs_read(), the
 * buffer-descriptor array is copied even for synchronous requests
 * because uv__fs_write_all() mutates and ultimately frees req->bufs.
 * Async requests try io_uring before falling back to the threadpool. */
int uv_fs_write(uv_loop_t* loop,
                uv_fs_t* req,
                uv_file file,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                int64_t off,
                uv_fs_cb cb) {
  INIT(WRITE);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->file = file;

  /* Copy the buffer descriptors into bufsml or a heap allocation. */
  req->nbufs = nbufs;
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL)
    return UV_ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  req->off = off;

  if (cb != NULL)
    if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 0))
      return 0;

  POST;
}
2237
2238
/* Release everything a request owns: the copied path(s), the buffer
 * array copy, and the result object hanging off req->ptr.  Safe to call
 * with req == NULL, and idempotent — every freed field is reset to NULL.
 */
void uv_fs_req_cleanup(uv_fs_t* req) {
  if (req == NULL)
    return;

  /* Only necessary for asynchronous requests, i.e., requests with a callback.
   * Synchronous ones don't copy their arguments and have req->path and
   * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
   * UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.
   */
  if (req->path != NULL &&
      (req->cb != NULL ||
       req->fs_type == UV_FS_MKDTEMP || req->fs_type == UV_FS_MKSTEMP))
    uv__free((void*) req->path);  /* Memory is shared with req->new_path. */

  req->path = NULL;
  req->new_path = NULL;

  /* scandir/readdir results need their per-entry teardown. */
  if (req->fs_type == UV_FS_READDIR && req->ptr != NULL)
    uv__fs_readdir_cleanup(req);

  if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
    uv__fs_scandir_cleanup(req);

  /* Only free a heap-allocated buffer array, not the inline bufsml. */
  if (req->bufs != req->bufsml)
    uv__free(req->bufs);
  req->bufs = NULL;

  /* opendir's req->ptr is the caller-owned uv_dir_t; the stat family
   * points req->ptr at req->statbuf.  Neither must be freed here. */
  if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf)
    uv__free(req->ptr);
  req->ptr = NULL;
}
2270
2271
/* Copy `path` to `new_path` via uv__fs_copyfile() on the threadpool.
 * Only the three documented UV_FS_COPYFILE_* flags are accepted; any
 * other bit yields UV_EINVAL. */
int uv_fs_copyfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   const char* new_path,
                   int flags,
                   uv_fs_cb cb) {
  INIT(COPYFILE);

  if (flags & ~(UV_FS_COPYFILE_EXCL |
                UV_FS_COPYFILE_FICLONE |
                UV_FS_COPYFILE_FICLONE_FORCE)) {
    return UV_EINVAL;
  }

  PATH2;
  req->flags = flags;
  POST;
}
2290
2291
/* Filesystem statistics for `path` via uv__fs_statfs() on the
 * threadpool; the result in req->ptr is freed by uv_fs_req_cleanup(). */
int uv_fs_statfs(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_fs_cb cb) {
  INIT(STATFS);
  PATH;
  POST;
}
2300
/* On Unix, req->result holds a negated errno value on failure, so
 * negating it again yields the positive system error (0 on success). */
int uv_fs_get_system_error(const uv_fs_t* req) {
  return -req->result;
}