Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Kitware
GitHub Repository: Kitware/CMake
Path: blob/master/Utilities/cmlibuv/src/unix/fs.c
3156 views
1
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2
*
3
* Permission is hereby granted, free of charge, to any person obtaining a copy
4
* of this software and associated documentation files (the "Software"), to
5
* deal in the Software without restriction, including without limitation the
6
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7
* sell copies of the Software, and to permit persons to whom the Software is
8
* furnished to do so, subject to the following conditions:
9
*
10
* The above copyright notice and this permission notice shall be included in
11
* all copies or substantial portions of the Software.
12
*
13
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19
* IN THE SOFTWARE.
20
*/
21
22
/* Caveat emptor: this file deviates from the libuv convention of returning
23
* negated errno codes. Most uv_fs_*() functions map directly to the system
24
* call of the same name. For more complex wrappers, it's easier to just
25
* return -1 with errno set. The dispatcher in uv__fs_work() takes care of
26
* getting the errno to the right place (req->result or as the return value.)
27
*/
28
29
#include "uv.h"
30
#include "internal.h"
31
32
#include <errno.h>
33
#include <dlfcn.h>
34
#include <stdio.h>
35
#include <stdlib.h>
36
#include <string.h>
37
#include <limits.h> /* PATH_MAX */
38
39
#include <sys/types.h>
40
#include <sys/socket.h>
41
#include <sys/stat.h>
42
#include <sys/time.h>
43
#include <sys/uio.h>
44
#include <pthread.h>
45
#include <unistd.h>
46
#include <fcntl.h>
47
#include <poll.h>
48
49
#if defined(__DragonFly__) || \
50
defined(__FreeBSD__) || \
51
defined(__FreeBSD_kernel__) || \
52
defined(__OpenBSD__) || \
53
defined(__NetBSD__)
54
# define HAVE_PREADV 1
55
#else
56
# define HAVE_PREADV 0
57
#endif
58
59
#if defined(__linux__)
60
# include "sys/utsname.h"
61
#endif
62
63
#if defined(__linux__) || defined(__sun)
64
# include <sys/sendfile.h>
65
# include <sys/sysmacros.h>
66
#endif
67
68
#if defined(__APPLE__)
69
# include <sys/sysctl.h>
70
#elif defined(__linux__) && !defined(FICLONE)
71
# include <sys/ioctl.h>
72
# define FICLONE _IOW(0x94, 9, int)
73
#endif
74
75
#if defined(_AIX) && !defined(_AIX71)
76
# include <utime.h>
77
#endif
78
79
#if defined(__APPLE__) || \
80
defined(__DragonFly__) || \
81
defined(__FreeBSD__) || \
82
defined(__FreeBSD_kernel__) || \
83
defined(__OpenBSD__) || \
84
defined(__NetBSD__)
85
# include <sys/param.h>
86
# include <sys/mount.h>
87
#elif defined(__sun) || \
88
defined(__MVS__) || \
89
defined(__NetBSD__) || \
90
defined(__HAIKU__) || \
91
defined(__QNX__)
92
# include <sys/statvfs.h>
93
#else
94
# include <sys/statfs.h>
95
#endif
96
97
#if defined(_AIX) && _XOPEN_SOURCE <= 600
98
extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
99
#endif
100
101
/* Initialize a uv_fs_t request of the given subtype. Bails out with
 * UV_EINVAL when the caller passed a NULL request. Expects `req`, `loop`
 * and `cb` to be in scope at the expansion site.
 */
#define INIT(subtype)                                                         \
  do {                                                                        \
    if (req == NULL)                                                          \
      return UV_EINVAL;                                                       \
    UV_REQ_INIT(req, UV_FS);                                                  \
    req->fs_type = UV_FS_ ## subtype;                                         \
    req->result = 0;                                                          \
    req->ptr = NULL;                                                          \
    req->loop = loop;                                                         \
    req->path = NULL;                                                         \
    req->new_path = NULL;                                                     \
    req->bufs = NULL;                                                         \
    req->cb = cb;                                                             \
  }                                                                           \
  while (0)
116
117
/* Store `path` in the request. Synchronous requests (cb == NULL) may borrow
 * the caller's string; asynchronous ones need their own copy because the
 * caller's buffer may be gone by the time the threadpool runs the work.
 */
#define PATH                                                                  \
  do {                                                                        \
    assert(path != NULL);                                                     \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
    } else {                                                                  \
      req->path = uv__strdup(path);                                           \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
129
130
/* Store `path` and `new_path` in the request. As with PATH, synchronous
 * requests borrow the caller's strings while asynchronous ones copy both
 * into a single allocation (new_path lives right after path, including its
 * NUL terminator), so uv_fs_req_cleanup() only has to free req->path.
 */
#define PATH2                                                                 \
  do {                                                                        \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
      req->new_path = new_path;                                               \
    } else {                                                                  \
      size_t path_len;                                                        \
      size_t new_path_len;                                                    \
      path_len = strlen(path) + 1;                                            \
      new_path_len = strlen(new_path) + 1;                                    \
      req->path = uv__malloc(path_len + new_path_len);                        \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
      req->new_path = req->path + path_len;                                   \
      memcpy((void*) req->path, path, path_len);                              \
      memcpy((void*) req->new_path, new_path, new_path_len);                  \
    }                                                                         \
  }                                                                           \
  while (0)
149
150
/* Dispatch the prepared request: hand it to the threadpool when a callback
 * was supplied (returns 0 immediately), otherwise run the work inline and
 * return its result.
 */
#define POST                                                                  \
  do {                                                                        \
    if (cb != NULL) {                                                         \
      uv__req_register(loop, req);                                            \
      uv__work_submit(loop,                                                   \
                      &req->work_req,                                         \
                      UV__WORK_FAST_IO,                                       \
                      uv__fs_work,                                            \
                      uv__fs_done);                                           \
      return 0;                                                               \
    }                                                                         \
    else {                                                                    \
      uv__fs_work(&req->work_req);                                            \
      return req->result;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
167
168
169
/* Close fd, treating an interrupted or still-in-progress close as success:
 * on those errors the kernel will finish the close on its own. */
static int uv__fs_close(int fd) {
  int rc = uv__close_nocancel(fd);

  if (rc == -1 && (errno == EINTR || errno == EINPROGRESS))
    return 0;  /* The close is in progress, not an error. */

  return rc;
}
179
180
181
static ssize_t uv__fs_fsync(uv_fs_t* req) {
#if defined(__APPLE__)
  /* On macOS, fsync()/fdatasync() explicitly do NOT force the drive to flush
   * its write cache to the platters (unlike Linux, per recent man pages).
   * F_FULLFSYNC is Apple's way of pushing buffered data to permanent
   * storage; when the file system rejects it we degrade to F_BARRIERFSYNC
   * and finally to plain fsync(). This mirrors sqlite's approach, except
   * sqlite skips the barrier step.
   */
  int rc;

  rc = fcntl(req->file, F_FULLFSYNC);
  if (rc != 0)
    rc = fcntl(req->file, 85 /* F_BARRIERFSYNC */);  /* fsync + barrier */
  if (rc != 0)
    rc = fsync(req->file);

  return rc;
#else
  return fsync(req->file);
#endif
}
203
204
205
/* Flush file data (not necessarily metadata) to disk, using the closest
 * primitive the platform provides. */
static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
  return fdatasync(req->file);
#elif defined(__APPLE__)
  /* See the comment in uv__fs_fsync: plain fsync() is not durable here. */
  return uv__fs_fsync(req);
#else
  return fsync(req->file);
#endif
}
215
216
217
UV_UNUSED(static struct timespec uv__fs_to_timespec(double time)) {
218
struct timespec ts;
219
ts.tv_sec = time;
220
ts.tv_nsec = (time - ts.tv_sec) * 1e9;
221
222
/* TODO(bnoordhuis) Remove this. utimesat() has nanosecond resolution but we
223
* stick to microsecond resolution for the sake of consistency with other
224
* platforms. I'm the original author of this compatibility hack but I'm
225
* less convinced it's useful nowadays.
226
*/
227
ts.tv_nsec -= ts.tv_nsec % 1000;
228
229
if (ts.tv_nsec < 0) {
230
ts.tv_nsec += 1e9;
231
ts.tv_sec -= 1;
232
}
233
return ts;
234
}
235
236
UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
237
struct timeval tv;
238
tv.tv_sec = time;
239
tv.tv_usec = (time - tv.tv_sec) * 1e6;
240
if (tv.tv_usec < 0) {
241
tv.tv_usec += 1e6;
242
tv.tv_sec -= 1;
243
}
244
return tv;
245
}
246
247
/* Set the access and modification times of the open file req->file,
 * dispatching to the best primitive the platform offers. Returns -1 with
 * errno set on failure (ENOSYS where no primitive exists at all). */
static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__linux__) \
    || defined(_AIX71) \
    || defined(__HAIKU__) \
    || defined(__GNU__)
  /* futimens() takes nanosecond-resolution timespecs. */
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return futimens(req->file, ts);
#elif defined(__APPLE__) \
    || defined(__DragonFly__) \
    || defined(__FreeBSD__) \
    || defined(__FreeBSD_kernel__) \
    || defined(__NetBSD__) \
    || defined(__OpenBSD__) \
    || defined(__sun)
  /* BSD-style systems only offer microsecond-resolution timevals. */
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
# if defined(__sun)
  return futimesat(req->file, NULL, tv);
# else
  return futimes(req->file, tv);
# endif
#elif defined(__MVS__)
  /* z/OS: set times through the attribute-change syscall. */
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __fchattr(req->file, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
284
285
#if defined(CMAKE_BOOTSTRAP) && defined(__sun) && defined(__i386)
# define CMAKE_NO_MKDTEMP
#endif

#if defined(CMAKE_NO_MKDTEMP)
/* Emulate mkdtemp() on platforms whose libc may lack it: pick a unique name
 * with mktemp() and create the directory ourselves. */
static char* uv__mkdtemp_fallback(char *template) {
  if (!mktemp(template) || mkdir(template, 0700))
    return NULL;
  return template;
}

/* Resolved once: either the real mkdtemp() or the fallback above. */
static char* (*uv__mkdtemp_f)(char*);

static void uv__mkdtemp_initonce(void) {
  uv__mkdtemp_f = (char* (*)(char*)) dlsym(RTLD_DEFAULT, "mkdtemp");
  dlerror();  /* Clear any error state left behind by dlsym(). */
  if (uv__mkdtemp_f == NULL)
    uv__mkdtemp_f = uv__mkdtemp_fallback;
}

static char* uv__mkdtemp(char *template)
{
  static uv_once_t once = UV_ONCE_INIT;
  uv_once(&once, uv__mkdtemp_initonce);
  return uv__mkdtemp_f(template);
}
#else
#define uv__mkdtemp mkdtemp
#endif
312
313
/* Create a unique temporary directory from the template in req->path.
 * The template is rewritten in place; returns 0 on success, -1 on error. */
static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
  if (uv__mkdtemp((char*) req->path) == NULL)
    return -1;
  return 0;
}
316
317
318
/* Lazily resolved pointer to mkostemp(3), which not every libc provides. */
static int (*uv__mkostemp)(char*, int);


static void uv__mkostemp_initonce(void) {
  /* z/OS lacks RTLD_DEFAULT, but that's okay: it lacks
   * mkostemp(O_CLOEXEC) too, so there is nothing to look up.
   */
#ifdef RTLD_DEFAULT
  uv__mkostemp = (int (*)(char*, int)) dlsym(RTLD_DEFAULT, "mkostemp");

  /* We don't care whether the lookup failed, but dlerror() must be called
   * anyway to clear any stale error state (it returns NULL when there was
   * no error).
   */
  dlerror();
#endif  /* RTLD_DEFAULT */
}
335
336
337
/* Create a unique temporary file from the "...XXXXXX" template in req->path,
 * preferring mkostemp(O_CLOEXEC) and falling back to mkstemp() + fcntl.
 * Returns the open fd, or -1 with errno set (and the template clobbered to
 * an empty string so the caller cannot mistake it for a created file). */
static int uv__fs_mkstemp(uv_fs_t* req) {
  static uv_once_t once = UV_ONCE_INIT;
  int r;
#ifdef O_CLOEXEC
  static int no_cloexec_support;
#endif
  static const char pattern[] = "XXXXXX";
  static const size_t pattern_size = sizeof(pattern) - 1;
  char* path;
  size_t path_length;

  path = (char*) req->path;
  path_length = strlen(path);

  /* mkostemp() reports EINVAL both for a malformed template and for a
   * kernel that rejects O_CLOEXEC. Validate the template up front so a
   * later EINVAL can only mean the latter. */
  if (path_length < pattern_size ||
      strcmp(path + path_length - pattern_size, pattern)) {
    errno = EINVAL;
    r = -1;
    goto clobber;
  }

  uv_once(&once, uv__mkostemp_initonce);

#ifdef O_CLOEXEC
  if (uv__load_relaxed(&no_cloexec_support) == 0 && uv__mkostemp != NULL) {
    r = uv__mkostemp(path, O_CLOEXEC);

    if (r >= 0)
      return r;

    /* Anything other than EINVAL is a real failure; EINVAL means the
     * kernel does not support O_CLOEXEC here, so fall through to
     * mkstemp() below. */
    if (errno != EINVAL)
      goto clobber;

    /* Remember the lack of support so later calls skip mkostemp(). */
    uv__store_relaxed(&no_cloexec_support, 1);
  }
#endif  /* O_CLOEXEC */

  /* Hold the loop's cloexec lock across the non-atomic create-then-flag
   * sequence so a concurrent fork/exec cannot leak the descriptor. */
  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = mkstemp(path);

  /* On failure uv__cloexec() leaves the error in errno; setting r to -1
   * is all that is needed here. */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

clobber:
  if (r < 0)
    path[0] = '\0';
  return r;
}
406
407
408
/* Open req->path with req->flags/req->mode, always ending up with the
 * close-on-exec flag set on the resulting descriptor. */
static ssize_t uv__fs_open(uv_fs_t* req) {
#ifdef O_CLOEXEC
  /* Fast path: set close-on-exec atomically at open time. */
  return open(req->path, req->flags | O_CLOEXEC, req->mode);
#else  /* O_CLOEXEC */
  int r;

  /* Without O_CLOEXEC the flag must be applied with a separate call; hold
   * the loop's cloexec lock so a concurrent fork/exec cannot leak the fd. */
  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = open(req->path, req->flags, req->mode);

  /* On failure uv__cloexec() leaves the error in errno; setting r to -1
   * is all that is needed here. */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

  return r;
#endif  /* O_CLOEXEC */
}
435
436
437
#if !HAVE_PREADV
438
static ssize_t uv__fs_preadv(uv_file fd,
439
uv_buf_t* bufs,
440
unsigned int nbufs,
441
off_t off) {
442
uv_buf_t* buf;
443
uv_buf_t* end;
444
ssize_t result;
445
ssize_t rc;
446
size_t pos;
447
448
assert(nbufs > 0);
449
450
result = 0;
451
pos = 0;
452
buf = bufs + 0;
453
end = bufs + nbufs;
454
455
for (;;) {
456
do
457
rc = pread(fd, buf->base + pos, buf->len - pos, off + result);
458
while (rc == -1 && errno == EINTR);
459
460
if (rc == 0)
461
break;
462
463
if (rc == -1 && result == 0)
464
return UV__ERR(errno);
465
466
if (rc == -1)
467
break; /* We read some data so return that, ignore the error. */
468
469
pos += rc;
470
result += rc;
471
472
if (pos < buf->len)
473
continue;
474
475
pos = 0;
476
buf += 1;
477
478
if (buf == end)
479
break;
480
}
481
482
return result;
483
}
484
#endif
485
486
487
/* Read into req->bufs from req->file: positionally when req->off >= 0,
 * otherwise at the current file offset. Frees the bufs array before
 * returning the byte count (or -1 with errno set). */
static ssize_t uv__fs_read(uv_fs_t* req) {
#if defined(__linux__)
  /* Set once preadv(2) has reported ENOSYS so later calls skip straight to
   * the emulation. */
  static int no_preadv;
#endif
  unsigned int iovmax;
  ssize_t result;

  /* Clamp the buffer count to the platform's IOV_MAX. */
  iovmax = uv__getiovmax();
  if (req->nbufs > iovmax)
    req->nbufs = iovmax;

  if (req->off < 0) {
    if (req->nbufs == 1)
      result = read(req->file, req->bufs[0].base, req->bufs[0].len);
    else
      result = readv(req->file, (struct iovec*) req->bufs, req->nbufs);
  } else {
    if (req->nbufs == 1) {
      result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
      goto done;
    }

#if HAVE_PREADV
    result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
    /* Note: the `if (...) retry:` construct makes the label target the
     * emulation block below while keeping it skipped on the first pass. */
# if defined(__linux__)
    if (uv__load_relaxed(&no_preadv)) retry:
# endif
    {
      result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
    }
# if defined(__linux__)
    else {
      result = uv__preadv(req->file,
                          (struct iovec*) req->bufs,
                          req->nbufs,
                          req->off);
      if (result == -1 && errno == ENOSYS) {
        uv__store_relaxed(&no_preadv, 1);
        goto retry;
      }
    }
# endif
#endif
  }

done:
  /* Early cleanup of the bufs allocation, since we're done with it. */
  if (req->bufs != req->bufsml)
    uv__free(req->bufs);

  req->bufs = NULL;
  req->nbufs = 0;

#ifdef __PASE__
  /* PASE returns EOPNOTSUPP when reading a directory; convert to EISDIR. */
  if (result == -1 && errno == EOPNOTSUPP) {
    struct stat buf;
    ssize_t rc;
    rc = fstat(req->file, &buf);
    if (rc == 0 && S_ISDIR(buf.st_mode)) {
      errno = EISDIR;
    }
  }
#endif

  return result;
}
555
556
557
/* Pre-10.8 macOS SDKs declare scandir()'s callbacks with non-const dirent
 * parameters; pick the matching qualification for our callbacks. */
#if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8)
#define UV_CONST_DIRENT uv__dirent_t
#else
#define UV_CONST_DIRENT const uv__dirent_t
#endif
562
563
564
/* scandir() filter: exclude the "." and ".." pseudo-entries. */
static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) {
  const char* name = dent->d_name;
  return strcmp(name, ".") != 0 && strcmp(name, "..") != 0;
}
567
568
569
/* scandir() comparator: order entries lexicographically by name. */
static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) {
  return strcmp((*a)->d_name, (*b)->d_name);
}
572
573
574
/* List the directory req->path. The sorted entry array is stored in
 * req->ptr for uv_fs_scandir_next(); returns the entry count or -1. */
static ssize_t uv__fs_scandir(uv_fs_t* req) {
  uv__dirent_t** entries;
  int count;

  entries = NULL;
  count = scandir(req->path, &entries, uv__fs_scandir_filter, uv__fs_scandir_sort);

  /* NOTE: nbufs doubles as the iteration index for scandir_next. */
  req->nbufs = 0;

  if (count == 0) {
    /* OS X may allocate even for an empty result; it came from the system
     * allocator, so release it with free(), not uv__free(). */
    free(entries);
    entries = NULL;
  } else if (count == -1) {
    return count;
  }

  req->ptr = entries;

  return count;
}
598
599
/* Open req->path for iteration with uv_fs_readdir(). On success req->ptr
 * owns a uv_dir_t wrapping the DIR stream; on failure req->ptr is NULL. */
static int uv__fs_opendir(uv_fs_t* req) {
  uv_dir_t* dir = uv__malloc(sizeof(*dir));

  if (dir == NULL)
    goto error;

  dir->dir = opendir(req->path);
  if (dir->dir == NULL)
    goto error;

  req->ptr = dir;
  return 0;

error:
  uv__free(dir);  /* uv__free(NULL) is a no-op. */
  req->ptr = NULL;
  return -1;
}
618
619
/* Fill dir->dirents (capacity dir->nentries) with the next batch of entries
 * from the uv_dir_t in req->ptr. Entry names are uv__strdup()ed copies owned
 * by the caller. Returns the number of entries stored, 0 at end of
 * directory, or -1 on error (releasing any names duplicated so far). */
static int uv__fs_readdir(uv_fs_t* req) {
  uv_dir_t* dir;
  uv_dirent_t* dirent;
  struct dirent* res;
  unsigned int dirent_idx;
  unsigned int i;

  dir = req->ptr;
  dirent_idx = 0;

  while (dirent_idx < dir->nentries) {
    /* readdir() returns NULL both at end-of-directory and on error; errno,
     * cleared before each call, disambiguates the two. */
    errno = 0;
    res = readdir(dir->dir);

    if (res == NULL) {
      if (errno != 0)
        goto error;
      break;
    }

    if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
      continue;  /* Skip the pseudo-entries. */

    dirent = &dir->dirents[dirent_idx];
    dirent->name = uv__strdup(res->d_name);

    if (dirent->name == NULL)
      goto error;

    dirent->type = uv__fs_get_dirent_type(res);
    ++dirent_idx;
  }

  return dirent_idx;

error:
  /* Undo: release every name duplicated in this call. */
  for (i = 0; i < dirent_idx; ++i) {
    uv__free((char*) dir->dirents[i].name);
    dir->dirents[i].name = NULL;
  }

  return -1;
}
664
665
/* Close the uv_dir_t stored in req->ptr and release it. Always succeeds. */
static int uv__fs_closedir(uv_fs_t* req) {
  uv_dir_t* dir = req->ptr;

  if (dir->dir != NULL) {
    closedir(dir->dir);
    dir->dir = NULL;
  }

  uv__free(dir);
  req->ptr = NULL;
  return 0;
}
679
680
/* Stat the file system containing req->path. On success req->ptr owns a
 * freshly allocated uv_statfs_t; returns 0, or -1 with errno set. */
static int uv__fs_statfs(uv_fs_t* req) {
  uv_statfs_t* stat_fs;
#if defined(__sun) || \
    defined(__MVS__) || \
    defined(__NetBSD__) || \
    defined(__HAIKU__) || \
    defined(__QNX__)
  struct statvfs buf;

  if (0 != statvfs(req->path, &buf))
#else
  struct statfs buf;

  if (0 != statfs(req->path, &buf))
#endif /* defined(__sun) */
    return -1;

  stat_fs = uv__malloc(sizeof(*stat_fs));
  if (stat_fs == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__sun) || \
    defined(__MVS__) || \
    defined(__OpenBSD__) || \
    defined(__NetBSD__) || \
    defined(__HAIKU__) || \
    defined(__QNX__)
  stat_fs->f_type = 0;  /* f_type is not supported. */
#else
  stat_fs->f_type = buf.f_type;
#endif
  stat_fs->f_bsize = buf.f_bsize;
  stat_fs->f_blocks = buf.f_blocks;
  stat_fs->f_bfree = buf.f_bfree;
  stat_fs->f_bavail = buf.f_bavail;
  stat_fs->f_files = buf.f_files;
  stat_fs->f_ffree = buf.f_ffree;
  req->ptr = stat_fs;
  return 0;
}
722
723
static ssize_t uv__fs_pathmax_size(const char* path) {
724
ssize_t pathmax;
725
726
pathmax = pathconf(path, _PC_PATH_MAX);
727
728
if (pathmax == -1)
729
pathmax = UV__PATH_MAX;
730
731
return pathmax;
732
}
733
734
/* Read the target of the symlink req->path into a freshly allocated,
 * NUL-terminated string stored in req->ptr. Returns 0 or -1. */
static ssize_t uv__fs_readlink(uv_fs_t* req) {
  ssize_t maxlen;
  ssize_t len;
  char* buf;

#if defined(_POSIX_PATH_MAX) || defined(PATH_MAX)
  maxlen = uv__fs_pathmax_size(req->path);
#else
  /* No usable PATH_MAX: size the buffer from the link itself. */
  struct stat st;
  int ret;
  ret = lstat(req->path, &st);
  if (ret != 0)
    return -1;
  if (!S_ISLNK(st.st_mode)) {
    errno = EINVAL;
    return -1;
  }

  maxlen = st.st_size;

  /* Per readlink(2), lstat() can report st_size == 0 for some symlinks,
   * such as those under /proc or /sys. */
  if (maxlen == 0)
    maxlen = uv__fs_pathmax_size(req->path);
#endif

  buf = uv__malloc(maxlen);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__MVS__)
  len = os390_readlink(req->path, buf, maxlen);
#else
  len = readlink(req->path, buf, maxlen);
#endif

  if (len == -1) {
    uv__free(buf);
    return -1;
  }

  /* Uncommon case: the target filled the buffer exactly; grow it by one
   * byte to make room for the trailing NUL. */
  if (len == maxlen) {
    buf = uv__reallocf(buf, len + 1);

    if (buf == NULL)
      return -1;
  }

  buf[len] = '\0';
  req->ptr = buf;

  return 0;
}
792
793
/* Resolve req->path to a canonical absolute path stored in req->ptr.
 * Returns 0 on success, -1 with errno set on failure. */
static ssize_t uv__fs_realpath(uv_fs_t* req) {
  char* resolved;

#if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
  /* POSIX.1-2008 realpath() allocates the result buffer itself. */
  resolved = realpath(req->path, NULL);
  if (resolved == NULL)
    return -1;
#else
  /* Older systems need a caller-provided buffer. */
  ssize_t len;

  len = uv__fs_pathmax_size(req->path);
  resolved = uv__malloc(len + 1);

  if (resolved == NULL) {
    errno = ENOMEM;
    return -1;
  }

  if (realpath(req->path, resolved) == NULL) {
    uv__free(resolved);
    return -1;
  }
#endif

  req->ptr = resolved;

  return 0;
}
821
822
/* Byte-pump fallback for uv__fs_sendfile(): copy up to req->bufsml[0].len
 * bytes from the source fd (req->flags) to the target fd (req->file)
 * through a stack buffer.
 *
 * Error rules:
 *  1. Read errors are reported only when nothing has been sent yet;
 *     otherwise the byte count so far is returned, so the caller knows not
 *     to resend that data.
 *  2. Write errors are always reported -- they mean data loss: we read the
 *     bytes but could not write them out.
 *
 * pread() is preferred; we drop to plain read() when the source does not
 * support positional reads (e.g. a pipe). EAGAIN on the target is handled
 * by poll()ing until it becomes writable again. On any path except a write
 * error, req->off is advanced by the amount consumed from the source.
 */
static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
  struct pollfd pfd;
  int use_pread;
  off_t offset;
  ssize_t nsent;
  ssize_t nread;
  ssize_t nwritten;
  size_t buflen;
  size_t len;
  ssize_t n;
  int in_fd;
  int out_fd;
  char buf[8192];

  len = req->bufsml[0].len;
  in_fd = req->flags;
  out_fd = req->file;
  offset = req->off;
  use_pread = 1;

  for (nsent = 0; (size_t) nsent < len; ) {
    buflen = len - nsent;

    if (buflen > sizeof(buf))
      buflen = sizeof(buf);

    do
      if (use_pread)
        nread = pread(in_fd, buf, buflen, offset);
      else
        nread = read(in_fd, buf, buflen);
    while (nread == -1 && errno == EINTR);

    if (nread == 0)
      goto out;  /* EOF on the source. */

    if (nread == -1) {
      /* The source may not support positional reads; retry with read(). */
      if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
        use_pread = 0;
        continue;
      }

      if (nsent == 0)
        nsent = -1;

      goto out;
    }

    for (nwritten = 0; nwritten < nread; ) {
      do
        n = write(out_fd, buf + nwritten, nread - nwritten);
      while (n == -1 && errno == EINTR);

      if (n != -1) {
        nwritten += n;
        continue;
      }

      if (errno != EAGAIN && errno != EWOULDBLOCK) {
        nsent = -1;
        goto out;
      }

      /* Non-blocking target is full; wait until it drains. */
      pfd.fd = out_fd;
      pfd.events = POLLOUT;
      pfd.revents = 0;

      do
        n = poll(&pfd, 1, -1);
      while (n == -1 && errno == EINTR);

      if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
        errno = EIO;
        nsent = -1;
        goto out;
      }
    }

    offset += nread;
    nsent += nread;
  }

out:
  if (nsent != -1)
    req->off = offset;

  return nsent;
}
934
935
936
#ifdef __linux__
/* Return the running Linux kernel version packed as
 * major * 65536 + minor * 256 + patch, or 0 when it cannot be determined.
 * Caches the answer after the first successful probe. */
static unsigned uv__kernel_version(void) {
  static unsigned cached_version;
  struct utsname u;
  unsigned version;
  unsigned major;
  unsigned minor;
  unsigned patch;

  version = uv__load_relaxed(&cached_version);
  if (version != 0)
    return version;

  if (uname(&u) == -1)
    return 0;

  if (sscanf(u.release, "%u.%u.%u", &major, &minor, &patch) != 3)
    return 0;

  version = major * 65536 + minor * 256 + patch;
  uv__store_relaxed(&cached_version, version);

  return version;
}
960
961
962
/* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command
963
* in copy_file_range() when it shouldn't. There is no workaround except to
964
* fall back to a regular copy.
965
*/
966
static int uv__is_buggy_cephfs(int fd) {
967
struct statfs s;
968
969
if (-1 == fstatfs(fd, &s))
970
return 0;
971
972
if (s.f_type != /* CephFS */ 0xC36400)
973
return 0;
974
975
return uv__kernel_version() < /* 4.20.0 */ 0x041400;
976
}
977
978
979
static int uv__is_cifs_or_smb(int fd) {
980
struct statfs s;
981
982
if (-1 == fstatfs(fd, &s))
983
return 0;
984
985
switch ((unsigned) s.f_type) {
986
case 0x0000517Bu: /* SMB */
987
case 0xFE534D42u: /* SMB2 */
988
case 0xFF534D42u: /* CIFS */
989
return 1;
990
}
991
992
return 0;
993
}
994
995
996
/* Wrapper around copy_file_range(2) that maps file-system specific failure
 * modes onto ENOSYS, signalling the caller to use the sendfile()/emulation
 * fallback. A genuine kernel-level ENOSYS is remembered so later calls
 * fail fast without issuing the syscall. */
static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
                                          int out_fd, size_t len) {
  static int no_copy_file_range_support;
  ssize_t r;

  if (uv__load_relaxed(&no_copy_file_range_support)) {
    errno = ENOSYS;
    return -1;
  }

  r = uv__fs_copy_file_range(in_fd, off, out_fd, NULL, len, 0);

  if (r != -1)
    return r;

  switch (errno) {
    case EACCES:
      /* Pre-4.20 kernels have a bug where CephFS uses the RADOS
       * copy-from command when it shouldn't.
       */
      if (uv__is_buggy_cephfs(in_fd))
        errno = ENOSYS;  /* Use fallback. */
      break;
    case ENOSYS:
      uv__store_relaxed(&no_copy_file_range_support, 1);
      break;
    case EPERM:
      /* It's been reported that CIFS spuriously fails.
       * Consider it a transient error.
       */
      if (uv__is_cifs_or_smb(out_fd))
        errno = ENOSYS;  /* Use fallback. */
      break;
    case ENOTSUP:
    case EXDEV:
      /* ENOTSUP - it could work on another file system type.
       * EXDEV - it will not work when in_fd and out_fd are not on the same
       *         mounted filesystem (pre Linux 5.3).
       */
      errno = ENOSYS;  /* Use fallback. */
      break;
  }

  return -1;
}

#endif /* __linux__ */
1043
1044
1045
/* Copy req->bufsml[0].len bytes from the source fd (req->flags) to the
 * target fd (req->file), starting at req->off, using the fastest primitive
 * the platform provides; falls back to uv__fs_sendfile_emul() when the
 * primitive rejects this fd combination. Advances req->off on success. */
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
  int in_fd;
  int out_fd;

  in_fd = req->flags;
  out_fd = req->file;

#if defined(__linux__) || defined(__sun)
  {
    off_t off;
    ssize_t r;
    size_t len;
    int try_sendfile;

    off = req->off;
    len = req->bufsml[0].len;

#ifdef __linux__
    /* Prefer copy_file_range(); ENOSYS (possibly synthesized) means we
     * should try sendfile() instead. */
    r = uv__fs_try_copy_file_range(in_fd, &off, out_fd, len);
    try_sendfile = (r == -1 && errno == ENOSYS);
#else
    try_sendfile = 1;
#endif

    if (try_sendfile)
      r = sendfile(out_fd, in_fd, &off, len);

    /* sendfile() on SunOS returns EINVAL if the target fd is not a socket
     * but it still writes out data. Fortunately, we can detect that by
     * checking whether the offset was advanced. */
    if (r != -1 || off > req->off) {
      r = off - req->off;
      req->off = off;
      return r;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#elif defined(__APPLE__) || \
      defined(__DragonFly__) || \
      defined(__FreeBSD__) || \
      defined(__FreeBSD_kernel__)
  {
    off_t len;
    ssize_t r;

    /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is
     * in non-blocking mode and not all data could be written. If a non-zero
     * number of bytes have been sent, we don't consider it an error. */

#if defined(__FreeBSD__) || defined(__DragonFly__)
#if defined(__FreeBSD__)
    off_t off;

    off = req->off;
    r = uv__fs_copy_file_range(in_fd, &off, out_fd, NULL, req->bufsml[0].len, 0);
    if (r >= 0) {
      r = off - req->off;
      req->off = off;
      return r;
    }
#endif
    len = 0;
    r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
#elif defined(__FreeBSD_kernel__)
    len = 0;
    r = bsd_sendfile(in_fd,
                     out_fd,
                     req->off,
                     req->bufsml[0].len,
                     NULL,
                     &len,
                     0);
#else
    /* Darwin's sendfile() treats len as an in/out parameter: seed it with
     * the number of bytes the caller wants sent. */
    len = req->bufsml[0].len;
    r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
#endif

    /* The sendfile(2) man page on DragonFly states that `len` contains a
     * meaningful value ONLY for EAGAIN and EINTR. Nothing is said about its
     * value on other errors, so better not depend on the potentially wrong
     * assumption that it was not modified by the syscall. */
    if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
      req->off += len;
      return (ssize_t) len;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#else
  /* Squelch compiler warnings. */
  (void) &in_fd;
  (void) &out_fd;

  return uv__fs_sendfile_emul(req);
#endif
}
1165
1166
1167
/* Set the access and modification times of the path in req->path
 * (following symlinks), using the best primitive the platform offers.
 * Returns -1 with errno set on failure (ENOSYS where unsupported). */
static ssize_t uv__fs_utime(uv_fs_t* req) {
#if defined(__linux__) \
    || defined(_AIX71) \
    || defined(__sun) \
    || defined(__HAIKU__)
  /* utimensat() takes nanosecond-resolution timespecs. */
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return utimensat(AT_FDCWD, req->path, ts, 0);
#elif defined(__APPLE__) \
    || defined(__DragonFly__) \
    || defined(__FreeBSD__) \
    || defined(__FreeBSD_kernel__) \
    || defined(__NetBSD__) \
    || defined(__OpenBSD__)
  /* BSD-style systems only offer microsecond-resolution timevals. */
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
  return utimes(req->path, tv);
#elif defined(_AIX) \
    && !defined(_AIX71)
  /* Old AIX: whole-second resolution only. */
  struct utimbuf buf;
  buf.actime = req->atime;
  buf.modtime = req->mtime;
  return utime(req->path, &buf);
#elif defined(__MVS__)
  /* z/OS: set times through the attribute-change syscall. */
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __lchattr((char*) req->path, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
1205
1206
1207
/* Like uv__fs_utime(), but operates on the symlink itself rather than its
 * target. Returns -1 with errno = ENOSYS on platforms with no primitive. */
static ssize_t uv__fs_lutime(uv_fs_t* req) {
#if defined(__linux__) || \
    defined(_AIX71) || \
    defined(__sun) || \
    defined(__HAIKU__) || \
    defined(__GNU__) || \
    defined(__OpenBSD__)
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  /* AT_SYMLINK_NOFOLLOW makes utimensat() touch the link, not the target. */
  return utimensat(AT_FDCWD, req->path, ts, AT_SYMLINK_NOFOLLOW);
#elif defined(__APPLE__) || \
      defined(__DragonFly__) || \
      defined(__FreeBSD__) || \
      defined(__FreeBSD_kernel__) || \
      defined(__NetBSD__)
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
  return lutimes(req->path, tv);
#else
  errno = ENOSYS;
  return -1;
#endif
}
1232
1233
1234
/* Write req->bufs to req->file: positionally when req->off >= 0, otherwise
 * at the current file offset. Returns the number of bytes written or -1
 * with errno set. */
static ssize_t uv__fs_write(uv_fs_t* req) {
#if defined(__linux__)
  /* Set once pwritev(2) has reported ENOSYS so later calls go straight to
   * the single-buffer fallback. Accessed with relaxed atomics for
   * consistency with the no_preadv flag in uv__fs_read(); this function
   * runs concurrently on threadpool workers, so plain loads/stores of the
   * shared flag were a data race. */
  static int no_pwritev;
#endif
  ssize_t r;

  /* Serialize writes on OS X, concurrent write() and pwrite() calls result in
   * data loss. We can't use a per-file descriptor lock, the descriptor may be
   * a dup().
   */
#if defined(__APPLE__)
  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  if (pthread_mutex_lock(&lock))
    abort();
#endif

  if (req->off < 0) {
    if (req->nbufs == 1)
      r = write(req->file, req->bufs[0].base, req->bufs[0].len);
    else
      r = writev(req->file, (struct iovec*) req->bufs, req->nbufs);
  } else {
    if (req->nbufs == 1) {
      r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
      goto done;
    }
#if HAVE_PREADV
    r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
    /* The `if (...) retry:` construct targets the fallback block below
     * while keeping it skipped on the first pass (same trick as in
     * uv__fs_read). */
# if defined(__linux__)
    if (uv__load_relaxed(&no_pwritev)) retry:
# endif
    {
      r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
    }
# if defined(__linux__)
    else {
      r = uv__pwritev(req->file,
                      (struct iovec*) req->bufs,
                      req->nbufs,
                      req->off);
      if (r == -1 && errno == ENOSYS) {
        uv__store_relaxed(&no_pwritev, 1);
        goto retry;
      }
    }
# endif
#endif
  }

done:
#if defined(__APPLE__)
  if (pthread_mutex_unlock(&lock))
    abort();
#endif

  return r;
}
1293
1294
/* Copy req->path to req->new_path, honoring the UV_FS_COPYFILE_* flags:
 *   - EXCL: fail if the destination already exists (O_EXCL);
 *   - FICLONE / FICLONE_FORCE: try a reflink clone first (Linux FICLONE);
 *     FORCE errors out instead of falling back to a byte copy.
 * Uses goto-based cleanup: `err` carries the first failure; on any error the
 * destination file is unlinked. Returns 0 or -1 with errno set. */
static ssize_t uv__fs_copyfile(uv_fs_t* req) {
  uv_fs_t fs_req;
  uv_file srcfd;
  uv_file dstfd;
  struct stat src_statsbuf;
  struct stat dst_statsbuf;
  int dst_flags;
  int result;
  int err;
  off_t bytes_to_send;
  off_t in_offset;
  off_t bytes_written;
  size_t bytes_chunk;

  dstfd = -1;
  err = 0;

  /* Open the source file. */
  srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
  uv_fs_req_cleanup(&fs_req);

  if (srcfd < 0)
    return srcfd;

  /* Get the source file's mode. */
  if (fstat(srcfd, &src_statsbuf)) {
    err = UV__ERR(errno);
    goto out;
  }

  dst_flags = O_WRONLY | O_CREAT;

  if (req->flags & UV_FS_COPYFILE_EXCL)
    dst_flags |= O_EXCL;

  /* Open the destination file. */
  dstfd = uv_fs_open(NULL,
                     &fs_req,
                     req->new_path,
                     dst_flags,
                     src_statsbuf.st_mode,
                     NULL);
  uv_fs_req_cleanup(&fs_req);

  if (dstfd < 0) {
    err = dstfd;
    goto out;
  }

  /* If the file is not being opened exclusively, verify that the source and
     destination are not the same file. If they are the same, bail out early. */
  if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
    /* Get the destination file's mode. */
    if (fstat(dstfd, &dst_statsbuf)) {
      err = UV__ERR(errno);
      goto out;
    }

    /* Check if srcfd and dstfd refer to the same file */
    if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
        src_statsbuf.st_ino == dst_statsbuf.st_ino) {
      goto out;
    }

    /* Truncate the file in case the destination already existed. */
    if (ftruncate(dstfd, 0) != 0) {
      err = UV__ERR(errno);
      goto out;
    }
  }

  if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
    err = UV__ERR(errno);
#ifdef __linux__
    /* fchmod() on CIFS shares always fails with EPERM unless the share is
     * mounted with "noperm". As fchmod() is a meaningless operation on such
     * shares anyway, detect that condition and squelch the error.
     */
    if (err != UV_EPERM)
      goto out;

    if (!uv__is_cifs_or_smb(dstfd))
      goto out;

    err = 0;
#else  /* !__linux__ */
    goto out;
#endif  /* !__linux__ */
  }

#ifdef FICLONE
  if (req->flags & UV_FS_COPYFILE_FICLONE ||
      req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    if (ioctl(dstfd, FICLONE, srcfd) == 0) {
      /* ioctl() with FICLONE succeeded. */
      goto out;
    }
    /* If an error occurred and force was set, return the error to the caller;
     * fall back to sendfile() when force was not set. */
    if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
      err = UV__ERR(errno);
      goto out;
    }
  }
#else
  if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    err = UV_ENOSYS;
    goto out;
  }
#endif

  /* Byte-copy loop: sendfile() the source in SSIZE_MAX-sized chunks. */
  bytes_to_send = src_statsbuf.st_size;
  in_offset = 0;
  while (bytes_to_send != 0) {
    bytes_chunk = SSIZE_MAX;
    if (bytes_to_send < (off_t) bytes_chunk)
      bytes_chunk = bytes_to_send;
    uv_fs_sendfile(NULL, &fs_req, dstfd, srcfd, in_offset, bytes_chunk, NULL);
    bytes_written = fs_req.result;
    uv_fs_req_cleanup(&fs_req);

    if (bytes_written < 0) {
      err = bytes_written;
      break;
    }

    bytes_to_send -= bytes_written;
    in_offset += bytes_written;
  }

out:
  if (err < 0)
    result = err;
  else
    result = 0;

  /* Close the source file. */
  err = uv__close_nocheckstdio(srcfd);

  /* Don't overwrite any existing errors. */
  if (err != 0 && result == 0)
    result = err;

  /* Close the destination file if it is open. */
  if (dstfd >= 0) {
    err = uv__close_nocheckstdio(dstfd);

    /* Don't overwrite any existing errors. */
    if (err != 0 && result == 0)
      result = err;

    /* Remove the destination file if something went wrong. */
    if (result != 0) {
      uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
      /* Ignore the unlink return value, as an error already happened. */
      uv_fs_req_cleanup(&fs_req);
    }
  }

  if (result == 0)
    return 0;

  /* Report failure via errno so uv__fs_work() picks it up uniformly. */
  errno = UV__ERR(result);
  return -1;
}
1459
1460
/* Convert a platform `struct stat` into libuv's portable uv_stat_t.
 * The #ifdef ladder selects where each platform keeps its nanosecond
 * timestamps; platforms without a real birth time reuse st_ctime, and
 * fields with no equivalent (st_flags, st_gen) are zeroed. */
static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
  dst->st_dev = src->st_dev;
  dst->st_mode = src->st_mode;
  dst->st_nlink = src->st_nlink;
  dst->st_uid = src->st_uid;
  dst->st_gid = src->st_gid;
  dst->st_rdev = src->st_rdev;
  dst->st_ino = src->st_ino;
  dst->st_size = src->st_size;
  dst->st_blksize = src->st_blksize;
  dst->st_blocks = src->st_blocks;

#if defined(__APPLE__)
  /* macOS: timespecs live in st_*timespec; real birthtime available. */
  dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
  dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
  dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
#elif defined(__ANDROID__)
  /* Android: separate st_*timensec fields for the nanosecond part. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = src->st_atimensec;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = src->st_mtimensec;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = src->st_ctimensec;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = src->st_ctimensec;
  dst->st_flags = 0;
  dst->st_gen = 0;
#elif !defined(_AIX) &&         \
    !defined(__MVS__) && (      \
    defined(__DragonFly__)   || \
    defined(__FreeBSD__)     || \
    defined(__OpenBSD__)     || \
    defined(__NetBSD__)      || \
    defined(_GNU_SOURCE)     || \
    defined(_BSD_SOURCE)     || \
    defined(_SVID_SOURCE)    || \
    defined(_XOPEN_SOURCE)   || \
    defined(_DEFAULT_SOURCE))
  /* Platforms exposing POSIX.1-2008 st_*tim timespec members. */
  dst->st_atim.tv_sec = src->st_atim.tv_sec;
  dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
# if defined(__FreeBSD__)    || \
     defined(__NetBSD__)
  dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
# else
  dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
  dst->st_flags = 0;
  dst->st_gen = 0;
# endif
#else
  /* Lowest common denominator: second-resolution timestamps only. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = 0;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = 0;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = 0;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = 0;
  dst->st_flags = 0;
  dst->st_gen = 0;
#endif
}
1536
1537
1538
/* Linux statx(2) front end for stat/lstat/fstat. Fills *buf on success.
 * Returns 0 on success, -1 with errno set on a genuine failure, or
 * UV_ENOSYS when statx is unavailable (a sticky `no_statx` flag suppresses
 * further attempts); callers then fall back to the classic stat family.
 * On non-Linux platforms it is always UV_ENOSYS. */
static int uv__fs_statx(int fd,
                        const char* path,
                        int is_fstat,
                        int is_lstat,
                        uv_stat_t* buf) {
  STATIC_ASSERT(UV_ENOSYS != -1);
#ifdef __linux__
  static int no_statx;
  struct uv__statx statxbuf;
  int dirfd;
  int flags;
  int mode;
  int rc;

  if (uv__load_relaxed(&no_statx))
    return UV_ENOSYS;

  dirfd = AT_FDCWD;
  flags = 0; /* AT_STATX_SYNC_AS_STAT */
  mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */

  if (is_fstat) {
    /* fstat form: stat the fd itself via an empty path. */
    dirfd = fd;
    flags |= 0x1000; /* AT_EMPTY_PATH */
  }

  if (is_lstat)
    flags |= AT_SYMLINK_NOFOLLOW;

  rc = uv__statx(dirfd, path, flags, mode, &statxbuf);

  switch (rc) {
  case 0:
    break;
  case -1:
    /* EPERM happens when a seccomp filter rejects the system call.
     * Has been observed with libseccomp < 2.3.3 and docker < 18.04.
     * EOPNOTSUPP is used on DVS exported filesystems
     */
    if (errno != EINVAL && errno != EPERM && errno != ENOSYS && errno != EOPNOTSUPP)
      return -1;
    /* Fall through. */
  default:
    /* Normally on success, zero is returned and On error, -1 is returned.
     * Observed on S390 RHEL running in a docker container with statx not
     * implemented, rc might return 1 with 0 set as the error code in which
     * case we return ENOSYS.
     */
    uv__store_relaxed(&no_statx, 1);
    return UV_ENOSYS;
  }

  /* Translate struct statx into the portable uv_stat_t layout. */
  buf->st_dev = makedev(statxbuf.stx_dev_major, statxbuf.stx_dev_minor);
  buf->st_mode = statxbuf.stx_mode;
  buf->st_nlink = statxbuf.stx_nlink;
  buf->st_uid = statxbuf.stx_uid;
  buf->st_gid = statxbuf.stx_gid;
  buf->st_rdev = makedev(statxbuf.stx_rdev_major, statxbuf.stx_rdev_minor);
  buf->st_ino = statxbuf.stx_ino;
  buf->st_size = statxbuf.stx_size;
  buf->st_blksize = statxbuf.stx_blksize;
  buf->st_blocks = statxbuf.stx_blocks;
  buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec;
  buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec;
  buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec;
  buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec;
  buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec;
  buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec;
  buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec;
  buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec;
  buf->st_flags = 0;
  buf->st_gen = 0;

  return 0;
#else
  return UV_ENOSYS;
#endif /* __linux__ */
}
1616
1617
1618
/* stat() the path into *buf. Tries the statx(2) fast path first and falls
 * back to plain stat(2) + uv__to_stat() conversion when statx reports
 * UV_ENOSYS. Returns 0 on success or -1 with errno set. */
static int uv__fs_stat(const char *path, uv_stat_t *buf) {
  struct stat sb;
  int rc;

  rc = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf);
  if (rc != UV_ENOSYS)
    return rc;

  rc = stat(path, &sb);
  if (rc != 0)
    return rc;

  uv__to_stat(&sb, buf);
  return 0;
}
1632
1633
1634
/* lstat() the path into *buf (does not follow a trailing symlink).
 * Tries statx(2) with AT_SYMLINK_NOFOLLOW first; falls back to lstat(2)
 * when statx reports UV_ENOSYS. Returns 0 or -1 with errno set. */
static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
  struct stat sb;
  int rc;

  rc = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf);
  if (rc != UV_ENOSYS)
    return rc;

  rc = lstat(path, &sb);
  if (rc != 0)
    return rc;

  uv__to_stat(&sb, buf);
  return 0;
}
1648
1649
1650
/* fstat() the open descriptor into *buf. Tries statx(2) with AT_EMPTY_PATH
 * first; falls back to fstat(2) when statx reports UV_ENOSYS.
 * Returns 0 or -1 with errno set. */
static int uv__fs_fstat(int fd, uv_stat_t *buf) {
  struct stat sb;
  int rc;

  rc = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf);
  if (rc != UV_ENOSYS)
    return rc;

  rc = fstat(fd, &sb);
  if (rc != 0)
    return rc;

  uv__to_stat(&sb, buf);
  return 0;
}
1664
1665
/* After a partial read/write of `size` bytes across `bufs`, return the index
 * of the first buffer that still has data pending. Fully consumed buffers
 * are skipped; a partially consumed buffer is adjusted in place so its
 * base/len describe only the remaining bytes. */
static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
  size_t idx = 0;

  /* Skip over buffers that were completely consumed. */
  while (size > 0 && bufs[idx].len <= size) {
    size -= bufs[idx].len;
    idx++;
  }

  /* Advance into the buffer that was only partially consumed, if any. */
  if (size > 0) {
    bufs[idx].base += size;
    bufs[idx].len -= size;
  }

  return idx;
}
1678
1679
/* Write all of req->bufs, looping over uv__fs_write() in IOV_MAX-sized
 * batches and retrying EINTR, advancing req->off and the buffer list after
 * each partial write. Frees a heap-allocated buffer list when done.
 * Returns the total bytes written, or the first error (<= 0) if nothing
 * was written yet. */
static ssize_t uv__fs_write_all(uv_fs_t* req) {
  unsigned int iovmax;
  unsigned int nbufs;
  uv_buf_t* bufs;
  ssize_t total;
  ssize_t result;

  iovmax = uv__getiovmax();
  nbufs = req->nbufs;
  bufs = req->bufs;   /* remember the original list so it can be freed */
  total = 0;

  while (nbufs > 0) {
    /* Clamp this batch to the platform's iovec limit. */
    req->nbufs = nbufs;
    if (req->nbufs > iovmax)
      req->nbufs = iovmax;

    do
      result = uv__fs_write(req);
    while (result < 0 && errno == EINTR);

    if (result <= 0) {
      /* Report the error only if no bytes were written at all;
       * otherwise return the count that did make it out. */
      if (total == 0)
        total = result;
      break;
    }

    /* Positioned writes advance the file offset manually. */
    if (req->off >= 0)
      req->off += result;

    /* uv__fs_buf_offset() returns how many bufs were fully consumed and
     * fixes up a partially-consumed buffer in place. */
    req->nbufs = uv__fs_buf_offset(req->bufs, result);
    req->bufs += req->nbufs;
    nbufs -= req->nbufs;
    total += result;
  }

  if (bufs != req->bufsml)
    uv__free(bufs);

  req->bufs = NULL;
  req->nbufs = 0;

  return total;
}
1723
1724
1725
/* Thread-pool worker: dispatch the request to the matching syscall/helper
 * via the X() macro table, retry EINTR for everything except CLOSE and READ,
 * and store the result (negated errno on failure) in req->result.
 * Successful stat-family requests also expose the statbuf through req->ptr. */
static void uv__fs_work(struct uv__work* w) {
  int retry_on_eintr;
  uv_fs_t* req;
  ssize_t r;

  req = container_of(w, uv_fs_t, work_req);
  /* CLOSE must not retry (fd state after EINTR is unspecified); READ's
   * buffers may already be partially filled. */
  retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
                     req->fs_type == UV_FS_READ);

  do {
    errno = 0;

#define X(type, action)                                                       \
  case UV_FS_ ## type:                                                        \
    r = action;                                                               \
    break;

    switch (req->fs_type) {
    X(ACCESS, access(req->path, req->flags));
    X(CHMOD, chmod(req->path, req->mode));
    X(CHOWN, chown(req->path, req->uid, req->gid));
    X(CLOSE, uv__fs_close(req->file));
    X(COPYFILE, uv__fs_copyfile(req));
    X(FCHMOD, fchmod(req->file, req->mode));
    X(FCHOWN, fchown(req->file, req->uid, req->gid));
    X(LCHOWN, lchown(req->path, req->uid, req->gid));
    X(FDATASYNC, uv__fs_fdatasync(req));
    X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
    X(FSYNC, uv__fs_fsync(req));
    X(FTRUNCATE, ftruncate(req->file, req->off));
    X(FUTIME, uv__fs_futime(req));
    X(LUTIME, uv__fs_lutime(req));
    X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
    X(LINK, link(req->path, req->new_path));
    X(MKDIR, mkdir(req->path, req->mode));
    X(MKDTEMP, uv__fs_mkdtemp(req));
    X(MKSTEMP, uv__fs_mkstemp(req));
    X(OPEN, uv__fs_open(req));
    X(READ, uv__fs_read(req));
    X(SCANDIR, uv__fs_scandir(req));
    X(OPENDIR, uv__fs_opendir(req));
    X(READDIR, uv__fs_readdir(req));
    X(CLOSEDIR, uv__fs_closedir(req));
    X(READLINK, uv__fs_readlink(req));
    X(REALPATH, uv__fs_realpath(req));
    X(RENAME, rename(req->path, req->new_path));
    X(RMDIR, rmdir(req->path));
    X(SENDFILE, uv__fs_sendfile(req));
    X(STAT, uv__fs_stat(req->path, &req->statbuf));
    X(STATFS, uv__fs_statfs(req));
    X(SYMLINK, symlink(req->path, req->new_path));
    X(UNLINK, unlink(req->path));
    X(UTIME, uv__fs_utime(req));
    X(WRITE, uv__fs_write_all(req));
    default: abort();
    }
#undef X
  } while (r == -1 && errno == EINTR && retry_on_eintr);

  if (r == -1)
    req->result = UV__ERR(errno);
  else
    req->result = r;

  if (r == 0 && (req->fs_type == UV_FS_STAT ||
                 req->fs_type == UV_FS_FSTAT ||
                 req->fs_type == UV_FS_LSTAT)) {
    req->ptr = &req->statbuf;
  }
}
1795
1796
1797
static void uv__fs_done(struct uv__work* w, int status) {
1798
uv_fs_t* req;
1799
1800
req = container_of(w, uv_fs_t, work_req);
1801
uv__req_unregister(req->loop, req);
1802
1803
if (status == UV_ECANCELED) {
1804
assert(req->result == 0);
1805
req->result = UV_ECANCELED;
1806
}
1807
1808
req->cb(req);
1809
}
1810
1811
1812
/* access(2): async when cb != NULL, synchronous otherwise.
 * INIT/PATH/POST are macros (defined earlier in this file) that set up the
 * request, copy the path (may return UV_ENOMEM), and queue/run the work. */
int uv_fs_access(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 int flags,
                 uv_fs_cb cb) {
  INIT(ACCESS);
  PATH;
  req->flags = flags;
  POST;
}
1822
1823
1824
/* chmod(2) wrapper: async when cb != NULL, synchronous otherwise. */
int uv_fs_chmod(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(CHMOD);
  PATH;
  req->mode = mode;
  POST;
}
1834
1835
1836
/* chown(2) wrapper: async when cb != NULL, synchronous otherwise. */
int uv_fs_chown(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                uv_uid_t uid,
                uv_gid_t gid,
                uv_fs_cb cb) {
  INIT(CHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1848
1849
1850
/* close(2) wrapper: async when cb != NULL, synchronous otherwise. */
int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(CLOSE);
  req->file = file;
  POST;
}
1855
1856
1857
/* fchmod(2) wrapper: async when cb != NULL, synchronous otherwise. */
int uv_fs_fchmod(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 int mode,
                 uv_fs_cb cb) {
  INIT(FCHMOD);
  req->file = file;
  req->mode = mode;
  POST;
}
1867
1868
1869
/* fchown(2) wrapper: async when cb != NULL, synchronous otherwise. */
int uv_fs_fchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(FCHOWN);
  req->file = file;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1881
1882
1883
/* lchown(2) wrapper (does not follow symlinks): async when cb != NULL. */
int uv_fs_lchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(LCHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1895
1896
1897
/* fdatasync(2) wrapper: async when cb != NULL, synchronous otherwise. */
int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FDATASYNC);
  req->file = file;
  POST;
}
1902
1903
1904
/* fstat(2) wrapper: result lands in req->statbuf (req->ptr on success). */
int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSTAT);
  req->file = file;
  POST;
}
1909
1910
1911
/* fsync(2) wrapper: async when cb != NULL, synchronous otherwise. */
int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSYNC);
  req->file = file;
  POST;
}
1916
1917
1918
/* ftruncate(2) wrapper: the target length travels in req->off. */
int uv_fs_ftruncate(uv_loop_t* loop,
                    uv_fs_t* req,
                    uv_file file,
                    int64_t off,
                    uv_fs_cb cb) {
  INIT(FTRUNCATE);
  req->file = file;
  req->off = off;
  POST;
}
1928
1929
1930
/* futimes-style wrapper: set atime/mtime (seconds as doubles) on an fd. */
int uv_fs_futime(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(FUTIME);
  req->file = file;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
1942
1943
/* lutimes-style wrapper: set atime/mtime on a symlink without following it. */
int uv_fs_lutime(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(LUTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
1955
1956
1957
/* lstat(2) wrapper: result lands in req->statbuf (req->ptr on success). */
int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(LSTAT);
  PATH;
  POST;
}
1962
1963
1964
/* link(2) wrapper: PATH2 copies both path and new_path into one buffer. */
int uv_fs_link(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               const char* new_path,
               uv_fs_cb cb) {
  INIT(LINK);
  PATH2;
  POST;
}
1973
1974
1975
/* mkdir(2) wrapper: async when cb != NULL, synchronous otherwise. */
int uv_fs_mkdir(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(MKDIR);
  PATH;
  req->mode = mode;
  POST;
}
1985
1986
1987
/* mkdtemp(3) wrapper. Unlike PATH-based requests, the template is always
 * heap-copied (mkdtemp mutates it in place), even for synchronous calls;
 * uv_fs_req_cleanup() frees it. */
int uv_fs_mkdtemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKDTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}
1997
1998
1999
/* mkstemp(3) wrapper. The template is always heap-copied (mkstemp mutates
 * it in place), even for synchronous calls; uv_fs_req_cleanup() frees it. */
int uv_fs_mkstemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKSTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}
2009
2010
2011
/* open(2) wrapper: async when cb != NULL, synchronous otherwise.
 * On success the resulting fd is the request's result. */
int uv_fs_open(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               int flags,
               int mode,
               uv_fs_cb cb) {
  INIT(OPEN);
  PATH;
  req->flags = flags;
  req->mode = mode;
  POST;
}
2023
2024
2025
/* read(2)/preadv(2) wrapper. Copies the caller's buffer descriptors (not the
 * data) into the request; small counts reuse the inline req->bufsml array,
 * larger ones allocate. off < 0 means "read at the current file offset". */
int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
               uv_file file,
               const uv_buf_t bufs[],
               unsigned int nbufs,
               int64_t off,
               uv_fs_cb cb) {
  INIT(READ);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->file = file;

  req->nbufs = nbufs;
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL)
    return UV_ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  req->off = off;
  POST;
}
2051
2052
2053
/* scandir(3)-style directory listing: async when cb != NULL. */
int uv_fs_scandir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SCANDIR);
  PATH;
  req->flags = flags;
  POST;
}
2063
2064
/* opendir(3) wrapper: on success req->ptr holds a uv_dir_t for
 * uv_fs_readdir()/uv_fs_closedir(). */
int uv_fs_opendir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  uv_fs_cb cb) {
  INIT(OPENDIR);
  PATH;
  POST;
}
2072
2073
/* Read the next batch of entries from a uv_fs_opendir()'d directory.
 * The caller must have populated dir->dirents beforehand. */
int uv_fs_readdir(uv_loop_t* loop,
                  uv_fs_t* req,
                  uv_dir_t* dir,
                  uv_fs_cb cb) {
  INIT(READDIR);

  if (dir == NULL || dir->dir == NULL || dir->dirents == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}
2085
2086
/* Close a directory handle obtained from uv_fs_opendir(). */
int uv_fs_closedir(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_dir_t* dir,
                   uv_fs_cb cb) {
  INIT(CLOSEDIR);

  if (dir == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}
2098
2099
/* readlink(2) wrapper: on success req->ptr holds the target string. */
int uv_fs_readlink(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   uv_fs_cb cb) {
  INIT(READLINK);
  PATH;
  POST;
}
2107
2108
2109
/* realpath(3) wrapper: on success req->ptr holds the resolved path. */
int uv_fs_realpath(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char * path,
                   uv_fs_cb cb) {
  INIT(REALPATH);
  PATH;
  POST;
}
2117
2118
2119
/* rename(2) wrapper: PATH2 copies both path and new_path into one buffer. */
int uv_fs_rename(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 const char* new_path,
                 uv_fs_cb cb) {
  INIT(RENAME);
  PATH2;
  POST;
}
2128
2129
2130
/* rmdir(2) wrapper: async when cb != NULL, synchronous otherwise. */
int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(RMDIR);
  PATH;
  POST;
}
2135
2136
2137
/* sendfile(2)-style fd-to-fd copy of `len` bytes starting at `off`.
 * The request has no dedicated field for the input fd, so it is smuggled
 * through req->flags and the length through req->bufsml[0].len. */
int uv_fs_sendfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_file out_fd,
                   uv_file in_fd,
                   int64_t off,
                   size_t len,
                   uv_fs_cb cb) {
  INIT(SENDFILE);
  req->flags = in_fd; /* hack */
  req->file = out_fd;
  req->off = off;
  req->bufsml[0].len = len;
  POST;
}
2151
2152
2153
/* stat(2) wrapper: result lands in req->statbuf (req->ptr on success). */
int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(STAT);
  PATH;
  POST;
}
2158
2159
2160
/* symlink(2) wrapper; `flags` carries UV_FS_SYMLINK_* hints (Windows-only
 * semantics, passed through unchanged here). */
int uv_fs_symlink(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  const char* new_path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SYMLINK);
  PATH2;
  req->flags = flags;
  POST;
}
2171
2172
2173
/* unlink(2) wrapper: async when cb != NULL, synchronous otherwise. */
int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(UNLINK);
  PATH;
  POST;
}
2178
2179
2180
/* utime-style wrapper: set a path's atime/mtime (seconds as doubles). */
int uv_fs_utime(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                double atime,
                double mtime,
                uv_fs_cb cb) {
  INIT(UTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
2192
2193
2194
/* write(2)/pwritev(2) wrapper. Mirrors uv_fs_read(): buffer descriptors are
 * copied into the request (inline bufsml for small counts, heap otherwise);
 * off < 0 means "write at the current file offset". */
int uv_fs_write(uv_loop_t* loop,
                uv_fs_t* req,
                uv_file file,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                int64_t off,
                uv_fs_cb cb) {
  INIT(WRITE);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->file = file;

  req->nbufs = nbufs;
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL)
    return UV_ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  req->off = off;
  POST;
}
2221
2222
2223
/* Release all memory owned by a completed (or never-started) request.
 * Safe to call with NULL; idempotent because every freed pointer is nulled. */
void uv_fs_req_cleanup(uv_fs_t* req) {
  if (req == NULL)
    return;

  /* Only necessary for asychronous requests, i.e., requests with a callback.
   * Synchronous ones don't copy their arguments and have req->path and
   * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
   * UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.
   */
  if (req->path != NULL &&
      (req->cb != NULL ||
        req->fs_type == UV_FS_MKDTEMP || req->fs_type == UV_FS_MKSTEMP))
    uv__free((void*) req->path);  /* Memory is shared with req->new_path. */

  req->path = NULL;
  req->new_path = NULL;

  if (req->fs_type == UV_FS_READDIR && req->ptr != NULL)
    uv__fs_readdir_cleanup(req);

  if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
    uv__fs_scandir_cleanup(req);

  if (req->bufs != req->bufsml)
    uv__free(req->bufs);
  req->bufs = NULL;

  /* req->ptr may be the embedded statbuf (not heap memory) or, for OPENDIR,
   * a uv_dir_t owned by the caller until uv_fs_closedir(); skip both. */
  if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf)
    uv__free(req->ptr);
  req->ptr = NULL;
}
2254
2255
2256
/* Public copyfile entry point: validates the flag mask (only EXCL, FICLONE
 * and FICLONE_FORCE are defined) before queueing uv__fs_copyfile(). */
int uv_fs_copyfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   const char* new_path,
                   int flags,
                   uv_fs_cb cb) {
  INIT(COPYFILE);

  if (flags & ~(UV_FS_COPYFILE_EXCL |
                UV_FS_COPYFILE_FICLONE |
                UV_FS_COPYFILE_FICLONE_FORCE)) {
    return UV_EINVAL;
  }

  PATH2;
  req->flags = flags;
  POST;
}
2274
2275
2276
/* statfs/statvfs wrapper: on success req->ptr holds a uv_statfs_t. */
int uv_fs_statfs(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_fs_cb cb) {
  INIT(STATFS);
  PATH;
  POST;
}
2284
2285
/* Map the request's (negated-errno) result back to a positive system
 * error number, matching the Windows implementation's semantics. */
int uv_fs_get_system_error(const uv_fs_t* req) {
  return -req->result;
}
2288
2289