/* Source: Kitware/CMake, Utilities/cmlibuv/src/unix/core.c (vendored libuv). */
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2
* Permission is hereby granted, free of charge, to any person obtaining a copy
3
* of this software and associated documentation files (the "Software"), to
4
* deal in the Software without restriction, including without limitation the
5
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
6
* sell copies of the Software, and to permit persons to whom the Software is
7
* furnished to do so, subject to the following conditions:
8
*
9
* The above copyright notice and this permission notice shall be included in
10
* all copies or substantial portions of the Software.
11
*
12
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
15
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
16
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
18
* IN THE SOFTWARE.
19
*/
20
21
#include "uv.h"
22
#include "internal.h"
23
#include "strtok.h"
24
25
#include <stddef.h> /* NULL */
26
#include <stdio.h> /* printf */
27
#include <stdlib.h>
28
#include <string.h> /* strerror */
29
#include <errno.h>
30
#include <assert.h>
31
#include <unistd.h>
32
#include <sys/types.h>
33
#include <sys/stat.h>
34
#include <fcntl.h> /* O_CLOEXEC */
35
#include <sys/ioctl.h>
36
#include <sys/socket.h>
37
#include <sys/un.h>
38
#include <netinet/in.h>
39
#include <arpa/inet.h>
40
#include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
41
#include <sys/uio.h> /* writev */
42
#include <sys/resource.h> /* getrusage */
43
#include <pwd.h>
44
#include <sched.h>
45
#include <sys/utsname.h>
46
#include <sys/time.h>
47
48
#ifdef __sun
49
# include <sys/filio.h>
50
# include <sys/types.h>
51
# include <sys/wait.h>
52
#endif
53
54
#if defined(__APPLE__)
55
# include <sys/filio.h>
56
# endif /* defined(__APPLE__) */
57
58
59
#if defined(__APPLE__) && !TARGET_OS_IPHONE
60
# include <crt_externs.h>
61
# include <mach-o/dyld.h> /* _NSGetExecutablePath */
62
# define environ (*_NSGetEnviron())
63
#else /* defined(__APPLE__) && !TARGET_OS_IPHONE */
64
extern char** environ;
65
#endif /* !(defined(__APPLE__) && !TARGET_OS_IPHONE) */
66
67
68
#if defined(__DragonFly__) || \
69
defined(__FreeBSD__) || \
70
defined(__FreeBSD_kernel__) || \
71
defined(__NetBSD__) || \
72
defined(__OpenBSD__)
73
# include <sys/sysctl.h>
74
# include <sys/filio.h>
75
# include <sys/wait.h>
76
# if defined(__FreeBSD__)
77
# define uv__accept4 accept4
78
# endif
79
# if defined(__NetBSD__)
80
# define uv__accept4(a, b, c, d) paccept((a), (b), (c), NULL, (d))
81
# endif
82
#endif
83
84
#if defined(__FreeBSD__)
85
# include <sys/param.h>
86
# include <sys/cpuset.h>
87
#endif
88
89
#if defined(__MVS__)
90
# include <sys/ioctl.h>
91
# include "zos-sys-info.h"
92
#endif
93
94
#if defined(__linux__)
95
# include <sched.h>
96
# include <sys/syscall.h>
97
# define uv__accept4 accept4
98
#endif
99
100
#if defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
101
# include <sanitizer/linux_syscall_hooks.h>
102
#endif
103
104
static void uv__run_pending(uv_loop_t* loop);
105
106
/* Verify that uv_buf_t is ABI-compatible with struct iovec so buffers can be
 * handed straight to readv()/writev() without any per-call conversion. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->base) ==
              sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->len) ==
              sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
114
115
116
/* Return the current high-resolution time in nanoseconds (monotonic,
 * not related to wall-clock time). */
uint64_t uv_hrtime(void) {
  return uv__hrtime(UV_CLOCK_PRECISE);
}
119
120
121
/* Start closing a handle: mark it closing, remember the callback, run the
 * type-specific teardown, then queue the handle so close_cb fires on the
 * next loop iteration. Calling uv_close() twice on a handle is an error. */
void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  assert(!uv__is_closing(handle));

  handle->flags |= UV_HANDLE_CLOSING;
  handle->close_cb = close_cb;

  /* Dispatch to the per-type close routine. Most cases fall through to
   * uv__make_close_pending() below; a few must defer it (see comments). */
  switch (handle->type) {
  case UV_NAMED_PIPE:
    uv__pipe_close((uv_pipe_t*)handle);
    break;

  case UV_TTY:
    uv__stream_close((uv_stream_t*)handle);
    break;

  case UV_TCP:
    uv__tcp_close((uv_tcp_t*)handle);
    break;

  case UV_UDP:
    uv__udp_close((uv_udp_t*)handle);
    break;

  case UV_PREPARE:
    uv__prepare_close((uv_prepare_t*)handle);
    break;

  case UV_CHECK:
    uv__check_close((uv_check_t*)handle);
    break;

  case UV_IDLE:
    uv__idle_close((uv_idle_t*)handle);
    break;

  case UV_ASYNC:
    uv__async_close((uv_async_t*)handle);
    break;

  case UV_TIMER:
    uv__timer_close((uv_timer_t*)handle);
    break;

  case UV_PROCESS:
    uv__process_close((uv_process_t*)handle);
    break;

  case UV_FS_EVENT:
    uv__fs_event_close((uv_fs_event_t*)handle);
#if defined(__sun) || defined(__MVS__)
    /*
     * On Solaris, illumos, and z/OS we will not be able to dissociate the
     * watcher for an event which is pending delivery, so we cannot always call
     * uv__make_close_pending() straight away. The backend will call the
     * function once the event has cleared.
     */
    return;
#endif
    break;

  case UV_POLL:
    uv__poll_close((uv_poll_t*)handle);
    break;

  case UV_FS_POLL:
    uv__fs_poll_close((uv_fs_poll_t*)handle);
    /* Poll handles use file system requests, and one of them may still be
     * running. The poll code will call uv__make_close_pending() for us. */
    return;

  case UV_SIGNAL:
    uv__signal_close((uv_signal_t*) handle);
    break;

  default:
    assert(0);  /* Unknown handle type: programming error. */
  }

  uv__make_close_pending(handle);
}
201
202
/* Get (when *value == 0) or set (otherwise) a SOL_SOCKET-level option on the
 * fd underlying a TCP, pipe, or UDP handle.
 * Returns 0 on success or a negative libuv error code. */
int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
  int r;
  int fd;
  socklen_t len;

  if (handle == NULL || value == NULL)
    return UV_EINVAL;

  if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
    fd = uv__stream_fd((uv_stream_t*) handle);
  else if (handle->type == UV_UDP)
    fd = ((uv_udp_t *) handle)->io_watcher.fd;
  else
    return UV_ENOTSUP;

  len = sizeof(*value);

  /* Zero selects a read of the current option value into *value; any other
   * value is written to the option. */
  if (*value == 0)
    r = getsockopt(fd, SOL_SOCKET, optname, value, &len);
  else
    r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len);

  if (r < 0)
    return UV__ERR(errno);

  return 0;
}
229
230
/* Push a closing handle onto the loop's singly-linked closing list so
 * uv__finish_close() runs for it at the end of the current loop iteration. */
void uv__make_close_pending(uv_handle_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->next_closing = handle->loop->closing_handles;
  handle->loop->closing_handles = handle;
}
236
237
/* Return the maximum number of iovecs accepted by writev()/readv().
 * Uses IOV_MAX when the headers define it, otherwise queries (and caches)
 * sysconf(_SC_IOV_MAX), otherwise falls back to 1024. */
int uv__getiovmax(void) {
#if defined(IOV_MAX)
  return IOV_MAX;
#elif defined(_SC_IOV_MAX)
  static int iovmax_cached = -1;
  int iovmax;

  iovmax = uv__load_relaxed(&iovmax_cached);
  if (iovmax != -1)
    return iovmax;

  /* On some embedded devices (arm-linux-uclibc based ip camera),
   * sysconf(_SC_IOV_MAX) can not get the correct value. The return
   * value is -1 and the errno is EINPROGRESS. Degrade the value to 1.
   */
  iovmax = sysconf(_SC_IOV_MAX);
  if (iovmax == -1)
    iovmax = 1;

  uv__store_relaxed(&iovmax_cached, iovmax);

  return iovmax;
#else
  return 1024;
#endif
}
263
264
265
/* Final stage of closing a handle: mark it closed, run type-specific
 * destruction, unref it, remove it from the handle list, and invoke the
 * user's close callback. Signal handles may be re-queued (see below). */
static void uv__finish_close(uv_handle_t* handle) {
  uv_signal_t* sh;

  /* Note: while the handle is in the UV_HANDLE_CLOSING state now, it's still
   * possible for it to be active in the sense that uv__is_active() returns
   * true.
   *
   * A good example is when the user calls uv_shutdown(), immediately followed
   * by uv_close(). The handle is considered active at this point because the
   * completion of the shutdown req is still pending.
   */
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->flags |= UV_HANDLE_CLOSED;

  switch (handle->type) {
  case UV_PREPARE:
  case UV_CHECK:
  case UV_IDLE:
  case UV_ASYNC:
  case UV_TIMER:
  case UV_PROCESS:
  case UV_FS_EVENT:
  case UV_FS_POLL:
  case UV_POLL:
    break;  /* No extra teardown needed for these types. */

  case UV_SIGNAL:
    /* If there are any caught signals "trapped" in the signal pipe,
     * we can't call the close callback yet. Reinserting the handle
     * into the closing queue makes the event loop spin but that's
     * okay because we only need to deliver the pending events.
     */
    sh = (uv_signal_t*) handle;
    if (sh->caught_signals > sh->dispatched_signals) {
      handle->flags ^= UV_HANDLE_CLOSED;  /* Undo the CLOSED bit set above. */
      uv__make_close_pending(handle); /* Back into the queue. */
      return;
    }
    break;

  case UV_NAMED_PIPE:
  case UV_TCP:
  case UV_TTY:
    uv__stream_destroy((uv_stream_t*)handle);
    break;

  case UV_UDP:
    uv__udp_finish_close((uv_udp_t*)handle);
    break;

  default:
    assert(0);
    break;
  }

  uv__handle_unref(handle);
  QUEUE_REMOVE(&handle->handle_queue);

  /* close_cb is optional; only call it if the user supplied one. */
  if (handle->close_cb) {
    handle->close_cb(handle);
  }
}
328
329
330
/* Finish every handle currently queued on the loop's closing list.
 * The list is detached first because uv__finish_close() may re-queue a
 * handle (e.g. a signal handle with undelivered events). */
static void uv__run_closing_handles(uv_loop_t* loop) {
  uv_handle_t* handle;
  uv_handle_t* next;

  handle = loop->closing_handles;
  loop->closing_handles = NULL;

  for (; handle != NULL; handle = next) {
    /* Grab the link before uv__finish_close() runs the close callback. */
    next = handle->next_closing;
    uv__finish_close(handle);
  }
}
343
344
345
/* Public wrapper: nonzero if uv_close() has been called on the handle
 * (closing or already closed). */
int uv_is_closing(const uv_handle_t* handle) {
  return uv__is_closing(handle);
}
348
349
350
/* Return the fd of the backend poller (epoll/kqueue/event port), suitable
 * for embedding the loop in another event loop. */
int uv_backend_fd(const uv_loop_t* loop) {
  return loop->backend_fd;
}
353
354
355
static int uv__loop_alive(const uv_loop_t* loop) {
356
return uv__has_active_handles(loop) ||
357
uv__has_active_reqs(loop) ||
358
!QUEUE_EMPTY(&loop->pending_queue) ||
359
loop->closing_handles != NULL;
360
}
361
362
363
/* Compute how long uv__io_poll() may block: the time until the next timer,
 * or 0 when any immediate work exists (stop requested, pending/idle
 * callbacks, children to reap, or handles waiting to close). */
static int uv__backend_timeout(const uv_loop_t* loop) {
  if (loop->stop_flag == 0 &&
      /* uv__loop_alive(loop) && */
      (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
      QUEUE_EMPTY(&loop->pending_queue) &&
      QUEUE_EMPTY(&loop->idle_handles) &&
      (loop->flags & UV_LOOP_REAP_CHILDREN) == 0 &&
      loop->closing_handles == NULL)
    return uv__next_timeout(loop);
  return 0;
}
374
375
376
/* Public variant of uv__backend_timeout(): additionally returns 0 while
 * watchers still need to be (re)registered with the backend. */
int uv_backend_timeout(const uv_loop_t* loop) {
  if (QUEUE_EMPTY(&loop->watcher_queue))
    return uv__backend_timeout(loop);
  /* Need to call uv_run to update the backend fd state. */
  return 0;
}
382
383
384
/* Public wrapper around uv__loop_alive(). */
int uv_loop_alive(const uv_loop_t* loop) {
  return uv__loop_alive(loop);
}
387
388
389
/* Run the event loop. Phase order per iteration: update time, timers,
 * pending callbacks, idle, prepare, poll for I/O, pending (bounded), check,
 * closing handles. Returns nonzero if the loop is still alive (i.e. a
 * UV_RUN_ONCE/UV_RUN_NOWAIT caller should run it again). */
int uv_run(uv_loop_t* loop, uv_run_mode mode) {
  int timeout;
  int r;
  int can_sleep;

  r = uv__loop_alive(loop);
  if (!r)
    uv__update_time(loop);

  while (r != 0 && loop->stop_flag == 0) {
    uv__update_time(loop);
    uv__run_timers(loop);

    /* Decide before running callbacks whether the poll may block: if any
     * pending or idle work exists now, the poll below must not sleep. */
    can_sleep =
        QUEUE_EMPTY(&loop->pending_queue) && QUEUE_EMPTY(&loop->idle_handles);

    uv__run_pending(loop);
    uv__run_idle(loop);
    uv__run_prepare(loop);

    timeout = 0;
    if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
      timeout = uv__backend_timeout(loop);

    uv__io_poll(loop, timeout);

    /* Process immediate callbacks (e.g. write_cb) a small fixed number of
     * times to avoid loop starvation.*/
    for (r = 0; r < 8 && !QUEUE_EMPTY(&loop->pending_queue); r++)
      uv__run_pending(loop);

    /* Run one final update on the provider_idle_time in case uv__io_poll
     * returned because the timeout expired, but no events were received. This
     * call will be ignored if the provider_entry_time was either never set (if
     * the timeout == 0) or was already updated b/c an event was received.
     */
    uv__metrics_update_idle_time(loop);

    uv__run_check(loop);
    uv__run_closing_handles(loop);

    if (mode == UV_RUN_ONCE) {
      /* UV_RUN_ONCE implies forward progress: at least one callback must have
       * been invoked when it returns. uv__io_poll() can return without doing
       * I/O (meaning: no callbacks) when its timeout expires - which means we
       * have pending timers that satisfy the forward progress constraint.
       *
       * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
       * the check.
       */
      uv__update_time(loop);
      uv__run_timers(loop);
    }

    r = uv__loop_alive(loop);
    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
      break;
  }

  /* The if statement lets gcc compile it to a conditional store. Avoids
   * dirtying a cache line.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}
456
457
458
/* Public wrapper: refresh the loop's cached notion of "now". */
void uv_update_time(uv_loop_t* loop) {
  uv__update_time(loop);
}
461
462
463
/* Public wrapper: nonzero if the handle is active (referenced-and-started). */
int uv_is_active(const uv_handle_t* handle) {
  return uv__is_active(handle);
}
466
467
468
/* Open a socket in non-blocking close-on-exec mode, atomically if possible.
 * Returns the fd on success, a negative libuv error code on failure. */
int uv__socket(int domain, int type, int protocol) {
  int sockfd;
  int err;

#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  /* Fast path: set both flags atomically at creation time. */
  sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
  if (sockfd != -1)
    return sockfd;

  /* EINVAL means the kernel doesn't understand the flags; fall through to
   * the two-step fallback. Any other error is final. */
  if (errno != EINVAL)
    return UV__ERR(errno);
#endif

  sockfd = socket(domain, type, protocol);
  if (sockfd == -1)
    return UV__ERR(errno);

  err = uv__nonblock(sockfd, 1);
  if (err == 0)
    err = uv__cloexec(sockfd, 1);

  if (err) {
    uv__close(sockfd);
    return err;
  }

#if defined(SO_NOSIGPIPE)
  /* On BSD/macOS, suppress SIGPIPE on writes to a closed peer. Failure is
   * deliberately ignored; writes fall back to EPIPE handling. */
  {
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
  }
#endif

  return sockfd;
}
504
505
/* Open `path` read-only with close-on-exec set and wrap it in a stdio
 * stream. Returns NULL on failure (the fd is not leaked). */
FILE* uv__open_file(const char* path) {
  FILE* stream;
  int fd;

  fd = uv__open_cloexec(path, O_RDONLY);
  if (fd < 0)
    return NULL;

  stream = fdopen(fd, "r");
  if (stream == NULL)
    uv__close(fd);  /* fdopen failed; release the descriptor ourselves. */

  return stream;
}
520
521
522
/* Accept a connection, retrying on EINTR, and return a peer fd that is
 * non-blocking and close-on-exec (atomically via accept4/paccept where
 * available). Returns a negative libuv error code on failure. */
int uv__accept(int sockfd) {
  int peerfd;
  int err;

  (void) &err;  /* Silence "unused" warnings when uv__accept4 is defined. */
  assert(sockfd >= 0);

  do
#ifdef uv__accept4
    peerfd = uv__accept4(sockfd, NULL, NULL, SOCK_NONBLOCK|SOCK_CLOEXEC);
#else
    peerfd = accept(sockfd, NULL, NULL);
#endif
  while (peerfd == -1 && errno == EINTR);

  if (peerfd == -1)
    return UV__ERR(errno);

#ifndef uv__accept4
  /* Plain accept(): apply the flags after the fact. */
  err = uv__cloexec(peerfd, 1);
  if (err == 0)
    err = uv__nonblock(peerfd, 1);

  if (err != 0) {
    uv__close(peerfd);
    return err;
  }
#endif

  return peerfd;
}
553
554
555
/* close() on macos has the "interesting" quirk that it fails with EINTR
 * without closing the file descriptor when a thread is in the cancel state.
 * That's why libuv calls close$NOCANCEL() instead.
 *
 * glibc on linux has a similar issue: close() is a cancellation point and
 * will unwind the thread when it's in the cancel state. Work around that
 * by making the system call directly. Musl libc is unaffected.
 */
int uv__close_nocancel(int fd) {
#if defined(__APPLE__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdollar-in-identifier-extension"
#if defined(__LP64__) || TARGET_OS_IPHONE
  extern int close$NOCANCEL(int);
  return close$NOCANCEL(fd);
#else
  extern int close$NOCANCEL$UNIX2003(int);
  return close$NOCANCEL$UNIX2003(fd);
#endif
#pragma GCC diagnostic pop
#elif defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
  /* Tell TSan about the raw syscall so it can track the fd's lifetime. */
  long rc;
  __sanitizer_syscall_pre_close(fd);
  rc = syscall(SYS_close, fd);
  __sanitizer_syscall_post_close(rc, fd);
  return rc;
#elif defined(__linux__) && !defined(__SANITIZE_THREAD__)
  return syscall(SYS_close, fd);
#else
  return close(fd);
#endif
}
587
588
589
/* Close an fd without asserting that it isn't stdio, preserving errno and
 * treating EINTR/EINPROGRESS as success (the close still completed or will
 * complete; retrying could close an fd reused by another thread). */
int uv__close_nocheckstdio(int fd) {
  int saved_errno;
  int rc;

  assert(fd > -1); /* Catch uninitialized io_watcher.fd bugs. */

  saved_errno = errno;
  rc = uv__close_nocancel(fd);
  if (rc == -1) {
    rc = UV__ERR(errno);
    if (rc == UV_EINTR || rc == UV__ERR(EINPROGRESS))
      rc = 0; /* The close is in progress, not an error. */
    errno = saved_errno;
  }

  return rc;
}
606
607
608
/* Close an fd, asserting that libuv never closes stdin/stdout/stderr. */
int uv__close(int fd) {
  assert(fd > STDERR_FILENO); /* Catch stdio close bugs. */
#if defined(__MVS__)
  /* z/OS: drop any epoll bookkeeping tied to this fd first. */
  SAVE_ERRNO(epoll_file_close(fd));
#endif
  return uv__close_nocheckstdio(fd);
}
615
616
#if UV__NONBLOCK_IS_IOCTL
617
int uv__nonblock_ioctl(int fd, int set) {
618
int r;
619
620
do
621
r = ioctl(fd, FIONBIO, &set);
622
while (r == -1 && errno == EINTR);
623
624
if (r)
625
return UV__ERR(errno);
626
627
return 0;
628
}
629
#endif
630
631
632
/* Set (set != 0) or clear O_NONBLOCK on `fd` via fcntl(), retrying both
 * calls on EINTR. Returns 0 or a negative libuv error code. */
int uv__nonblock_fcntl(int fd, int set) {
  int current;
  int desired;

  do
    current = fcntl(fd, F_GETFL);
  while (current == -1 && errno == EINTR);

  if (current == -1)
    return UV__ERR(errno);

  /* Skip the second syscall when the flag already matches the request. */
  if (!!(current & O_NONBLOCK) == !!set)
    return 0;

  desired = set ? (current | O_NONBLOCK) : (current & ~O_NONBLOCK);

  do
    current = fcntl(fd, F_SETFL, desired);
  while (current == -1 && errno == EINTR);

  if (current)
    return UV__ERR(errno);

  return 0;
}
661
662
663
/* Set (set != 0) or clear FD_CLOEXEC on `fd`, retrying on EINTR.
 * Note: F_SETFD overwrites all fd flags; FD_CLOEXEC is the only one. */
int uv__cloexec(int fd, int set) {
  int fdflags;
  int rc;

  fdflags = set ? FD_CLOEXEC : 0;

  do
    rc = fcntl(fd, F_SETFD, fdflags);
  while (rc == -1 && errno == EINTR);

  if (rc)
    return UV__ERR(errno);

  return 0;
}
680
681
682
/* recvmsg() wrapper that marks any fds received via SCM_RIGHTS as
 * close-on-exec — atomically with MSG_CMSG_CLOEXEC where supported,
 * otherwise by walking the control messages afterwards (racy w.r.t. fork,
 * but the best those platforms offer). */
ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
#if defined(__ANDROID__) || \
    defined(__DragonFly__) || \
    defined(__FreeBSD__) || \
    defined(__NetBSD__) || \
    defined(__OpenBSD__) || \
    defined(__linux__)
  ssize_t rc;
  rc = recvmsg(fd, msg, flags | MSG_CMSG_CLOEXEC);
  if (rc == -1)
    return UV__ERR(errno);
  return rc;
#else
  struct cmsghdr* cmsg;
  int* pfd;
  int* end;
  ssize_t rc;
  rc = recvmsg(fd, msg, flags);
  if (rc == -1)
    return UV__ERR(errno);
  if (msg->msg_controllen == 0)
    return rc;
  /* Set FD_CLOEXEC on every descriptor passed in the ancillary data. */
  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
    if (cmsg->cmsg_type == SCM_RIGHTS)
      for (pfd = (int*) CMSG_DATA(cmsg),
           end = (int*) ((char*) cmsg + cmsg->cmsg_len);
           pfd < end;
           pfd += 1)
        uv__cloexec(*pfd, 1);
  return rc;
#endif
}
714
715
716
/* Copy the current working directory into `buffer` (without a trailing
 * slash, except for the root). On success *size is set to the string length.
 * If the buffer is too small, returns UV_ENOBUFS with *size set to the
 * required size including the terminating NUL. */
int uv_cwd(char* buffer, size_t* size) {
  char scratch[1 + UV__PATH_MAX];

  if (buffer == NULL || size == NULL)
    return UV_EINVAL;

  /* Try to read directly into the user's buffer first... */
  if (getcwd(buffer, *size) != NULL)
    goto fixup;

  if (errno != ERANGE)
    return UV__ERR(errno);

  /* ...or into scratch space if the user's buffer is too small
   * so we can report how much space to provide on the next try.
   */
  if (getcwd(scratch, sizeof(scratch)) == NULL)
    return UV__ERR(errno);

  buffer = scratch;

fixup:

  *size = strlen(buffer);

  /* Strip one trailing slash, but never reduce "/" to an empty string. */
  if (*size > 1 && buffer[*size - 1] == '/') {
    *size -= 1;
    buffer[*size] = '\0';
  }

  /* The scratch path fit but the caller's buffer didn't: report the size
   * needed (length plus NUL). */
  if (buffer == scratch) {
    *size += 1;
    return UV_ENOBUFS;
  }

  return 0;
}
753
754
755
/* Change the process's working directory.
 * Returns 0 or a negative libuv error code. */
int uv_chdir(const char* dir) {
  if (chdir(dir))
    return UV__ERR(errno);

  return 0;
}
761
762
763
/* Mark every open descriptor close-on-exec so children spawned later do not
 * inherit them. */
void uv_disable_stdio_inheritance(void) {
  int fd;

  /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
   * first 16 file descriptors. After that, bail out after the first error.
   */
  for (fd = 0; ; fd++)
    if (uv__cloexec(fd, 1) && fd > 15)
      break;
}
773
774
775
/* Retrieve the OS file descriptor behind a stream, UDP, or poll handle.
 * Returns UV_EINVAL for other handle types and UV_EBADF when the handle is
 * closing or has no fd yet. */
int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
  int fd_out;

  switch (handle->type) {
  case UV_TCP:
  case UV_NAMED_PIPE:
  case UV_TTY:
    fd_out = uv__stream_fd((uv_stream_t*) handle);
    break;

  case UV_UDP:
    fd_out = ((uv_udp_t *) handle)->io_watcher.fd;
    break;

  case UV_POLL:
    fd_out = ((uv_poll_t *) handle)->io_watcher.fd;
    break;

  default:
    return UV_EINVAL;
  }

  if (uv__is_closing(handle) || fd_out == -1)
    return UV_EBADF;

  *fd = fd_out;
  return 0;
}
803
804
805
/* Drain the loop's pending-callback queue, invoking each watcher's callback
 * with POLLOUT. The queue is moved aside first so callbacks that re-queue
 * themselves run on a later pass instead of looping forever here. */
static void uv__run_pending(uv_loop_t* loop) {
  QUEUE* q;
  QUEUE pq;
  uv__io_t* w;

  QUEUE_MOVE(&loop->pending_queue, &pq);

  while (!QUEUE_EMPTY(&pq)) {
    q = QUEUE_HEAD(&pq);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);
    w = QUEUE_DATA(q, uv__io_t, pending_queue);
    w->cb(loop, w, POLLOUT);
  }
}
820
821
822
/* Round `val` up to the next power of two (returns `val` unchanged when it
 * is already a power of two; 0 maps to 0). Assumes 32-bit unsigned int:
 * the bit-smearing covers shifts up to 16. */
static unsigned int next_power_of_two(unsigned int val) {
  static const unsigned int smear_shifts[] = {1, 2, 4, 8, 16};
  unsigned int v;
  size_t i;

  v = val - 1;
  /* Propagate the highest set bit into every lower position... */
  for (i = 0; i < sizeof(smear_shifts) / sizeof(smear_shifts[0]); i++)
    v |= v >> smear_shifts[i];
  /* ...so that adding one yields the next power of two. */
  return v + 1;
}
832
833
/* Grow the loop->watchers array so it can index at least `len` fds. The
 * array carries two extra "fake watcher" slots at its end that must be
 * preserved across reallocation. Aborts on OOM (libuv policy for internal
 * bookkeeping allocations). */
static void maybe_resize(uv_loop_t* loop, unsigned int len) {
  uv__io_t** watchers;
  void* fake_watcher_list;
  void* fake_watcher_count;
  unsigned int nwatchers;
  unsigned int i;

  if (len <= loop->nwatchers)
    return;

  /* Preserve fake watcher list and count at the end of the watchers */
  if (loop->watchers != NULL) {
    fake_watcher_list = loop->watchers[loop->nwatchers];
    fake_watcher_count = loop->watchers[loop->nwatchers + 1];
  } else {
    fake_watcher_list = NULL;
    fake_watcher_count = NULL;
  }

  /* Size to a power of two (counting the two fake slots) to amortize
   * reallocations. */
  nwatchers = next_power_of_two(len + 2) - 2;
  watchers = uv__reallocf(loop->watchers,
                          (nwatchers + 2) * sizeof(loop->watchers[0]));

  if (watchers == NULL)
    abort();
  for (i = loop->nwatchers; i < nwatchers; i++)
    watchers[i] = NULL;
  watchers[nwatchers] = fake_watcher_list;
  watchers[nwatchers + 1] = fake_watcher_count;

  loop->watchers = watchers;
  loop->nwatchers = nwatchers;
}
866
867
868
/* Initialize an I/O watcher with its callback and fd (fd may be -1 when not
 * yet known). Event masks start empty; the watcher is not registered. */
void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
  assert(cb != NULL);
  assert(fd >= -1);
  QUEUE_INIT(&w->pending_queue);
  QUEUE_INIT(&w->watcher_queue);
  w->cb = cb;
  w->fd = fd;
  w->events = 0;   /* Events currently registered with the backend. */
  w->pevents = 0;  /* Events the user wants to watch. */

#if defined(UV_HAVE_KQUEUE)
  w->rcount = 0;
  w->wcount = 0;
#endif /* defined(UV_HAVE_KQUEUE) */
}
883
884
885
/* Add `events` to a watcher's interest set and queue it for (re)registration
 * with the backend poller on the next loop iteration. */
void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  assert(w->fd >= 0);
  assert(w->fd < INT_MAX);

  w->pevents |= events;
  maybe_resize(loop, w->fd + 1);

#if !defined(__sun)
  /* The event ports backend needs to rearm all file descriptors on each and
   * every tick of the event loop but the other backends allow us to
   * short-circuit here if the event mask is unchanged.
   */
  if (w->events == w->pevents)
    return;
#endif

  if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);

  /* First time this fd is watched: record the watcher and bump the count. */
  if (loop->watchers[w->fd] == NULL) {
    loop->watchers[w->fd] = w;
    loop->nfds++;
  }
}
911
912
913
/* Remove `events` from a watcher's interest set. When no events remain the
 * watcher is deregistered entirely; otherwise it is re-queued so the backend
 * picks up the reduced mask. */
void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);

  if (w->fd == -1)
    return;

  assert(w->fd >= 0);

  /* Happens when uv__io_stop() is called on a handle that was never started. */
  if ((unsigned) w->fd >= loop->nwatchers)
    return;

  w->pevents &= ~events;

  if (w->pevents == 0) {
    QUEUE_REMOVE(&w->watcher_queue);
    QUEUE_INIT(&w->watcher_queue);
    w->events = 0;

    if (w == loop->watchers[w->fd]) {
      assert(loop->nfds > 0);
      loop->watchers[w->fd] = NULL;
      loop->nfds--;
    }
  }
  else if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
}
942
943
944
/* Fully detach a watcher: stop all events, drop any pending callback, and
 * tell the platform backend to forget queued events for the fd. */
void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
  uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
  QUEUE_REMOVE(&w->pending_queue);

  /* Remove stale events for this file descriptor */
  if (w->fd != -1)
    uv__platform_invalidate_fd(loop, w->fd);
}
952
953
954
/* Schedule a watcher's callback to run on the next pending pass (no-op if
 * it is already queued). */
void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
  if (QUEUE_EMPTY(&w->pending_queue))
    QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
}
958
959
960
/* Nonzero if the watcher is interested in any of the given events. */
int uv__io_active(const uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  return 0 != (w->pevents & events);
}
965
966
967
/* Nonzero if the loop already has a watcher registered for `fd`. */
int uv__fd_exists(uv_loop_t* loop, int fd) {
  return (unsigned) fd < loop->nwatchers && loop->watchers[fd] != NULL;
}
970
971
972
/* Fill `rusage` with resource usage for the current process. On z/OS and
 * Haiku only the CPU time fields are available. */
int uv_getrusage(uv_rusage_t* rusage) {
  struct rusage usage;

  if (getrusage(RUSAGE_SELF, &usage))
    return UV__ERR(errno);

  rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
  rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;

  rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
  rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;

#if !defined(__MVS__) && !defined(__HAIKU__)
  rusage->ru_maxrss = usage.ru_maxrss;
  rusage->ru_ixrss = usage.ru_ixrss;
  rusage->ru_idrss = usage.ru_idrss;
  rusage->ru_isrss = usage.ru_isrss;
  rusage->ru_minflt = usage.ru_minflt;
  rusage->ru_majflt = usage.ru_majflt;
  rusage->ru_nswap = usage.ru_nswap;
  rusage->ru_inblock = usage.ru_inblock;
  rusage->ru_oublock = usage.ru_oublock;
  rusage->ru_msgsnd = usage.ru_msgsnd;
  rusage->ru_msgrcv = usage.ru_msgrcv;
  rusage->ru_nsignals = usage.ru_nsignals;
  rusage->ru_nvcsw = usage.ru_nvcsw;
  rusage->ru_nivcsw = usage.ru_nivcsw;
#endif

  return 0;
}
1003
1004
1005
/* open() with close-on-exec set — atomically via O_CLOEXEC when available,
 * otherwise via a follow-up fcntl(). Returns the fd or a negative libuv
 * error code. */
int uv__open_cloexec(const char* path, int flags) {
#if defined(O_CLOEXEC)
  int fd;

  fd = open(path, flags | O_CLOEXEC);
  if (fd == -1)
    return UV__ERR(errno);

  return fd;
#else  /* O_CLOEXEC */
  int err;
  int fd;

  fd = open(path, flags);
  if (fd == -1)
    return UV__ERR(errno);

  err = uv__cloexec(fd, 1);
  if (err) {
    uv__close(fd);
    return err;
  }

  return fd;
#endif  /* O_CLOEXEC */
}
1031
1032
1033
/* Read up to len-1 bytes of `filename` into `buf` and NUL-terminate it.
 * Intended for small pseudo-files (e.g. /proc entries); a single read() is
 * assumed to suffice. Returns 0 or a negative libuv error code. */
int uv__slurp(const char* filename, char* buf, size_t len) {
  ssize_t n;
  int fd;

  assert(len > 0);

  fd = uv__open_cloexec(filename, O_RDONLY);
  if (fd < 0)
    return fd;

  do
    n = read(fd, buf, len - 1);
  while (n == -1 && errno == EINTR);

  /* A failing close here indicates a serious fd accounting bug. */
  if (uv__close_nocheckstdio(fd))
    abort();

  if (n < 0)
    return UV__ERR(errno);

  buf[n] = '\0';

  return 0;
}
1057
1058
1059
/* dup2() with close-on-exec on the new fd — atomically via dup3() where
 * available, otherwise dup2() followed by fcntl(). Returns newfd or a
 * negative libuv error code. */
int uv__dup2_cloexec(int oldfd, int newfd) {
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__linux__)
  int r;

  r = dup3(oldfd, newfd, O_CLOEXEC);
  if (r == -1)
    return UV__ERR(errno);

  return r;
#else
  int err;
  int r;

  r = dup2(oldfd, newfd); /* Never retry. */
  if (r == -1)
    return UV__ERR(errno);

  err = uv__cloexec(newfd, 1);
  if (err != 0) {
    uv__close(newfd);
    return err;
  }

  return r;
#endif
}
1085
1086
1087
/* Copy the current user's home directory into `buffer`: the HOME environment
 * variable if set, otherwise the passwd database entry. On overflow returns
 * UV_ENOBUFS with *size set to the required size including the NUL. */
int uv_os_homedir(char* buffer, size_t* size) {
  uv_passwd_t pwd;
  size_t len;
  int r;

  /* Check if the HOME environment variable is set first. The task of
     performing input validation on buffer and size is taken care of by
     uv_os_getenv(). */
  r = uv_os_getenv("HOME", buffer, size);

  if (r != UV_ENOENT)
    return r;

  /* HOME is not set, so call uv__getpwuid_r() */
  r = uv__getpwuid_r(&pwd);

  if (r != 0) {
    return r;
  }

  len = strlen(pwd.homedir);

  if (len >= *size) {
    *size = len + 1;
    uv_os_free_passwd(&pwd);
    return UV_ENOBUFS;
  }

  memcpy(buffer, pwd.homedir, len + 1);
  *size = len;
  uv_os_free_passwd(&pwd);

  return 0;
}
1121
1122
1123
/* Copy the system temp directory into `buffer`, consulting TMPDIR, TMP,
 * TEMP, and TEMPDIR in that order, with a platform default as fallback.
 * Trailing slashes are stripped. Returns UV_ENOBUFS (with *size set to the
 * required size including NUL) when the buffer is too small. */
int uv_os_tmpdir(char* buffer, size_t* size) {
  const char* buf;
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

#define CHECK_ENV_VAR(name) \
  do { \
    buf = getenv(name); \
    if (buf != NULL) \
      goto return_buffer; \
  } \
  while (0)

  /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */
  CHECK_ENV_VAR("TMPDIR");
  CHECK_ENV_VAR("TMP");
  CHECK_ENV_VAR("TEMP");
  CHECK_ENV_VAR("TEMPDIR");

#undef CHECK_ENV_VAR

  /* No temp environment variables defined */
#if defined(__ANDROID__)
  buf = "/data/local/tmp";
#else
  buf = "/tmp";
#endif

return_buffer:
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  /* The returned directory should not have a trailing slash. */
  if (len > 1 && buf[len - 1] == '/') {
    len--;
  }

  memcpy(buffer, buf, len + 1);
  buffer[len] = '\0';
  *size = len;

  return 0;
}
1172
1173
1174
/* Look up the effective user's passwd entry and populate `pwd`. The
 * username, homedir, and shell strings live in one allocation whose base is
 * pwd->username; free it with uv_os_free_passwd(). Returns 0 or a negative
 * libuv error code. */
int uv__getpwuid_r(uv_passwd_t* pwd) {
  struct passwd pw;
  struct passwd* result;
  char* buf;
  uid_t uid;
  size_t bufsize;
  size_t name_size;
  size_t homedir_size;
  size_t shell_size;
  int r;

  if (pwd == NULL)
    return UV_EINVAL;

  uid = geteuid();

  /* Calling sysconf(_SC_GETPW_R_SIZE_MAX) would get the suggested size, but it
   * is frequently 1024 or 4096, so we can just use that directly. The pwent
   * will not usually be large. */
  for (bufsize = 2000;; bufsize *= 2) {
    buf = uv__malloc(bufsize);

    if (buf == NULL)
      return UV_ENOMEM;

    do
      r = getpwuid_r(uid, &pw, buf, bufsize, &result);
    while (r == EINTR);

    /* On failure or no-match the buffer isn't referenced by pw; release it
     * before retrying or returning. */
    if (r != 0 || result == NULL)
      uv__free(buf);

    /* ERANGE means the buffer was too small: double it and retry. */
    if (r != ERANGE)
      break;
  }

  if (r != 0)
    return UV__ERR(r);

  if (result == NULL)
    return UV_ENOENT;

  /* Allocate memory for the username, shell, and home directory */
  name_size = strlen(pw.pw_name) + 1;
  homedir_size = strlen(pw.pw_dir) + 1;
  shell_size = strlen(pw.pw_shell) + 1;
  pwd->username = uv__malloc(name_size + homedir_size + shell_size);

  if (pwd->username == NULL) {
    uv__free(buf);
    return UV_ENOMEM;
  }

  /* Copy the username */
  memcpy(pwd->username, pw.pw_name, name_size);

  /* Copy the home directory */
  pwd->homedir = pwd->username + name_size;
  memcpy(pwd->homedir, pw.pw_dir, homedir_size);

  /* Copy the shell */
  pwd->shell = pwd->homedir + homedir_size;
  memcpy(pwd->shell, pw.pw_shell, shell_size);

  /* Copy the uid and gid */
  pwd->uid = pw.pw_uid;
  pwd->gid = pw.pw_gid;

  uv__free(buf);

  return 0;
}
1246
1247
1248
/* Release the strings held by a uv_passwd_t filled in by uv__getpwuid_r().
 * Safe to call with NULL. */
void uv_os_free_passwd(uv_passwd_t* pwd) {
  if (pwd == NULL)
    return;

  /*
    The memory for name, shell, and homedir are allocated in a single
    uv__malloc() call. The base of the pointer is stored in pwd->username, so
    that is the field that needs to be freed.
  */
  uv__free(pwd->username);
  pwd->username = NULL;
  pwd->shell = NULL;
  pwd->homedir = NULL;
}
1262
1263
1264
/* Public wrapper: fetch the current user's passwd entry. */
int uv_os_get_passwd(uv_passwd_t* pwd) {
  return uv__getpwuid_r(pwd);
}
1267
1268
1269
/* Map a positive errno value to libuv's negative error convention.
 * Values <= 0 are passed through unchanged (already libuv errors). */
int uv_translate_sys_error(int sys_errno) {
  if (sys_errno <= 0)
    return sys_errno; /* If < 0 then it's already a libuv error. */

  return -sys_errno;
}
1273
1274
1275
/* Snapshot the process environment into a heap-allocated array of
 * name/value items (entries without '=' are skipped). On success the caller
 * owns *envitems. Returns 0 or UV_ENOMEM; on failure *envitems is NULL and
 * *count is 0. */
int uv_os_environ(uv_env_item_t** envitems, int* count) {
  int i, j, cnt;
  uv_env_item_t* envitem;

  *envitems = NULL;
  *count = 0;

  /* Count the environment entries. */
  for (i = 0; environ[i] != NULL; i++);

  *envitems = uv__calloc(i, sizeof(**envitems));

  if (*envitems == NULL)
    return UV_ENOMEM;

  for (j = 0, cnt = 0; j < i; j++) {
    char* buf;
    char* ptr;

    /* The environment may have shrunk since we counted it. */
    if (environ[j] == NULL)
      break;

    buf = uv__strdup(environ[j]);
    if (buf == NULL)
      goto fail;

    ptr = strchr(buf, '=');
    if (ptr == NULL) {
      uv__free(buf);
      continue;
    }

    /* Split "NAME=value" in place: name and value share one allocation
     * whose base is envitem->name. */
    *ptr = '\0';

    envitem = &(*envitems)[cnt];
    envitem->name = buf;
    envitem->value = ptr + 1;

    cnt++;
  }

  *count = cnt;
  return 0;

fail:
  /* Free each item allocated so far. (Fix: index with the loop variable `i`,
   * not `cnt` — the old code freed the unset slot [cnt] repeatedly and
   * leaked every previously duplicated string.) */
  for (i = 0; i < cnt; i++) {
    envitem = &(*envitems)[i];
    uv__free(envitem->name);
  }
  uv__free(*envitems);

  *envitems = NULL;
  *count = 0;
  return UV_ENOMEM;
}
1329
1330
1331
int uv_os_getenv(const char* name, char* buffer, size_t* size) {
1332
char* var;
1333
size_t len;
1334
1335
if (name == NULL || buffer == NULL || size == NULL || *size == 0)
1336
return UV_EINVAL;
1337
1338
var = getenv(name);
1339
1340
if (var == NULL)
1341
return UV_ENOENT;
1342
1343
len = strlen(var);
1344
1345
if (len >= *size) {
1346
*size = len + 1;
1347
return UV_ENOBUFS;
1348
}
1349
1350
memcpy(buffer, var, len + 1);
1351
*size = len;
1352
1353
return 0;
1354
}
1355
1356
1357
int uv_os_setenv(const char* name, const char* value) {
1358
if (name == NULL || value == NULL)
1359
return UV_EINVAL;
1360
1361
if (setenv(name, value, 1) != 0)
1362
return UV__ERR(errno);
1363
1364
return 0;
1365
}
1366
1367
1368
int uv_os_unsetenv(const char* name) {
1369
if (name == NULL)
1370
return UV_EINVAL;
1371
1372
if (unsetenv(name) != 0)
1373
return UV__ERR(errno);
1374
1375
return 0;
1376
}
1377
1378
1379
int uv_os_gethostname(char* buffer, size_t* size) {
1380
/*
1381
On some platforms, if the input buffer is not large enough, gethostname()
1382
succeeds, but truncates the result. libuv can detect this and return ENOBUFS
1383
instead by creating a large enough buffer and comparing the hostname length
1384
to the size input.
1385
*/
1386
char buf[UV_MAXHOSTNAMESIZE];
1387
size_t len;
1388
1389
if (buffer == NULL || size == NULL || *size == 0)
1390
return UV_EINVAL;
1391
1392
if (gethostname(buf, sizeof(buf)) != 0)
1393
return UV__ERR(errno);
1394
1395
buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. */
1396
len = strlen(buf);
1397
1398
if (len >= *size) {
1399
*size = len + 1;
1400
return UV_ENOBUFS;
1401
}
1402
1403
memcpy(buffer, buf, len + 1);
1404
*size = len;
1405
return 0;
1406
}
1407
1408
1409
/* Return the size in bits of the CPU affinity mask (CPU_SETSIZE) on
 * platforms where libuv supports CPU affinity, or UV_ENOTSUP elsewhere. */
int uv_cpumask_size(void) {
#if defined(__linux__) || defined(__FreeBSD__)
  return CPU_SETSIZE;
#else
  return UV_ENOTSUP;
#endif
}
1416
1417
1418
/* On Unix an OS handle and a file descriptor are the same thing; this
 * exists for API symmetry with the Windows implementation. */
uv_os_fd_t uv_get_osfhandle(int fd) {
  uv_os_fd_t handle;

  handle = fd;
  return handle;
}
1421
1422
/* Inverse of uv_get_osfhandle(); on Unix this is the identity mapping. */
int uv_open_osfhandle(uv_os_fd_t os_fd) {
  int fd;

  fd = os_fd;
  return fd;
}
1425
1426
/* Return the current process id (thin wrapper over getpid(2)). */
uv_pid_t uv_os_getpid(void) {
  uv_pid_t pid;

  pid = getpid();
  return pid;
}
1429
1430
1431
/* Return the parent process id (thin wrapper over getppid(2)). */
uv_pid_t uv_os_getppid(void) {
  uv_pid_t ppid;

  ppid = getppid();
  return ppid;
}
1434
1435
1436
/* Store the scheduling priority (nice value) of process `pid` in
 * *priority.  Returns 0 or a libuv error code. */
int uv_os_getpriority(uv_pid_t pid, int* priority) {
  int nice_value;

  if (priority == NULL)
    return UV_EINVAL;

  /* getpriority() can legitimately return -1, so clear errno beforehand
   * and use it to tell a real priority of -1 apart from a failure. */
  errno = 0;
  nice_value = getpriority(PRIO_PROCESS, (int) pid);
  if (nice_value == -1 && errno != 0)
    return UV__ERR(errno);

  *priority = nice_value;
  return 0;
}
1451
1452
1453
/* Set the scheduling priority (nice value) of process `pid`.  The value
 * must lie within libuv's documented [UV_PRIORITY_HIGHEST, UV_PRIORITY_LOW]
 * range.  Returns 0 or a libuv error code. */
int uv_os_setpriority(uv_pid_t pid, int priority) {
  if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW)
    return UV_EINVAL;

  if (setpriority(PRIO_PROCESS, (int) pid, priority) == 0)
    return 0;

  return UV__ERR(errno);
}
1462
1463
1464
/* Fill `buffer` with system identification data obtained via uname(2).
 * Returns 0 on success or a libuv error code (UV_EINVAL, UV_E2BIG, or a
 * translated errno).  On any failure every field is reset to an empty
 * string so callers never observe partially-filled data. */
int uv_os_uname(uv_utsname_t* buffer) {
  struct utsname buf;
  int r;

  if (buffer == NULL)
    return UV_EINVAL;

  if (uname(&buf) == -1) {
    r = UV__ERR(errno);
    goto error;
  }

  r = uv__strscpy(buffer->sysname, buf.sysname, sizeof(buffer->sysname));
  if (r == UV_E2BIG)
    goto error;

#ifdef _AIX
  /* AIX splits the OS version across utsname.version (major) and
   * utsname.release (minor); combine them as "version.release". */
  r = snprintf(buffer->release,
               sizeof(buffer->release),
               "%s.%s",
               buf.version,
               buf.release);
  if (r >= sizeof(buffer->release)) {
    /* snprintf reported truncation. */
    r = UV_E2BIG;
    goto error;
  }
#else
  r = uv__strscpy(buffer->release, buf.release, sizeof(buffer->release));
  if (r == UV_E2BIG)
    goto error;
#endif

  r = uv__strscpy(buffer->version, buf.version, sizeof(buffer->version));
  if (r == UV_E2BIG)
    goto error;

#if defined(_AIX) || defined(__PASE__)
  /* AIX / IBM i: report a fixed machine type rather than buf.machine. */
  r = uv__strscpy(buffer->machine, "ppc64", sizeof(buffer->machine));
#else
  r = uv__strscpy(buffer->machine, buf.machine, sizeof(buffer->machine));
#endif

  if (r == UV_E2BIG)
    goto error;

  return 0;

error:
  /* Leave no partial results behind. */
  buffer->sysname[0] = '\0';
  buffer->release[0] = '\0';
  buffer->version[0] = '\0';
  buffer->machine[0] = '\0';
  return r;
}
1518
1519
int uv__getsockpeername(const uv_handle_t* handle,
1520
uv__peersockfunc func,
1521
struct sockaddr* name,
1522
int* namelen) {
1523
socklen_t socklen;
1524
uv_os_fd_t fd;
1525
int r;
1526
1527
r = uv_fileno(handle, &fd);
1528
if (r < 0)
1529
return r;
1530
1531
/* sizeof(socklen_t) != sizeof(int) on some systems. */
1532
socklen = (socklen_t) *namelen;
1533
1534
if (func(fd, name, &socklen))
1535
return UV__ERR(errno);
1536
1537
*namelen = (int) socklen;
1538
return 0;
1539
}
1540
1541
/* Fetch the current wall-clock time into a uv_timeval64_t.
 * Returns 0 or a libuv error code. */
int uv_gettimeofday(uv_timeval64_t* tv) {
  struct timeval now;

  if (tv == NULL)
    return UV_EINVAL;

  if (gettimeofday(&now, NULL) != 0)
    return UV__ERR(errno);

  /* Widen/narrow into the fixed-width libuv representation. */
  tv->tv_sec = (int64_t) now.tv_sec;
  tv->tv_usec = (int32_t) now.tv_usec;

  return 0;
}
1554
1555
/* Block the calling thread for `msec` milliseconds, restarting the sleep
 * when interrupted by a signal so the full delay elapses. */
void uv_sleep(unsigned int msec) {
  struct timespec req;
  int r;

  req.tv_sec = msec / 1000;
  req.tv_nsec = (msec % 1000) * 1000 * 1000;

  /* On EINTR, nanosleep() stores the remaining time back into `req`,
   * so simply restarting completes the requested delay. */
  do
    r = nanosleep(&req, &req);
  while (r == -1 && errno == EINTR);

  assert(r == 0);
}
1568
1569
/* Resolve `prog` to an absolute, canonical executable path, roughly
 * following execvp(3) lookup rules.  On input *buflen is the capacity of
 * `buf`; on success `buf` holds the NUL-terminated path and *buflen its
 * length (possibly truncated to fit).  Returns 0, UV_EINVAL (bad args or
 * not found), UV_ENOMEM, or a translated errno from realpath(). */
int uv__search_path(const char* prog, char* buf, size_t* buflen) {
  char abspath[UV__PATH_MAX];
  size_t abspath_size;
  char trypath[UV__PATH_MAX];
  char* cloned_path;
  char* path_env;
  char* token;
  char* itr;

  if (buf == NULL || buflen == NULL || *buflen == 0)
    return UV_EINVAL;

  /*
   * Possibilities for prog:
   * i) an absolute path such as: /home/user/myprojects/nodejs/node
   * ii) a relative path such as: ./node or ../myprojects/nodejs/node
   * iii) a bare filename such as "node", after exporting PATH variable
   *     to its location.
   */

  /* Case i) and ii) absolute or relative paths */
  if (strchr(prog, '/') != NULL) {
    if (realpath(prog, abspath) != abspath)
      return UV__ERR(errno);

    abspath_size = strlen(abspath);

    /* Copy at most *buflen - 1 bytes (reserving room for the NUL) and
     * record the number of bytes actually copied back into *buflen. */
    *buflen -= 1;
    if (*buflen > abspath_size)
      *buflen = abspath_size;

    memcpy(buf, abspath, *buflen);
    buf[*buflen] = '\0';

    return 0;
  }

  /* Case iii). Search PATH environment variable */
  cloned_path = NULL;
  token = NULL;
  path_env = getenv("PATH");

  if (path_env == NULL)
    return UV_EINVAL;

  /* uv__strtok() mutates its input, so tokenize a private copy of PATH. */
  cloned_path = uv__strdup(path_env);
  if (cloned_path == NULL)
    return UV_ENOMEM;

  token = uv__strtok(cloned_path, ":", &itr);
  while (token != NULL) {
    /* Try "<dir>/<prog>" for each PATH entry. */
    snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, prog);
    if (realpath(trypath, abspath) == abspath) {
      /* Check the match is executable */
      if (access(abspath, X_OK) == 0) {
        abspath_size = strlen(abspath);

        /* Same truncate-and-terminate copy as the absolute-path case. */
        *buflen -= 1;
        if (*buflen > abspath_size)
          *buflen = abspath_size;

        memcpy(buf, abspath, *buflen);
        buf[*buflen] = '\0';

        uv__free(cloned_path);
        return 0;
      }
    }
    token = uv__strtok(NULL, ":", &itr);
  }
  uv__free(cloned_path);

  /* Out of tokens (path entries), and no match found */
  return UV_EINVAL;
}
1644
1645
1646
/* Best-effort count of CPUs usable by this process; always returns at
 * least 1.  On Linux the process affinity mask is consulted first. */
unsigned int uv_available_parallelism(void) {
#ifdef __linux__
  cpu_set_t set;
  long rc;

  memset(&set, 0, sizeof(set));

  /* sysconf(_SC_NPROCESSORS_ONLN) in musl calls sched_getaffinity() but in
   * glibc it's... complicated... so for consistency try sched_getaffinity()
   * before falling back to sysconf(_SC_NPROCESSORS_ONLN).
   */
  if (0 == sched_getaffinity(0, sizeof(set), &set))
    rc = CPU_COUNT(&set);
  else
    rc = sysconf(_SC_NPROCESSORS_ONLN);

  /* Clamp to at least one CPU. */
  if (rc < 1)
    rc = 1;

  return (unsigned) rc;
#elif defined(__MVS__)
  int rc;

  /* z/OS: query the number of online CPUs directly. */
  rc = __get_num_online_cpus();
  if (rc < 1)
    rc = 1;

  return (unsigned) rc;
#else  /* __linux__ */
  long rc;

  /* Generic POSIX fallback. */
  rc = sysconf(_SC_NPROCESSORS_ONLN);
  if (rc < 1)
    rc = 1;

  return (unsigned) rc;
#endif  /* __linux__ */
}
1684
1685