GitHub Repository: Kitware/CMake
Path: blob/master/Utilities/cmlibuv/src/unix/kqueue.c
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
22
#include "internal.h"
23
24
#include <assert.h>
25
#include <stdlib.h>
26
#include <string.h>
27
#include <errno.h>
28
29
#include <sys/sysctl.h>
30
#include <sys/types.h>
31
#include <sys/event.h>
32
#include <sys/time.h>
33
#include <unistd.h>
34
#include <fcntl.h>
35
#include <time.h>
36
37
/*
38
* Required on
39
* - Until at least FreeBSD 11.0
40
* - Older versions of Mac OS X
41
*
42
* http://www.boost.org/doc/libs/1_61_0/boost/asio/detail/kqueue_reactor.hpp
43
*/
44
#ifndef EV_OOBAND
45
#define EV_OOBAND EV_FLAG1
46
#endif
47
48
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags);


int uv__kqueue_init(uv_loop_t* loop) {
  loop->backend_fd = kqueue();
  if (loop->backend_fd == -1)
    return UV__ERR(errno);

  uv__cloexec(loop->backend_fd, 1);

  return 0;
}

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
static int uv__has_forked_with_cfrunloop;
#endif

int uv__io_fork(uv_loop_t* loop) {
  int err;
  loop->backend_fd = -1;
  err = uv__kqueue_init(loop);
  if (err)
    return err;

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  if (loop->cf_state != NULL) {
    /* We cannot start another CFRunloop and/or thread in the child
       process; CF aborts if you try or if you try to touch the thread
       at all to kill it. So the best we can do is ignore it from now
       on. This means we can't watch directories in the same way
       anymore (like other BSDs). It also means we cannot properly
       clean up the allocated resources; calling
       uv__fsevents_loop_delete from uv_loop_close will crash the
       process. So we sidestep the issue by pretending like we never
       started it in the first place.
    */
    uv__store_relaxed(&uv__has_forked_with_cfrunloop, 1);
    uv__free(loop->cf_state);
    loop->cf_state = NULL;
  }
#endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */
  return err;
}

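/* Check whether `fd` can be monitored with kqueue by registering and then
 * immediately deleting an EVFILT_READ filter for it. Returns 0 on success
 * or a negated errno value if the descriptor is not supported.
 */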
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct kevent ev;
  int rc;

  rc = 0;
  EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
    rc = UV__ERR(errno);

  EV_SET(&ev, fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
  if (rc == 0)
    if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
      abort();

  return rc;
}

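/* Poll for I/O events. Queued watcher changes are first converted into
 * kevent filter changes, then kevent() is called with the computed timeout
 * and each returned event is dispatched to its watcher's callback.
 */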
void uv__io_poll(uv_loop_t* loop, int timeout) {
  struct kevent events[1024];
  struct kevent* ev;
  struct timespec spec;
  unsigned int nevents;
  unsigned int revents;
  QUEUE* q;
  uv__io_t* w;
  uv_process_t* process;
  sigset_t* pset;
  sigset_t set;
  uint64_t base;
  uint64_t diff;
  int have_signals;
  int filter;
  int fflags;
  int count;
  int nfds;
  int fd;
  int op;
  int i;
  int user_timeout;
  int reset_timeout;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  nevents = 0;

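  /* Translate queued watcher changes into kevent filters. Changes are
   * batched in events[] and flushed to the kernel whenever the array fills
   * up; any final partial batch is submitted together with the kevent()
   * call in the poll loop below.
   */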
  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    if ((w->events & POLLIN) == 0 && (w->pevents & POLLIN) != 0) {
      filter = EVFILT_READ;
      fflags = 0;
      op = EV_ADD;

      if (w->cb == uv__fs_event) {
        filter = EVFILT_VNODE;
        fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
               | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
        op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
      }

      EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    if ((w->events & POLLOUT) == 0 && (w->pevents & POLLOUT) != 0) {
      EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    if ((w->events & UV__POLLPRI) == 0 && (w->pevents & UV__POLLPRI) != 0) {
      EV_SET(events + nevents, w->fd, EV_OOBAND, EV_ADD, 0, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    w->events = w->pevents;
  }

  pset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    pset = &set;
    sigemptyset(pset);
    sigaddset(pset, SIGPROF);
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */

  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

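  /* Poll loop. When UV_METRICS_IDLE_TIME is enabled, the first kevent() pass
   * runs with a zero timeout and the requested timeout is restored from
   * user_timeout afterwards. `count` limits how many back-to-back
   * non-blocking re-polls are done when the events array comes back full.
   */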
  for (;; nevents = 0) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    if (timeout != -1) {
      spec.tv_sec = timeout / 1000;
      spec.tv_nsec = (timeout % 1000) * 1000000;
    }

    if (pset != NULL)
      pthread_sigmask(SIG_BLOCK, pset, NULL);

    nfds = kevent(loop->backend_fd,
                  events,
                  nevents,
                  events,
                  ARRAY_SIZE(events),
                  timeout == -1 ? NULL : &spec);

    if (nfds == -1)
      assert(errno == EINTR);

    if (pset != NULL)
      pthread_sigmask(SIG_UNBLOCK, pset, NULL);

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    uv__update_time(loop);

    if (nfds == 0 || nfds == -1) {
      /* If kqueue is empty or interrupted, we might still have children ready
       * to reap immediately. */
      if (loop->flags & UV_LOOP_REAP_CHILDREN) {
        loop->flags &= ~UV_LOOP_REAP_CHILDREN;
        uv__wait_children(loop);
        assert((reset_timeout == 0 ? timeout : user_timeout) == 0);
        return; /* Equivalent to fall-through behavior. */
      }

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      } else if (nfds == 0) {
        /* Reached the user timeout value. */
        assert(timeout != -1);
        return;
      }

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

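    /* Dispatch the returned events. The events array and count are stashed
     * in the two spare loop->watchers slots so uv__platform_invalidate_fd()
     * can invalidate entries for file descriptors that get closed while
     * callbacks run; the slots are cleared again once dispatching is done.
     */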
    have_signals = 0;
    nevents = 0;

    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      ev = events + i;
      fd = ev->ident;

      /* Handle kevent NOTE_EXIT results */
      if (ev->filter == EVFILT_PROC) {
        QUEUE_FOREACH(q, &loop->process_handles) {
          process = QUEUE_DATA(q, uv_process_t, queue);
          if (process->pid == fd) {
            process->flags |= UV_HANDLE_REAP;
            loop->flags |= UV_LOOP_REAP_CHILDREN;
            break;
          }
        }
        nevents++;
        continue;
      }

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;
      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         * TODO: batch up. */
        struct kevent events[1];

        EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
        if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
          if (errno != EBADF && errno != ENOENT)
            abort();

        continue;
      }

      if (ev->filter == EVFILT_VNODE) {
        assert(w->events == POLLIN);
        assert(w->pevents == POLLIN);
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
        nevents++;
        continue;
      }

      revents = 0;

      if (ev->filter == EVFILT_READ) {
        if (w->pevents & POLLIN) {
          revents |= POLLIN;
          w->rcount = ev->data;
        } else {
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
        if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
          revents |= UV__POLLRDHUP;
      }

      if (ev->filter == EV_OOBAND) {
        if (w->pevents & UV__POLLPRI) {
          revents |= UV__POLLPRI;
          w->rcount = ev->data;
        } else {
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
      }

      if (ev->filter == EVFILT_WRITE) {
        if (w->pevents & POLLOUT) {
          revents |= POLLOUT;
          w->wcount = ev->data;
        } else {
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
      }

      if (ev->flags & EV_ERROR)
        revents |= POLLERR;

      if (revents == 0)
        continue;

      /* Run signal watchers last. This also affects child process watchers
       * because those are implemented in terms of signal watchers.
       */
      if (w == &loop->signal_io_watcher) {
        have_signals = 1;
      } else {
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, revents);
      }

      nevents++;
    }

    if (loop->flags & UV_LOOP_REAP_CHILDREN) {
      loop->flags &= ~UV_LOOP_REAP_CHILDREN;
      uv__wait_children(loop);
    }

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return; /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

update_timeout:
    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

    assert(timeout > 0);

    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;

    timeout -= diff;
  }
}

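/* Invalidate pending events for a file descriptor that is being closed while
 * uv__io_poll() is dispatching. The events array stashed in loop->watchers is
 * scanned and matching entries get their ident set to -1 so the dispatch loop
 * skips them.
 */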
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct kevent* events;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  events = (struct kevent*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events == NULL)
    return;

  /* Invalidate events with same file descriptor */
  for (i = 0; i < nfds; i++)
    if ((int) events[i].ident == fd && events[i].filter != EVFILT_PROC)
      events[i].ident = -1;
}

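/* Callback for the kqueue-based file watcher (the non-FSEvents path). Maps
 * the EVFILT_VNODE fflags to UV_CHANGE or UV_RENAME, invokes the user
 * callback, then re-arms the one-shot vnode filter.
 */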
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
  uv_fs_event_t* handle;
  struct kevent ev;
  int events;
  const char* path;
#if defined(F_GETPATH)
  /* MAXPATHLEN == PATH_MAX but the former is what XNU calls it internally. */
  char pathbuf[MAXPATHLEN];
#endif

  handle = container_of(w, uv_fs_event_t, event_watcher);

  if (fflags & (NOTE_ATTRIB | NOTE_EXTEND))
    events = UV_CHANGE;
  else
    events = UV_RENAME;

  path = NULL;
#if defined(F_GETPATH)
  /* Also works when the file has been unlinked from the file system. Passing
   * in the path when the file has been deleted is arguably a little strange
   * but it's consistent with what the inotify backend does.
   */
  if (fcntl(handle->event_watcher.fd, F_GETPATH, pathbuf) == 0)
    path = uv__basename_r(pathbuf);
#endif
  handle->cb(handle, path, events, 0);

  if (handle->event_watcher.fd == -1)
    return;

  /* Watcher operates in one-shot mode, re-arm it. */
  fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
         | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;

  EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0);

  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
    abort();
}

int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}

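/* On macOS, directories are watched through the FSEvents API when possible;
 * regular files, and every watcher started after a fork that had an active
 * CFRunLoop (see uv__io_fork()), fall back to the kqueue EVFILT_VNODE
 * watcher set up at the end of this function.
 */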
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  int fd;
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  struct stat statbuf;
#endif

  if (uv__is_active(handle))
    return UV_EINVAL;

  handle->cb = cb;
  handle->path = uv__strdup(path);
  if (handle->path == NULL)
    return UV_ENOMEM;

  /* TODO open asynchronously - but how do we report back errors? */
  fd = open(handle->path, O_RDONLY);
  if (fd == -1) {
    uv__free(handle->path);
    handle->path = NULL;
    return UV__ERR(errno);
  }

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  /* Nullify field to perform checks later */
  handle->cf_cb = NULL;
  handle->realpath = NULL;
  handle->realpath_len = 0;
  handle->cf_flags = flags;

  if (fstat(fd, &statbuf))
    goto fallback;
  /* FSEvents works only with directories */
  if (!(statbuf.st_mode & S_IFDIR))
    goto fallback;

  if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) {
    int r;
    /* The fallback fd is no longer needed */
    uv__close_nocheckstdio(fd);
    handle->event_watcher.fd = -1;
    r = uv__fsevents_init(handle);
    if (r == 0) {
      uv__handle_start(handle);
    } else {
      uv__free(handle->path);
      handle->path = NULL;
    }
    return r;
  }
fallback:
#endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */

  uv__handle_start(handle);
  uv__io_init(&handle->event_watcher, uv__fs_event, fd);
  uv__io_start(handle->loop, &handle->event_watcher, POLLIN);

  return 0;
}

int uv_fs_event_stop(uv_fs_event_t* handle) {
  int r;
  r = 0;

  if (!uv__is_active(handle))
    return 0;

  uv__handle_stop(handle);

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop))
    if (handle->cf_cb != NULL)
      r = uv__fsevents_close(handle);
#endif

  if (handle->event_watcher.fd != -1) {
    uv__io_close(handle->loop, &handle->event_watcher);
    uv__close(handle->event_watcher.fd);
    handle->event_watcher.fd = -1;
  }

  uv__free(handle->path);
  handle->path = NULL;

  return r;
}


void uv__fs_event_close(uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);
}