Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Kitware
GitHub Repository: Kitware/CMake
Path: blob/master/Utilities/cmlibuv/src/unix/aix.c
3156 views
1
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2
*
3
* Permission is hereby granted, free of charge, to any person obtaining a copy
4
* of this software and associated documentation files (the "Software"), to
5
* deal in the Software without restriction, including without limitation the
6
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7
* sell copies of the Software, and to permit persons to whom the Software is
8
* furnished to do so, subject to the following conditions:
9
*
10
* The above copyright notice and this permission notice shall be included in
11
* all copies or substantial portions of the Software.
12
*
13
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19
* IN THE SOFTWARE.
20
*/
21
22
#include "uv.h"
23
#include "internal.h"
24
25
#include <stdio.h>
26
#include <stdint.h>
27
#include <stdlib.h>
28
#include <string.h>
29
#include <assert.h>
30
#include <errno.h>
31
32
#include <sys/types.h>
33
#include <sys/socket.h>
34
#include <sys/ioctl.h>
35
#include <net/if.h>
36
#include <netinet/in.h>
37
#include <arpa/inet.h>
38
39
#include <sys/time.h>
40
#include <unistd.h>
41
#include <fcntl.h>
42
#include <utmp.h>
43
#include <libgen.h>
44
45
#include <sys/protosw.h>
46
#include <libperfstat.h>
47
#include <procinfo.h>
48
#include <sys/proc.h>
49
#include <sys/procfs.h>
50
51
#include <sys/poll.h>
52
53
#include <sys/pollset.h>
54
#include <ctype.h>
55
#ifdef HAVE_SYS_AHAFS_EVPRODS_H
56
#include <sys/ahafs_evProds.h>
57
#endif
58
59
#include <sys/mntctl.h>
60
#include <sys/vmount.h>
61
#include <limits.h>
62
#include <strings.h>
63
#include <sys/vnode.h>
64
65
#define RDWR_BUF_SIZE 4096
66
#define EQ(a,b) (strcmp(a,b) == 0)
67
68
char* original_exepath = NULL;
69
uv_mutex_t process_title_mutex;
70
uv_once_t process_title_mutex_once = UV_ONCE_INIT;
71
static void* args_mem = NULL;
72
static char** process_argv = NULL;
73
static int process_argc = 0;
74
static char* process_title_ptr = NULL;
75
76
/* uv_once() callback: lazily initializes the global process-title mutex
 * shared by uv_setup_args() / uv_set_process_title() / uv_get_process_title(). */
void init_process_title_mutex_once(void) {
  uv_mutex_init(&process_title_mutex);
}
79
80
81
/* Platform-specific loop setup for AIX: create the pollset that backs
 * uv__io_poll(). Returns 0 on success, -1 if pollset_create() fails. */
int uv__platform_loop_init(uv_loop_t* loop) {
  loop->fs_fd = -1;  /* AHAFS fs-event descriptor; not opened here. */

  /* Passing maxfd of -1 should mean the limit is determined
   * by the user's ulimit or the global limit as per the doc */
  loop->backend_fd = pollset_create(-1);

  if (loop->backend_fd == -1)
    return -1;

  return 0;
}
93
94
95
/* Tear down the platform state created by uv__platform_loop_init():
 * close the AHAFS descriptor if one is open, then destroy the pollset.
 * Both fields are reset to -1 so a repeat call is a no-op. */
void uv__platform_loop_delete(uv_loop_t* loop) {
  int fd;

  fd = loop->fs_fd;
  if (fd != -1) {
    loop->fs_fd = -1;
    uv__close(fd);
  }

  fd = loop->backend_fd;
  if (fd != -1) {
    loop->backend_fd = -1;
    pollset_destroy(fd);
  }
}
106
107
108
/* Called in the child after fork(): pollset descriptors do not survive
 * fork, so destroy and re-create the platform loop state. */
int uv__io_fork(uv_loop_t* loop) {
  uv__platform_loop_delete(loop);

  return uv__platform_loop_init(loop);
}
113
114
115
/* Probe whether `fd` is pollable: temporarily add it to the pollset,
 * then remove it again. Returns 0 if the fd can be watched, or an
 * errno-style negative value if pollset_ctl() rejects it. */
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct poll_ctl pc;

  pc.events = POLLIN;
  pc.cmd = PS_MOD;  /* Equivalent to PS_ADD if the fd is not in the pollset. */
  pc.fd = fd;

  if (pollset_ctl(loop->backend_fd, &pc, 1))
    return UV__ERR(errno);

  /* Undo the probe; a failure here means the pollset itself is broken. */
  pc.cmd = PS_DELETE;
  if (pollset_ctl(loop->backend_fd, &pc, 1))
    abort();

  return 0;
}
131
132
133
/* Core poll step of the event loop on AIX, built on pollset_poll().
 *
 * 1. Flush loop->watcher_queue: sync each pending watcher's event mask
 *    into the kernel pollset (PS_ADD for new fds, PS_DELETE+PS_ADD to
 *    modify, since pollsets cannot shrink a mask in place).
 * 2. Poll, dispatch callbacks, and loop until the timeout budget is
 *    consumed or no more events are pending.
 *
 * `timeout` is in milliseconds; -1 blocks indefinitely, 0 is a
 * non-blocking sweep. */
void uv__io_poll(uv_loop_t* loop, int timeout) {
  struct pollfd events[1024];
  struct pollfd pqry;
  struct pollfd* pe;
  struct poll_ctl pc;
  QUEUE* q;
  uv__io_t* w;
  uint64_t base;
  uint64_t diff;
  int have_signals;
  int nevents;
  int count;
  int nfds;
  int i;
  int rc;
  int add_failed;
  int user_timeout;
  int reset_timeout;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  /* Sync pending watcher changes into the pollset. */
  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    pc.events = w->pevents;
    pc.fd = w->fd;

    add_failed = 0;
    if (w->events == 0) {
      /* Watcher believes the fd is not yet registered. */
      pc.cmd = PS_ADD;
      if (pollset_ctl(loop->backend_fd, &pc, 1)) {
        if (errno != EINVAL) {
          assert(0 && "Failed to add file descriptor (pc.fd) to pollset");
          abort();
        }
        /* Check if the fd is already in the pollset */
        pqry.fd = pc.fd;
        rc = pollset_query(loop->backend_fd, &pqry);
        switch (rc) {
          case -1:
            assert(0 && "Failed to query pollset for file descriptor");
            abort();
          case 0:
            assert(0 && "Pollset does not contain file descriptor");
            abort();
        }
        /* If we got here then the pollset already contained the file
         * descriptor even though we didn't think it should. This probably
         * shouldn't happen, but we can continue. */
        add_failed = 1;
      }
    }
    if (w->events != 0 || add_failed) {
      /* Modify, potentially removing events -- need to delete then add.
       * Could maybe mod if we knew for sure no events are removed, but
       * content of w->events is handled above as not reliable (falls back)
       * so may require a pollset_query() which would have to be pretty cheap
       * compared to a PS_DELETE to be worth optimizing. Alternatively, could
       * lazily remove events, squelching them in the mean time. */
      pc.cmd = PS_DELETE;
      if (pollset_ctl(loop->backend_fd, &pc, 1)) {
        assert(0 && "Failed to delete file descriptor (pc.fd) from pollset");
        abort();
      }
      pc.cmd = PS_ADD;
      if (pollset_ctl(loop->backend_fd, &pc, 1)) {
        assert(0 && "Failed to add file descriptor (pc.fd) to pollset");
        abort();
      }
    }

    /* Record what the kernel now watches for this fd. */
    w->events = w->pevents;
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */

  /* With idle-time metrics enabled, poll non-blocking first so idle time
   * can be measured; the real timeout is restored after the first pass. */
  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

  for (;;) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    nfds = pollset_poll(loop->backend_fd,
                        events,
                        ARRAY_SIZE(events),
                        timeout);

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      /* Timed out with no events. */
      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
        if (timeout == -1)
          continue;
        if (timeout > 0)
          goto update_timeout;
      }

      assert(timeout != -1);
      return;
    }

    if (nfds == -1) {
      if (errno != EINTR) {
        abort();
      }

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    have_signals = 0;
    nevents = 0;

    /* Stash the event array where uv__platform_invalidate_fd() can find
     * it, in case a callback stops a watcher mid-iteration. */
    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;

    for (i = 0; i < nfds; i++) {
      pe = events + i;
      pc.cmd = PS_DELETE;
      pc.fd = pe->fd;

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (pc.fd == -1)
        continue;

      assert(pc.fd >= 0);
      assert((unsigned) pc.fd < loop->nwatchers);

      w = loop->watchers[pc.fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         *
         * Ignore all errors because we may be racing with another thread
         * when the file descriptor is closed.
         */
        pollset_ctl(loop->backend_fd, &pc, 1);
        continue;
      }

      /* Run signal watchers last. This also affects child process watchers
       * because those are implemented in terms of signal watchers.
       */
      if (w == &loop->signal_io_watcher) {
        have_signals = 1;
      } else {
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, pe->revents);
      }

      nevents++;
    }

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return; /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    /* Deduct the time already spent waiting from the remaining budget. */
    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;

    timeout -= diff;
  }
}
365
366
367
uint64_t uv_get_free_memory(void) {
368
perfstat_memory_total_t mem_total;
369
int result = perfstat_memory_total(NULL, &mem_total, sizeof(mem_total), 1);
370
if (result == -1) {
371
return 0;
372
}
373
return mem_total.real_free * 4096;
374
}
375
376
377
uint64_t uv_get_total_memory(void) {
378
perfstat_memory_total_t mem_total;
379
int result = perfstat_memory_total(NULL, &mem_total, sizeof(mem_total), 1);
380
if (result == -1) {
381
return 0;
382
}
383
return mem_total.real_total * 4096;
384
}
385
386
387
/* libuv has no way to query a memory limit for this process on AIX. */
uint64_t uv_get_constrained_memory(void) {
  return 0; /* Memory constraints are unknown. */
}
390
391
392
void uv_loadavg(double avg[3]) {
393
perfstat_cpu_total_t ps_total;
394
int result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
395
if (result == -1) {
396
avg[0] = 0.; avg[1] = 0.; avg[2] = 0.;
397
return;
398
}
399
avg[0] = ps_total.loadavg[0] / (double)(1 << SBITS);
400
avg[1] = ps_total.loadavg[1] / (double)(1 << SBITS);
401
avg[2] = ps_total.loadavg[2] / (double)(1 << SBITS);
402
}
403
404
405
#ifdef HAVE_SYS_AHAFS_EVPRODS_H
406
/* Translate a block-device path such as "/dev/hd0" into its raw-device
 * counterpart "/dev/rhd0", writing the result into *dst.
 * Returns *dst on success, or NULL when `cp` contains no '/' separator. */
static char* uv__rawname(const char* cp, char (*dst)[FILENAME_MAX+1]) {
  char* dp;

  /* strrchr() replaces the legacy rindex(), which was removed from POSIX. */
  dp = strrchr(cp, '/');
  if (dp == NULL)
    return NULL;

  snprintf(*dst, sizeof(*dst), "%.*s/r%s", (int) (dp - cp), cp, dp + 1);
  return *dst;
}
416
417
418
/*
419
* Determine whether given pathname is a directory
420
* Returns 0 if the path is a directory, -1 if not
421
*
422
* Note: Opportunity here for more detailed error information but
423
* that requires changing callers of this function as well
424
*/
425
/*
 * Determine whether given pathname is a directory
 * Returns 0 if the path is a directory, -1 if not
 *
 * Note: Opportunity here for more detailed error information but
 * that requires changing callers of this function as well
 */
static int uv__path_is_a_directory(char* filename) {
  struct stat statbuf;

  if (stat(filename, &statbuf) < 0)
    return -1; /* failed: not a directory, assume it is a file */

  /* st_type / VDIR are AIX-specific vnode fields (sys/vnode.h). */
  if (statbuf.st_type == VDIR)
    return 0;

  return -1;
}
436
437
438
/*
439
* Check whether AHAFS is mounted.
440
* Returns 0 if AHAFS is mounted, or an error code < 0 on failure
441
*/
442
static int uv__is_ahafs_mounted(void){
443
char rawbuf[FILENAME_MAX+1];
444
int rv, i = 2;
445
struct vmount *p;
446
int size_multiplier = 10;
447
size_t siz = sizeof(struct vmount)*size_multiplier;
448
struct vmount *vmt;
449
const char *dev = "/aha";
450
char *obj, *stub;
451
452
p = uv__malloc(siz);
453
if (p == NULL)
454
return UV__ERR(errno);
455
456
/* Retrieve all mounted filesystems */
457
rv = mntctl(MCTL_QUERY, siz, (char*)p);
458
if (rv < 0)
459
return UV__ERR(errno);
460
if (rv == 0) {
461
/* buffer was not large enough, reallocate to correct size */
462
siz = *(int*)p;
463
uv__free(p);
464
p = uv__malloc(siz);
465
if (p == NULL)
466
return UV__ERR(errno);
467
rv = mntctl(MCTL_QUERY, siz, (char*)p);
468
if (rv < 0)
469
return UV__ERR(errno);
470
}
471
472
/* Look for dev in filesystems mount info */
473
for(vmt = p, i = 0; i < rv; i++) {
474
obj = vmt2dataptr(vmt, VMT_OBJECT); /* device */
475
stub = vmt2dataptr(vmt, VMT_STUB); /* mount point */
476
477
if (EQ(obj, dev) || EQ(uv__rawname(obj, &rawbuf), dev) || EQ(stub, dev)) {
478
uv__free(p); /* Found a match */
479
return 0;
480
}
481
vmt = (struct vmount *) ((char *) vmt + vmt->vmt_length);
482
}
483
484
/* /aha is required for monitoring filesystem changes */
485
return -1;
486
}
487
488
/*
489
* Recursive call to mkdir() to create intermediate folders, if any
490
* Returns code from mkdir call
491
*/
492
/*
 * mkdir -p: create `dir` and any missing intermediate directories.
 * Returns the result of the final mkdir() call (0, or -1 with errno set).
 *
 * Fixes over the previous revision:
 *  - An empty string made `tmp[len - 1]` read before the buffer.
 *  - A path longer than the local buffer was silently truncated by
 *    uv__strscpy(), creating the wrong directory tree; now rejected.
 */
static int uv__makedir_p(const char *dir) {
  char tmp[256];
  char *p = NULL;
  size_t len;
  int err;

  len = strlen(dir);
  if (len == 0) {
    errno = ENOENT;
    return -1;
  }
  if (len >= sizeof(tmp)) {
    errno = ENAMETOOLONG;
    return -1;
  }

  uv__strscpy(tmp, dir, sizeof(tmp));
  if (tmp[len - 1] == '/')
    tmp[len - 1] = 0;

  /* Create each intermediate component, tolerating ones that exist. */
  for (p = tmp + 1; *p; p++) {
    if (*p == '/') {
      *p = 0;
      err = mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
      if (err != 0 && errno != EEXIST)
        return err;
      *p = '/';
    }
  }
  return mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
}
514
515
/*
516
* Creates necessary subdirectories in the AIX Event Infrastructure
517
* file system for monitoring the object specified.
518
* Returns code from mkdir call
519
*/
520
static int uv__make_subdirs_p(const char *filename) {
521
char cmd[2048];
522
char *p;
523
int rc = 0;
524
525
/* Strip off the monitor file name */
526
p = strrchr(filename, '/');
527
528
if (p == NULL)
529
return 0;
530
531
if (uv__path_is_a_directory((char*)filename) == 0) {
532
sprintf(cmd, "/aha/fs/modDir.monFactory");
533
} else {
534
sprintf(cmd, "/aha/fs/modFile.monFactory");
535
}
536
537
strncat(cmd, filename, (p - filename));
538
rc = uv__makedir_p(cmd);
539
540
if (rc == -1 && errno != EEXIST){
541
return UV__ERR(errno);
542
}
543
544
return rc;
545
}
546
547
548
/*
549
* Checks if /aha is mounted, then proceeds to set up the monitoring
550
* objects for the specified file.
551
* Returns 0 on success, or an error code < 0 on failure
552
*/
553
static int uv__setup_ahafs(const char* filename, int *fd) {
554
int rc = 0;
555
char mon_file_write_string[RDWR_BUF_SIZE];
556
char mon_file[PATH_MAX];
557
int file_is_directory = 0; /* -1 == NO, 0 == YES */
558
559
/* Create monitor file name for object */
560
file_is_directory = uv__path_is_a_directory((char*)filename);
561
562
if (file_is_directory == 0)
563
sprintf(mon_file, "/aha/fs/modDir.monFactory");
564
else
565
sprintf(mon_file, "/aha/fs/modFile.monFactory");
566
567
if ((strlen(mon_file) + strlen(filename) + 5) > PATH_MAX)
568
return UV_ENAMETOOLONG;
569
570
/* Make the necessary subdirectories for the monitor file */
571
rc = uv__make_subdirs_p(filename);
572
if (rc == -1 && errno != EEXIST)
573
return rc;
574
575
strcat(mon_file, filename);
576
strcat(mon_file, ".mon");
577
578
*fd = 0; errno = 0;
579
580
/* Open the monitor file, creating it if necessary */
581
*fd = open(mon_file, O_CREAT|O_RDWR);
582
if (*fd < 0)
583
return UV__ERR(errno);
584
585
/* Write out the monitoring specifications.
586
* In this case, we are monitoring for a state change event type
587
* CHANGED=YES
588
* We will be waiting in select call, rather than a read:
589
* WAIT_TYPE=WAIT_IN_SELECT
590
* We only want minimal information for files:
591
* INFO_LVL=1
592
* For directories, we want more information to track what file
593
* caused the change
594
* INFO_LVL=2
595
*/
596
597
if (file_is_directory == 0)
598
sprintf(mon_file_write_string, "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=2");
599
else
600
sprintf(mon_file_write_string, "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=1");
601
602
rc = write(*fd, mon_file_write_string, strlen(mon_file_write_string)+1);
603
if (rc < 0 && errno != EBUSY)
604
return UV__ERR(errno);
605
606
return 0;
607
}
608
609
/*
610
* Skips a specified number of lines in the buffer passed in.
611
* Walks the buffer pointed to by p and attempts to skip n lines.
612
* Returns the total number of lines skipped
613
*/
614
/*
 * Skips up to n lines in the NUL-terminated buffer *p, advancing *p past
 * each newline. Returns the number of lines actually skipped. When the
 * buffer runs out of newlines before n lines are skipped, *p is left NULL.
 *
 * BUG FIX: the previous revision tested `!p` -- the address of the
 * caller's pointer, which is never NULL -- instead of `!*p`, the result
 * of strchr(). Running past the last newline therefore incremented a
 * NULL pointer and passed it back into strchr(): undefined behavior.
 */
static int uv__skip_lines(char **p, int n) {
  int lines = 0;

  while (n > 0) {
    *p = strchr(*p, '\n');
    if (*p == NULL)
      return lines;

    (*p)++;
    n--;
    lines++;
  }
  return lines;
}
628
629
630
/*
631
* Parse the event occurrence data to figure out what event just occurred
632
* and take proper action.
633
*
634
* The buf is a pointer to the buffer containing the event occurrence data
635
* Returns 0 on success, -1 if unrecoverable error in parsing
636
*
637
*/
638
/*
 * Parse the event occurrence data to figure out what event just occurred
 * and take proper action.
 *
 * The buf is a pointer to the buffer containing the event occurrence data
 * Returns 0 on success, -1 if unrecoverable error in parsing
 */
static int uv__parse_data(char *buf, int *events, uv_fs_event_t* handle) {
  int evp_rc, i;
  char *p;
  char filename[PATH_MAX]; /* To be used when handling directories */

  p = buf;
  *events = 0;

  /* Clean the filename buffer*/
  for(i = 0; i < PATH_MAX; i++) {
    filename[i] = 0;
  }
  i = 0;

  /* Check for BUF_WRAP: the kernel's event buffer wrapped and occurrences
   * were dropped. Treated as non-fatal. */
  if (strncmp(buf, "BUF_WRAP", strlen("BUF_WRAP")) == 0) {
    assert(0 && "Buffer wrap detected, Some event occurrences lost!");
    return 0;
  }

  /* Since we are using the default buffer size (4K), and have specified
   * INFO_LVL=1, we won't see any EVENT_OVERFLOW conditions. Applications
   * should check for this keyword if they are using an INFO_LVL of 2 or
   * higher, and have a buffer size of <= 4K
   */

  /* Skip to RC_FROM_EVPROD: the fixed AHAFS header is 9 lines long. */
  if (uv__skip_lines(&p, 9) != 9)
    return -1;

  if (sscanf(p, "RC_FROM_EVPROD=%d\nEND_EVENT_DATA", &evp_rc) == 1) {
    if (uv__path_is_a_directory(handle->path) == 0) { /* Directory */
      if (evp_rc == AHAFS_MODDIR_UNMOUNT || evp_rc == AHAFS_MODDIR_REMOVE_SELF) {
        /* The directory is no longer available for monitoring.
         * NOTE(review): dir_filename is overwritten without freeing the
         * previous allocation -- looks like a potential leak; confirm. */
        *events = UV_RENAME;
        handle->dir_filename = NULL;
      } else {
        /* A file was added/removed inside the directory */
        *events = UV_CHANGE;

        /* Get the EVPROD_INFO */
        if (uv__skip_lines(&p, 1) != 1)
          return -1;

        /* Scan out the name of the file that triggered the event*/
        if (sscanf(p, "BEGIN_EVPROD_INFO\n%sEND_EVPROD_INFO", filename) == 1) {
          handle->dir_filename = uv__strdup((const char*)&filename);
        } else
          return -1;
      }
    } else { /* Regular File */
      if (evp_rc == AHAFS_MODFILE_RENAME)
        *events = UV_RENAME;
      else
        *events = UV_CHANGE;
    }
  }
  else
    return -1;

  return 0;
}
700
701
702
/* This is the internal callback */
703
/* Internal I/O callback for an AHAFS monitor descriptor: read the event
 * occurrence data, parse it, and invoke the user's fs-event callback with
 * the (relative) file name and UV_CHANGE/UV_RENAME flags. */
static void uv__ahafs_event(uv_loop_t* loop, uv__io_t* event_watch, unsigned int fflags) {
  char result_data[RDWR_BUF_SIZE];
  int bytes, rc = 0;
  uv_fs_event_t* handle;
  int events = 0;
  char fname[PATH_MAX];
  char *p;

  handle = container_of(event_watch, uv_fs_event_t, event_watcher);

  /* At this point, we assume that polling has been done on the
   * file descriptor, so we can just read the AHAFS event occurrence
   * data and parse its results without having to block anything
   */
  bytes = pread(event_watch->fd, result_data, RDWR_BUF_SIZE, 0);

  assert((bytes >= 0) && "uv__ahafs_event - Error reading monitor file");

  /* In file / directory move cases, AIX Event infrastructure
   * produces a second event with no data.
   * Ignore it and return gracefully.
   */
  if(bytes == 0)
    return;

  /* Parse the data */
  if(bytes > 0)
    rc = uv__parse_data(result_data, &events, handle);

  /* Unrecoverable error */
  if (rc == -1)
    return;

  /* For directory changes, the name of the files that triggered the change
   * are never absolute pathnames
   */
  if (uv__path_is_a_directory(handle->path) == 0) {
    /* Use the child name extracted by uv__parse_data(). */
    p = handle->dir_filename;
  } else {
    /* For a file watch, report the basename of the watched path. */
    p = strrchr(handle->path, '/');
    if (p == NULL)
      p = handle->path;
    else
      p++;
  }

  /* TODO(bnoordhuis) Check uv__strscpy() return value. */
  uv__strscpy(fname, p, sizeof(fname));

  handle->cb(handle, fname, events, 0);
}
754
#endif
755
756
757
/* Initialize an fs-event handle. Only supported when the build found the
 * AIX Event Infrastructure headers; otherwise reports UV_ENOSYS. */
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
#ifdef HAVE_SYS_AHAFS_EVPRODS_H
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
#else
  return UV_ENOSYS;
#endif
}
765
766
767
/* Start watching `filename` for changes via AHAFS.
 * Resolves the path to an absolute one, verifies /aha is mounted, creates
 * the monitor file, and registers the descriptor with the event loop.
 * Returns 0 on success or an error code < 0 (UV_ENOSYS when AHAFS is
 * unavailable). The `flags` argument is currently unused on AIX. */
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* filename,
                      unsigned int flags) {
#ifdef HAVE_SYS_AHAFS_EVPRODS_H
  int fd, rc, str_offset = 0;
  char cwd[PATH_MAX];
  char absolute_path[PATH_MAX];
  char readlink_cwd[PATH_MAX];
  struct timeval zt;
  fd_set pollfd;


  /* Figure out whether filename is absolute or not */
  if (filename[0] == '\0') {
    /* Missing a pathname */
    return UV_ENOENT;
  }
  else if (filename[0] == '/') {
    /* We have absolute pathname */
    /* TODO(bnoordhuis) Check uv__strscpy() return value. */
    uv__strscpy(absolute_path, filename, sizeof(absolute_path));
  } else {
    /* We have a relative pathname, compose the absolute pathname by
     * resolving this process's cwd symlink under /proc. */
    snprintf(cwd, sizeof(cwd), "/proc/%lu/cwd", (unsigned long) getpid());
    rc = readlink(cwd, readlink_cwd, sizeof(readlink_cwd) - 1);
    if (rc < 0)
      return rc;
    /* readlink does not null terminate our string */
    readlink_cwd[rc] = '\0';

    /* Strip a leading "./" so it isn't duplicated in the joined path. */
    if (filename[0] == '.' && filename[1] == '/')
      str_offset = 2;

    snprintf(absolute_path, sizeof(absolute_path), "%s%s", readlink_cwd,
             filename + str_offset);
  }

  if (uv__is_ahafs_mounted() < 0)  /* /aha checks failed */
    return UV_ENOSYS;

  /* Setup ahafs */
  rc = uv__setup_ahafs((const char *)absolute_path, &fd);
  if (rc != 0)
    return rc;

  /* Setup/Initialize all the libuv routines */
  uv__handle_start(handle);
  uv__io_init(&handle->event_watcher, uv__ahafs_event, fd);
  handle->path = uv__strdup(filename);
  handle->cb = cb;
  handle->dir_filename = NULL;

  uv__io_start(handle->loop, &handle->event_watcher, POLLIN);

  /* AHAFS wants someone to poll for it to start monitoring.
   * so kick-start it so that we don't miss an event in the
   * eventuality of an event that occurs in the current loop. */
  do {
    memset(&zt, 0, sizeof(zt));
    FD_ZERO(&pollfd);
    FD_SET(fd, &pollfd);
    rc = select(fd + 1, &pollfd, NULL, NULL, &zt);
  } while (rc == -1 && errno == EINTR);
  return 0;
#else
  return UV_ENOSYS;
#endif
}
836
837
838
/* Stop watching: unregister the monitor descriptor from the loop, free the
 * per-handle strings and close the AHAFS fd. Safe to call on an inactive
 * handle (returns 0 without doing anything). */
int uv_fs_event_stop(uv_fs_event_t* handle) {
#ifdef HAVE_SYS_AHAFS_EVPRODS_H
  if (!uv__is_active(handle))
    return 0;

  uv__io_close(handle->loop, &handle->event_watcher);
  uv__handle_stop(handle);

  /* dir_filename is only allocated for directory watches. */
  if (uv__path_is_a_directory(handle->path) == 0) {
    uv__free(handle->dir_filename);
    handle->dir_filename = NULL;
  }

  uv__free(handle->path);
  handle->path = NULL;
  uv__close(handle->event_watcher.fd);
  handle->event_watcher.fd = -1;

  return 0;
#else
  return UV_ENOSYS;
#endif
}
861
862
863
/* Handle-close hook for fs-event handles: stopping releases everything.
 * Unreachable when AHAFS support was compiled out, because
 * uv_fs_event_init() refuses to create such handles. */
void uv__fs_event_close(uv_fs_event_t* handle) {
#ifdef HAVE_SYS_AHAFS_EVPRODS_H
  uv_fs_event_stop(handle);
#else
  UNREACHABLE();
#endif
}
870
871
872
/* Capture argv for process-title support and return a heap-allocated copy
 * of it for the caller to use. Returns the original argv unchanged when
 * argc <= 0 or allocation fails. */
char** uv_setup_args(int argc, char** argv) {
  char exepath[UV__PATH_MAX];
  char** new_argv;
  size_t size;
  char* s;
  int i;

  if (argc <= 0)
    return argv;

  /* Save the original pointer to argv.
   * AIX uses argv to read the process name.
   * (Not the memory pointed to by argv[0..n] as on Linux.)
   */
  process_argv = argv;
  process_argc = argc;

  /* Use argv[0] to determine value for uv_exepath(). */
  size = sizeof(exepath);
  if (uv__search_path(argv[0], exepath, &size) == 0) {
    uv_once(&process_title_mutex_once, init_process_title_mutex_once);
    uv_mutex_lock(&process_title_mutex);
    original_exepath = uv__strdup(exepath);
    uv_mutex_unlock(&process_title_mutex);
  }

  /* Calculate how much memory we need for the argv strings. */
  size = 0;
  for (i = 0; i < argc; i++)
    size += strlen(argv[i]) + 1;

  /* Add space for the argv pointers. */
  size += (argc + 1) * sizeof(char*);

  /* Single allocation: pointer table followed by the string data. */
  new_argv = uv__malloc(size);
  if (new_argv == NULL)
    return argv;
  args_mem = new_argv;

  /* Copy over the strings and set up the pointer table. */
  s = (char*) &new_argv[argc + 1];
  for (i = 0; i < argc; i++) {
    size = strlen(argv[i]) + 1;
    memcpy(s, argv[i], size);
    new_argv[i] = s;
    s += size;
  }
  new_argv[i] = NULL;

  return new_argv;
}
923
924
925
/* Set the process title by pointing argv[0] at a copy of `title`.
 * Returns 0, UV_ENOBUFS if uv_setup_args() was never (successfully)
 * called, or UV_ENOMEM on allocation failure. */
int uv_set_process_title(const char* title) {
  char* new_title;

  /* If uv_setup_args wasn't called or failed, we can't continue. */
  if (process_argv == NULL || args_mem == NULL)
    return UV_ENOBUFS;

  /* We cannot free this pointer when libuv shuts down,
   * the process may still be using it.
   */
  new_title = uv__strdup(title);
  if (new_title == NULL)
    return UV_ENOMEM;

  uv_once(&process_title_mutex_once, init_process_title_mutex_once);
  uv_mutex_lock(&process_title_mutex);

  /* If this is the first time this is set,
   * don't free and set argv[1] to NULL.
   */
  if (process_title_ptr != NULL)
    uv__free(process_title_ptr);

  process_title_ptr = new_title;

  /* AIX reads the process name from argv itself; NULL-terminating after
   * argv[0] hides the remaining original arguments from ps(1). */
  process_argv[0] = process_title_ptr;
  if (process_argc > 1)
     process_argv[1] = NULL;

  uv_mutex_unlock(&process_title_mutex);

  return 0;
}
958
959
960
int uv_get_process_title(char* buffer, size_t size) {
961
size_t len;
962
if (buffer == NULL || size == 0)
963
return UV_EINVAL;
964
965
/* If uv_setup_args wasn't called, we can't continue. */
966
if (process_argv == NULL)
967
return UV_ENOBUFS;
968
969
uv_once(&process_title_mutex_once, init_process_title_mutex_once);
970
uv_mutex_lock(&process_title_mutex);
971
972
len = strlen(process_argv[0]);
973
if (size <= len) {
974
uv_mutex_unlock(&process_title_mutex);
975
return UV_ENOBUFS;
976
}
977
978
memcpy(buffer, process_argv[0], len);
979
buffer[len] = '\0';
980
981
uv_mutex_unlock(&process_title_mutex);
982
983
return 0;
984
}
985
986
987
/* Library teardown: release the argv copy made by uv_setup_args().
 * (process_title_ptr is intentionally NOT freed -- see uv_set_process_title.) */
void uv__process_title_cleanup(void) {
  uv__free(args_mem); /* Keep valgrind happy. */
  args_mem = NULL;
}
991
992
993
int uv_resident_set_memory(size_t* rss) {
994
char pp[64];
995
psinfo_t psinfo;
996
int err;
997
int fd;
998
999
snprintf(pp, sizeof(pp), "/proc/%lu/psinfo", (unsigned long) getpid());
1000
1001
fd = open(pp, O_RDONLY);
1002
if (fd == -1)
1003
return UV__ERR(errno);
1004
1005
/* FIXME(bnoordhuis) Handle EINTR. */
1006
err = UV_EINVAL;
1007
if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) {
1008
*rss = (size_t)psinfo.pr_rssize * 1024;
1009
err = 0;
1010
}
1011
uv__close(fd);
1012
1013
return err;
1014
}
1015
1016
1017
int uv_uptime(double* uptime) {
1018
struct utmp *utmp_buf;
1019
size_t entries = 0;
1020
time_t boot_time;
1021
1022
boot_time = 0;
1023
utmpname(UTMP_FILE);
1024
1025
setutent();
1026
1027
while ((utmp_buf = getutent()) != NULL) {
1028
if (utmp_buf->ut_user[0] && utmp_buf->ut_type == USER_PROCESS)
1029
++entries;
1030
if (utmp_buf->ut_type == BOOT_TIME)
1031
boot_time = utmp_buf->ut_time;
1032
}
1033
1034
endutent();
1035
1036
if (boot_time == 0)
1037
return UV_ENOSYS;
1038
1039
*uptime = time(NULL) - boot_time;
1040
return 0;
1041
}
1042
1043
1044
/* Enumerate logical CPUs via perfstat. Fills *cpu_infos (caller frees with
 * uv_free_cpu_info()) and *count. Returns 0, UV_ENOSYS if perfstat fails,
 * or UV_ENOMEM on allocation failure. */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  uv_cpu_info_t* cpu_info;
  perfstat_cpu_total_t ps_total;
  perfstat_cpu_t* ps_cpus;
  perfstat_id_t cpu_id;
  int result, ncpus, idx = 0;

  /* System-wide totals: provides the clock rate and model string. */
  result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
  if (result == -1) {
    return UV_ENOSYS;
  }

  /* Calling with NULL buffer returns the number of available records. */
  ncpus = result = perfstat_cpu(NULL, NULL, sizeof(perfstat_cpu_t), 0);
  if (result == -1) {
    return UV_ENOSYS;
  }

  ps_cpus = (perfstat_cpu_t*) uv__malloc(ncpus * sizeof(perfstat_cpu_t));
  if (!ps_cpus) {
    return UV_ENOMEM;
  }

  /* TODO(bnoordhuis) Check uv__strscpy() return value. */
  uv__strscpy(cpu_id.name, FIRST_CPU, sizeof(cpu_id.name));
  result = perfstat_cpu(&cpu_id, ps_cpus, sizeof(perfstat_cpu_t), ncpus);
  if (result == -1) {
    uv__free(ps_cpus);
    return UV_ENOSYS;
  }

  *cpu_infos = (uv_cpu_info_t*) uv__malloc(ncpus * sizeof(uv_cpu_info_t));
  if (!*cpu_infos) {
    uv__free(ps_cpus);
    return UV_ENOMEM;
  }

  *count = ncpus;

  cpu_info = *cpu_infos;
  while (idx < ncpus) {
    cpu_info->speed = (int)(ps_total.processorHZ / 1000000);  /* in MHz */
    cpu_info->model = uv__strdup(ps_total.description);
    cpu_info->cpu_times.user = ps_cpus[idx].user;
    cpu_info->cpu_times.sys = ps_cpus[idx].sys;
    cpu_info->cpu_times.idle = ps_cpus[idx].idle;
    /* AIX has no irq counter; wait (I/O wait) is the closest field. */
    cpu_info->cpu_times.irq = ps_cpus[idx].wait;
    cpu_info->cpu_times.nice = 0;
    cpu_info++;
    idx++;
  }

  uv__free(ps_cpus);
  return 0;
}
1098
1099
1100
/* Enumerate up-and-running IPv4/IPv6 interface addresses via SIOCGIFCONF.
 * Three passes over the ifreq list: (1) count matching entries, (2) fill
 * in address/netmask/flags, (3) attach AF_LINK physical addresses by
 * interface name. Caller frees with uv_free_interface_addresses().
 * Returns 0 (possibly with *count == 0) or an error code < 0. */
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
  uv_interface_address_t* address;
  int sockfd, sock6fd, inet6, i, r, size = 1;
  struct ifconf ifc;
  struct ifreq *ifr, *p, flg;
  struct in6_ifreq if6;
  struct sockaddr_dl* sa_addr;

  ifc.ifc_req = NULL;
  sock6fd = -1;
  r = 0;
  *count = 0;
  *addresses = NULL;

  if (0 > (sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP))) {
    r = UV__ERR(errno);
    goto cleanup;
  }

  if (0 > (sock6fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_IP))) {
    r = UV__ERR(errno);
    goto cleanup;
  }

  /* Ask the kernel how large the SIOCGIFCONF buffer must be. */
  if (ioctl(sockfd, SIOCGSIZIFCONF, &size) == -1) {
    r = UV__ERR(errno);
    goto cleanup;
  }

  ifc.ifc_req = (struct ifreq*)uv__malloc(size);
  if (ifc.ifc_req == NULL) {
    r = UV_ENOMEM;
    goto cleanup;
  }
  ifc.ifc_len = size;
  if (ioctl(sockfd, SIOCGIFCONF, &ifc) == -1) {
    r = UV__ERR(errno);
    goto cleanup;
  }

/* Entries are variable-length; the address may be longer than sizeof. */
#define ADDR_SIZE(p) MAX((p).sa_len, sizeof(p))

  /* Count all up and running ipv4/ipv6 addresses */
  ifr = ifc.ifc_req;
  while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
    p = ifr;
    ifr = (struct ifreq*)
      ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));

    if (!(p->ifr_addr.sa_family == AF_INET6 ||
          p->ifr_addr.sa_family == AF_INET))
      continue;

    memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
    if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
      r = UV__ERR(errno);
      goto cleanup;
    }

    if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
      continue;

    (*count)++;
  }

  if (*count == 0)
    goto cleanup;

  /* Alloc the return interface structs */
  *addresses = uv__calloc(*count, sizeof(**addresses));
  if (!(*addresses)) {
    r = UV_ENOMEM;
    goto cleanup;
  }
  address = *addresses;

  /* Second pass: populate one entry per matching interface. */
  ifr = ifc.ifc_req;
  while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
    p = ifr;
    ifr = (struct ifreq*)
      ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));

    if (!(p->ifr_addr.sa_family == AF_INET6 ||
          p->ifr_addr.sa_family == AF_INET))
      continue;

    inet6 = (p->ifr_addr.sa_family == AF_INET6);

    memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
    if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1)
      goto syserror;

    if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
      continue;

    /* All conditions above must match count loop */

    address->name = uv__strdup(p->ifr_name);

    if (inet6)
      address->address.address6 = *((struct sockaddr_in6*) &p->ifr_addr);
    else
      address->address.address4 = *((struct sockaddr_in*) &p->ifr_addr);

    if (inet6) {
      memset(&if6, 0, sizeof(if6));
      r = uv__strscpy(if6.ifr_name, p->ifr_name, sizeof(if6.ifr_name));
      if (r == UV_E2BIG)
        goto cleanup;
      r = 0;
      memcpy(&if6.ifr_Addr, &p->ifr_addr, sizeof(if6.ifr_Addr));
      if (ioctl(sock6fd, SIOCGIFNETMASK6, &if6) == -1)
        goto syserror;
      address->netmask.netmask6 = *((struct sockaddr_in6*) &if6.ifr_Addr);
      /* Explicitly set family as the ioctl call appears to return it as 0. */
      address->netmask.netmask6.sin6_family = AF_INET6;
    } else {
      if (ioctl(sockfd, SIOCGIFNETMASK, p) == -1)
        goto syserror;
      address->netmask.netmask4 = *((struct sockaddr_in*) &p->ifr_addr);
      /* Explicitly set family as the ioctl call appears to return it as 0. */
      address->netmask.netmask4.sin_family = AF_INET;
    }

    address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0;

    address++;
  }

  /* Fill in physical addresses. */
  ifr = ifc.ifc_req;
  while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
    p = ifr;
    ifr = (struct ifreq*)
      ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));

    if (p->ifr_addr.sa_family != AF_LINK)
      continue;

    address = *addresses;
    for (i = 0; i < *count; i++) {
      if (strcmp(address->name, p->ifr_name) == 0) {
        sa_addr = (struct sockaddr_dl*) &p->ifr_addr;
        memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
      }
      address++;
    }
  }

#undef ADDR_SIZE

  goto cleanup;

syserror:
  uv_free_interface_addresses(*addresses, *count);
  *addresses = NULL;
  *count = 0;
  r = UV_ENOSYS;

cleanup:
  if (sockfd != -1)
    uv__close(sockfd);
  if (sock6fd != -1)
    uv__close(sock6fd);
  uv__free(ifc.ifc_req);
  return r;
}
1266
1267
1268
/* Release an address list returned by uv_interface_addresses(): each
 * entry's name string, then the array itself. */
void uv_free_interface_addresses(uv_interface_address_t* addresses,
                                 int count) {
  int idx;

  for (idx = 0; idx < count; idx++)
    uv__free(addresses[idx].name);

  uv__free(addresses);
}
1278
1279
1280
/* Called when a watcher's fd is closed while uv__io_poll() may still hold
 * pending events for it: mark those events invalid (fd = -1) and remove
 * the descriptor from the kernel pollset. */
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct pollfd* events;
  uintptr_t i;
  uintptr_t nfds;
  struct poll_ctl pc;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  /* uv__io_poll() stashes its event array and count past the end of the
   * watchers table while dispatching callbacks. */
  events = (struct pollfd*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];

  if (events != NULL)
    /* Invalidate events with same file descriptor */
    for (i = 0; i < nfds; i++)
      if ((int) events[i].fd == fd)
        events[i].fd = -1;

  /* Remove the file descriptor from the poll set */
  pc.events = 0;
  pc.cmd = PS_DELETE;
  pc.fd = fd;
  if(loop->backend_fd >= 0)
    pollset_ctl(loop->backend_fd, &pc, 1);
}
1305
1306