Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Kitware
GitHub Repository: Kitware/CMake
Path: blob/master/Utilities/cmlibuv/src/unix/linux-core.c
3156 views
1
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2
* Permission is hereby granted, free of charge, to any person obtaining a copy
3
* of this software and associated documentation files (the "Software"), to
4
* deal in the Software without restriction, including without limitation the
5
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
6
* sell copies of the Software, and to permit persons to whom the Software is
7
* furnished to do so, subject to the following conditions:
8
*
9
* The above copyright notice and this permission notice shall be included in
10
* all copies or substantial portions of the Software.
11
*
12
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
15
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
16
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
18
* IN THE SOFTWARE.
19
*/
20
21
/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their
22
* EPOLL* counterparts. We use the POLL* variants in this file because that
23
* is what libuv uses elsewhere.
24
*/
25
26
#include "uv.h"
27
#include "internal.h"
28
29
#include <inttypes.h>
30
#include <stdint.h>
31
#include <stdio.h>
32
#include <stdlib.h>
33
#include <string.h>
34
#include <assert.h>
35
#include <errno.h>
36
37
#include <net/if.h>
38
#include <sys/epoll.h>
39
#include <sys/param.h>
40
#include <sys/prctl.h>
41
#include <sys/sysinfo.h>
42
#include <unistd.h>
43
#include <fcntl.h>
44
#include <time.h>
45
46
#define HAVE_IFADDRS_H 1
47
48
# if defined(__ANDROID_API__) && __ANDROID_API__ < 24
49
# undef HAVE_IFADDRS_H
50
#endif
51
52
#ifdef __UCLIBC__
53
# if __UCLIBC_MAJOR__ < 0 && __UCLIBC_MINOR__ < 9 && __UCLIBC_SUBLEVEL__ < 32
54
# undef HAVE_IFADDRS_H
55
# endif
56
#endif
57
58
#ifdef HAVE_IFADDRS_H
59
# include <ifaddrs.h>
60
# include <sys/socket.h>
61
# include <net/ethernet.h>
62
# include <netpacket/packet.h>
63
#endif /* HAVE_IFADDRS_H */
64
65
/* Available from 2.6.32 onwards. */
66
#ifndef CLOCK_MONOTONIC_COARSE
67
# define CLOCK_MONOTONIC_COARSE 6
68
#endif
69
70
/* This is rather annoying: CLOCK_BOOTTIME lives in <linux/time.h> but we can't
71
* include that file because it conflicts with <time.h>. We'll just have to
72
* define it ourselves.
73
*/
74
#ifndef CLOCK_BOOTTIME
75
# define CLOCK_BOOTTIME 7
76
#endif
77
78
/* Forward declarations for the /proc/stat and /proc/cpuinfo based helpers
 * used by uv_cpu_info() below.
 */
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci);
static int read_times(FILE* statfile_fp,
                      unsigned int numcpus,
                      uv_cpu_info_t* ci);
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
static uint64_t read_cpufreq(unsigned int cpunum);
84
85
/* Linux-specific loop bootstrap: mark the inotify state as unused and
 * delegate creation of the epoll backend fd to the shared initializer.
 */
int uv__platform_loop_init(uv_loop_t* loop) {
  loop->inotify_watchers = NULL;
  loop->inotify_fd = -1;

  return uv__epoll_init(loop);
}
92
93
94
/* Rebuild the epoll backend in the child after fork(): the inherited epoll
 * fd is shared with the parent, so close it and create a fresh one, then
 * re-attach the inotify watchers that were registered before the fork.
 */
int uv__io_fork(uv_loop_t* loop) {
  void* saved_watchers;
  int rc;

  /* Capture the watcher list before the platform state is torn down. */
  saved_watchers = loop->inotify_watchers;

  uv__close(loop->backend_fd);
  loop->backend_fd = -1;
  uv__platform_loop_delete(loop);

  rc = uv__platform_loop_init(loop);
  if (rc != 0)
    return rc;

  return uv__inotify_fork(loop, saved_watchers);
}
110
111
112
/* Tear down the inotify fd and its read watcher; a no-op when inotify was
 * never initialized (inotify_fd == -1). The epoll fd is closed elsewhere.
 */
void uv__platform_loop_delete(uv_loop_t* loop) {
  if (loop->inotify_fd != -1) {
    uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN);
    uv__close(loop->inotify_fd);
    loop->inotify_fd = -1;
  }
}
118
119
120
121
/* Return a monotonic timestamp in nanoseconds.
 * For UV_CLOCK_FAST, a one-time probe (cached in fast_clock_id) checks
 * whether CLOCK_MONOTONIC_COARSE has millisecond resolution or better and,
 * if so, uses it; otherwise plain CLOCK_MONOTONIC is used.
 */
uint64_t uv__hrtime(uv_clocktype_t type) {
  static clock_t fast_clock_id = -1;  /* Cached probe result; -1 = not probed yet. */
  struct timespec t;
  clock_t clock_id;

  /* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has
   * millisecond granularity or better. CLOCK_MONOTONIC_COARSE is
   * serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may
   * decide to make a costly system call.
   */
  /* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE
   * when it has microsecond granularity or better (unlikely).
   */
  clock_id = CLOCK_MONOTONIC;
  if (type != UV_CLOCK_FAST)
    goto done;

  /* Relaxed atomics suffice: concurrent probes all store the same value. */
  clock_id = uv__load_relaxed(&fast_clock_id);
  if (clock_id != -1)
    goto done;

  clock_id = CLOCK_MONOTONIC;
  if (0 == clock_getres(CLOCK_MONOTONIC_COARSE, &t))
    if (t.tv_nsec <= 1 * 1000 * 1000)  /* Resolution of 1 ms or finer? */
      clock_id = CLOCK_MONOTONIC_COARSE;

  uv__store_relaxed(&fast_clock_id, clock_id);

done:

  if (clock_gettime(clock_id, &t))
    return 0;  /* Not really possible. */

  return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
}
156
157
158
/* Store the process's resident set size, in bytes, in *rss.
 * Parses field 24 (rss, counted in pages) of /proc/self/stat.
 * Returns 0 on success, UV_EINVAL on parse failure, or a negated errno
 * when the file cannot be opened or read.
 */
int uv_resident_set_memory(size_t* rss) {
  char buf[1024];
  const char* s;
  ssize_t n;
  long val;
  int fd;
  int i;

  /* Retry open/read on EINTR: signals may interrupt either syscall. */
  do
    fd = open("/proc/self/stat", O_RDONLY);
  while (fd == -1 && errno == EINTR);

  if (fd == -1)
    return UV__ERR(errno);

  do
    n = read(fd, buf, sizeof(buf) - 1);
  while (n == -1 && errno == EINTR);

  uv__close(fd);
  if (n == -1)
    return UV__ERR(errno);
  buf[n] = '\0';

  /* Skip the pid field, then the parenthesized comm field — the latter may
   * itself contain spaces, so scan for the closing parenthesis instead of
   * splitting on whitespace.
   */
  s = strchr(buf, ' ');
  if (s == NULL)
    goto err;

  s += 1;
  if (*s != '(')
    goto err;

  s = strchr(s, ')');
  if (s == NULL)
    goto err;

  /* Skip fields 3..23; the next field (24) is rss in pages. */
  for (i = 1; i <= 22; i++) {
    s = strchr(s + 1, ' ');
    if (s == NULL)
      goto err;
  }

  errno = 0;
  val = strtol(s, NULL, 10);
  if (errno != 0)
    goto err;
  if (val < 0)
    goto err;

  *rss = val * getpagesize();  /* pages -> bytes */
  return 0;

err:
  return UV_EINVAL;
}
213
214
/* Store the system uptime in seconds in *uptime.
 * Prefers /proc/uptime; falls back to CLOCK_BOOTTIME (which keeps counting
 * through suspend) and finally CLOCK_MONOTONIC on pre-2.6.39 kernels.
 * Returns 0 on success or a negated errno.
 */
int uv_uptime(double* uptime) {
  static volatile int no_clock_boottime;  /* Sticky flag: CLOCK_BOOTTIME unsupported. */
  char buf[128];
  struct timespec now;
  int r;

  /* Try /proc/uptime first, then fallback to clock_gettime(). */

  if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf)))
    if (1 == sscanf(buf, "%lf", uptime))
      return 0;

  /* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
   * (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
   * is suspended.
   */
  if (no_clock_boottime) {
    retry_clock_gettime: r = clock_gettime(CLOCK_MONOTONIC, &now);
  }
  else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
    /* EINVAL means the kernel predates CLOCK_BOOTTIME: remember that and
     * jump into the monotonic-clock branch above (a legal goto — the target
     * block declares no variable-length arrays).
     */
    no_clock_boottime = 1;
    goto retry_clock_gettime;
  }

  if (r)
    return UV__ERR(errno);

  *uptime = now.tv_sec;  /* Sub-second precision is intentionally dropped. */
  return 0;
}
244
245
246
static int uv__cpu_num(FILE* statfile_fp, unsigned int* numcpus) {
247
unsigned int num;
248
char buf[1024];
249
250
if (!fgets(buf, sizeof(buf), statfile_fp))
251
return UV_EIO;
252
253
num = 0;
254
while (fgets(buf, sizeof(buf), statfile_fp)) {
255
if (strncmp(buf, "cpu", 3))
256
break;
257
num++;
258
}
259
260
if (num == 0)
261
return UV_EIO;
262
263
*numcpus = num;
264
return 0;
265
}
266
267
268
/* Fill *cpu_infos with a heap-allocated array of per-cpu model, speed and
 * time data, and *count with its length. Returns 0 on success or a negative
 * libuv error code; on failure *cpu_infos is NULL and *count is 0.
 * The caller releases the array with uv_free_cpu_info().
 */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  unsigned int numcpus;
  uv_cpu_info_t* ci;
  int err;
  FILE* statfile_fp;

  *cpu_infos = NULL;
  *count = 0;

  statfile_fp = uv__open_file("/proc/stat");
  if (statfile_fp == NULL)
    return UV__ERR(errno);

  err = uv__cpu_num(statfile_fp, &numcpus);
  if (err < 0)
    goto out;

  /* Preset the error code so the allocation-failure path can just jump. */
  err = UV_ENOMEM;
  ci = uv__calloc(numcpus, sizeof(*ci));
  if (ci == NULL)
    goto out;

  err = read_models(numcpus, ci);
  if (err == 0)
    err = read_times(statfile_fp, numcpus, ci);

  if (err) {
    uv_free_cpu_info(ci, numcpus);
    goto out;
  }

  /* read_models() on x86 also reads the CPU speed from /proc/cpuinfo.
   * We don't check for errors here. Worst case, the field is left zero.
   */
  if (ci[0].speed == 0)
    read_speeds(numcpus, ci);

  *cpu_infos = ci;
  *count = numcpus;
  err = 0;

out:

  /* A failing fclose() on a read-only stream (other than EINTR/EINPROGRESS)
   * indicates corrupted process state; treat it as fatal.
   */
  if (fclose(statfile_fp))
    if (errno != EINTR && errno != EINPROGRESS)
      abort();

  return err;
}
317
318
319
/* Populate ci[i].speed (MHz) for every cpu from the sysfs cpufreq files.
 * A cpu whose frequency cannot be read ends up with speed 0, because
 * read_cpufreq() returns 0 on failure.
 */
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
  unsigned int i = 0;

  while (i < numcpus) {
    ci[i].speed = read_cpufreq(i) / 1000;  /* kHz -> MHz */
    i++;
  }
}
325
326
327
/* Also reads the CPU frequency on ppc and x86. The other architectures only
328
* have a BogoMIPS field, which may not be very accurate.
329
*
330
* Note: Simply returns on error, uv_cpu_info() takes care of the cleanup.
331
*/
332
/* Fill ci[i].model for every cpu by parsing /proc/cpuinfo, with a
 * per-architecture marker string; on x86 the "cpu MHz" field is also read
 * into ci[i].speed. Entries that cannot be determined inherit the last
 * parsed model, or "unknown" when nothing was parsed at all.
 *
 * Note: Simply returns on error, uv_cpu_info() takes care of the cleanup.
 */
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
#if defined(__PPC__)
  static const char model_marker[] = "cpu\t\t: ";
  static const char speed_marker[] = "clock\t\t: ";
#else
  static const char model_marker[] = "model name\t: ";
  static const char speed_marker[] = "cpu MHz\t\t: ";
#endif
  const char* inferred_model;
  unsigned int model_idx;
  unsigned int speed_idx;
  unsigned int part_idx;
  char buf[1024];
  char* model;
  FILE* fp;
  int model_id;

  /* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */
  (void) &model_marker;
  (void) &speed_marker;
  (void) &speed_idx;
  (void) &part_idx;
  (void) &model;
  (void) &buf;
  (void) &fp;
  (void) &model_id;

  model_idx = 0;
  speed_idx = 0;
  part_idx = 0;

#if defined(__arm__) || \
    defined(__i386__) || \
    defined(__mips__) || \
    defined(__aarch64__) || \
    defined(__PPC__) || \
    defined(__x86_64__)
  fp = uv__open_file("/proc/cpuinfo");
  if (fp == NULL)
    return UV__ERR(errno);

  while (fgets(buf, sizeof(buf), fp)) {
    /* Primary path: line starts with the architecture's model marker. */
    if (model_idx < numcpus) {
      if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
        model = buf + sizeof(model_marker) - 1;
        model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */
        if (model == NULL) {
          fclose(fp);
          return UV_ENOMEM;
        }
        ci[model_idx++].model = model;
        continue;
      }
    }
#if defined(__arm__) || defined(__mips__) || defined(__aarch64__)
    /* Secondary path for kernels/architectures that use other field names. */
    if (model_idx < numcpus) {
#if defined(__arm__)
      /* Fallback for pre-3.8 kernels. */
      static const char model_marker[] = "Processor\t: ";
#elif defined(__aarch64__)
      static const char part_marker[] = "CPU part\t: ";

      /* Adapted from: https://github.com/karelzak/util-linux */
      struct vendor_part {
        const int id;
        const char* name;
      };

      static const struct vendor_part arm_chips[] = {
        { 0x811, "ARM810" },
        { 0x920, "ARM920" },
        { 0x922, "ARM922" },
        { 0x926, "ARM926" },
        { 0x940, "ARM940" },
        { 0x946, "ARM946" },
        { 0x966, "ARM966" },
        { 0xa20, "ARM1020" },
        { 0xa22, "ARM1022" },
        { 0xa26, "ARM1026" },
        { 0xb02, "ARM11 MPCore" },
        { 0xb36, "ARM1136" },
        { 0xb56, "ARM1156" },
        { 0xb76, "ARM1176" },
        { 0xc05, "Cortex-A5" },
        { 0xc07, "Cortex-A7" },
        { 0xc08, "Cortex-A8" },
        { 0xc09, "Cortex-A9" },
        { 0xc0d, "Cortex-A17" }, /* Originally A12 */
        { 0xc0f, "Cortex-A15" },
        { 0xc0e, "Cortex-A17" },
        { 0xc14, "Cortex-R4" },
        { 0xc15, "Cortex-R5" },
        { 0xc17, "Cortex-R7" },
        { 0xc18, "Cortex-R8" },
        { 0xc20, "Cortex-M0" },
        { 0xc21, "Cortex-M1" },
        { 0xc23, "Cortex-M3" },
        { 0xc24, "Cortex-M4" },
        { 0xc27, "Cortex-M7" },
        { 0xc60, "Cortex-M0+" },
        { 0xd01, "Cortex-A32" },
        { 0xd03, "Cortex-A53" },
        { 0xd04, "Cortex-A35" },
        { 0xd05, "Cortex-A55" },
        { 0xd06, "Cortex-A65" },
        { 0xd07, "Cortex-A57" },
        { 0xd08, "Cortex-A72" },
        { 0xd09, "Cortex-A73" },
        { 0xd0a, "Cortex-A75" },
        { 0xd0b, "Cortex-A76" },
        { 0xd0c, "Neoverse-N1" },
        { 0xd0d, "Cortex-A77" },
        { 0xd0e, "Cortex-A76AE" },
        { 0xd13, "Cortex-R52" },
        { 0xd20, "Cortex-M23" },
        { 0xd21, "Cortex-M33" },
        { 0xd41, "Cortex-A78" },
        { 0xd42, "Cortex-A78AE" },
        { 0xd4a, "Neoverse-E1" },
        { 0xd4b, "Cortex-A78C" },
      };

      /* aarch64 exposes a hex part id; map it to a human-readable name. */
      if (strncmp(buf, part_marker, sizeof(part_marker) - 1) == 0) {
        model = buf + sizeof(part_marker) - 1;

        errno = 0;
        model_id = strtol(model, NULL, 16);
        if ((errno != 0) || model_id < 0) {
          fclose(fp);
          return UV_EINVAL;
        }

        for (part_idx = 0; part_idx < ARRAY_SIZE(arm_chips); part_idx++) {
          if (model_id == arm_chips[part_idx].id) {
            model = uv__strdup(arm_chips[part_idx].name);
            if (model == NULL) {
              fclose(fp);
              return UV_ENOMEM;
            }
            ci[model_idx++].model = model;
            break;
          }
        }
      }
#else /* defined(__mips__) */
      static const char model_marker[] = "cpu model\t\t: ";
#endif
      if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
        model = buf + sizeof(model_marker) - 1;
        model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */
        if (model == NULL) {
          fclose(fp);
          return UV_ENOMEM;
        }
        ci[model_idx++].model = model;
        continue;
      }
    }
#else /* !__arm__ && !__mips__ && !__aarch64__ */
    /* x86/PPC: also capture the reported clock speed. */
    if (speed_idx < numcpus) {
      if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
        ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
        continue;
      }
    }
#endif /* __arm__ || __mips__ || __aarch64__ */
  }

  fclose(fp);
#endif /* __arm__ || __i386__ || __mips__ || __PPC__ || __x86_64__ || __aarch__ */

  /* Now we want to make sure that all the models contain *something* because
   * it's not safe to leave them as null. Copy the last entry unless there
   * isn't one, in that case we simply put "unknown" into everything.
   */
  inferred_model = "unknown";
  if (model_idx > 0)
    inferred_model = ci[model_idx - 1].model;

  while (model_idx < numcpus) {
    model = uv__strndup(inferred_model, strlen(inferred_model));
    if (model == NULL)
      return UV_ENOMEM;
    ci[model_idx++].model = model;
  }

  return 0;
}
520
521
522
static int read_times(FILE* statfile_fp,
523
unsigned int numcpus,
524
uv_cpu_info_t* ci) {
525
struct uv_cpu_times_s ts;
526
unsigned int ticks;
527
unsigned int multiplier;
528
uint64_t user;
529
uint64_t nice;
530
uint64_t sys;
531
uint64_t idle;
532
uint64_t dummy;
533
uint64_t irq;
534
uint64_t num;
535
uint64_t len;
536
char buf[1024];
537
538
ticks = (unsigned int)sysconf(_SC_CLK_TCK);
539
assert(ticks != (unsigned int) -1);
540
assert(ticks != 0);
541
multiplier = ((uint64_t)1000L / ticks);
542
543
rewind(statfile_fp);
544
545
if (!fgets(buf, sizeof(buf), statfile_fp))
546
abort();
547
548
num = 0;
549
550
while (fgets(buf, sizeof(buf), statfile_fp)) {
551
if (num >= numcpus)
552
break;
553
554
if (strncmp(buf, "cpu", 3))
555
break;
556
557
/* skip "cpu<num> " marker */
558
{
559
unsigned int n;
560
int r = sscanf(buf, "cpu%u ", &n);
561
assert(r == 1);
562
(void) r; /* silence build warning */
563
for (len = sizeof("cpu0"); n /= 10; len++);
564
}
565
566
/* Line contains user, nice, system, idle, iowait, irq, softirq, steal,
567
* guest, guest_nice but we're only interested in the first four + irq.
568
*
569
* Don't use %*s to skip fields or %ll to read straight into the uint64_t
570
* fields, they're not allowed in C89 mode.
571
*/
572
if (6 != sscanf(buf + len,
573
"%" PRIu64 " %" PRIu64 " %" PRIu64
574
"%" PRIu64 " %" PRIu64 " %" PRIu64,
575
&user,
576
&nice,
577
&sys,
578
&idle,
579
&dummy,
580
&irq))
581
abort();
582
583
ts.user = user * multiplier;
584
ts.nice = nice * multiplier;
585
ts.sys = sys * multiplier;
586
ts.idle = idle * multiplier;
587
ts.irq = irq * multiplier;
588
ci[num++].cpu_times = ts;
589
}
590
assert(num == numcpus);
591
592
return 0;
593
}
594
595
596
/* Read the current frequency of cpu `cpunum`, in kHz, from the sysfs
 * cpufreq interface. Returns 0 when the file is missing (no cpufreq
 * support for that cpu) or cannot be parsed.
 */
static uint64_t read_cpufreq(unsigned int cpunum) {
  uint64_t val;
  char buf[1024];
  FILE* fp;

  snprintf(buf,
           sizeof(buf),
           "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq",
           cpunum);

  fp = uv__open_file(buf);
  if (fp == NULL)
    return 0;

  /* SCNu64, not PRIu64: <inttypes.h> only specifies the SCN* macros for
   * the fscanf family.
   */
  if (fscanf(fp, "%" SCNu64, &val) != 1)
    val = 0;

  fclose(fp);

  return val;
}
617
618
619
#ifdef HAVE_IFADDRS_H
620
static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
621
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
622
return 1;
623
if (ent->ifa_addr == NULL)
624
return 1;
625
/*
626
* On Linux getifaddrs returns information related to the raw underlying
627
* devices. We're not interested in this information yet.
628
*/
629
if (ent->ifa_addr->sa_family == PF_PACKET)
630
return exclude_type;
631
return !exclude_type;
632
}
633
#endif
634
635
/* Enumerate up-and-running network interfaces with their address, netmask
 * and physical (link-layer) address. On success *addresses points to a
 * calloc()'d array of *count entries which the caller releases with
 * uv_free_interface_addresses(). Returns 0, UV_ENOSYS when getifaddrs()
 * is unavailable, UV_ENOMEM, or a negated errno.
 */
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
#ifndef HAVE_IFADDRS_H
  *count = 0;
  *addresses = NULL;
  return UV_ENOSYS;
#else
  struct ifaddrs *addrs, *ent;
  uv_interface_address_t* address;
  int i;
  struct sockaddr_ll *sll;

  *count = 0;
  *addresses = NULL;

  if (getifaddrs(&addrs))
    return UV__ERR(errno);

  /* Count the number of interfaces */
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
      continue;

    (*count)++;
  }

  if (*count == 0) {
    freeifaddrs(addrs);
    return 0;
  }

  /* Make sure the memory is initialized to zero using calloc() */
  *addresses = uv__calloc(*count, sizeof(**addresses));
  if (!(*addresses)) {
    freeifaddrs(addrs);
    return UV_ENOMEM;
  }

  address = *addresses;

  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
      continue;

    address->name = uv__strdup(ent->ifa_name);

    if (ent->ifa_addr->sa_family == AF_INET6) {
      address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
    } else {
      address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
    }

    /* getifaddrs(3) documents that ifa_netmask may be NULL (e.g. for
     * point-to-point links). Leave the calloc()-zeroed netmask untouched
     * in that case instead of dereferencing a null pointer.
     */
    if (ent->ifa_netmask != NULL) {
      if (ent->ifa_netmask->sa_family == AF_INET6) {
        address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
      } else {
        address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
      }
    }

    address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);

    address++;
  }

  /* Fill in physical addresses for each interface */
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))
      continue;

    address = *addresses;

    for (i = 0; i < (*count); i++) {
      size_t namelen = strlen(ent->ifa_name);
      /* Alias interfaces (eth0:1) share the same physical address. */
      if (strncmp(address->name, ent->ifa_name, namelen) == 0 &&
          (address->name[namelen] == 0 || address->name[namelen] == ':')) {
        sll = (struct sockaddr_ll*)ent->ifa_addr;
        memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr));
      }
      address++;
    }
  }

  freeifaddrs(addrs);

  return 0;
#endif
}
721
722
723
/* Release the array produced by uv_interface_addresses() together with the
 * heap-allocated name of every entry.
 */
void uv_free_interface_addresses(uv_interface_address_t* addresses,
                                 int count) {
  int i = 0;

  while (i < count) {
    uv__free(addresses[i].name);
    i++;
  }

  uv__free(addresses);
}
733
734
735
/* Set the kernel-visible thread name (shown by e.g. ps and top).
 * A no-op on kernels/libcs without PR_SET_NAME.
 */
void uv__set_process_title(const char* title) {
#if defined(PR_SET_NAME)
  prctl(PR_SET_NAME, title); /* Only copies first 16 characters. */
#endif
}
740
741
742
/* Return the value of the `what` field of /proc/meminfo, converted from
 * kilobytes to bytes, or 0 when the file or the field cannot be read.
 * `what` must include the trailing colon, e.g. "MemTotal:".
 */
static uint64_t uv__read_proc_meminfo(const char* what) {
  uint64_t rc;
  char* p;
  char buf[4096];  /* Large enough to hold all of /proc/meminfo. */

  if (uv__slurp("/proc/meminfo", buf, sizeof(buf)))
    return 0;

  p = strstr(buf, what);

  if (p == NULL)
    return 0;

  p += strlen(what);

  rc = 0;
  /* SCNu64, not PRIu64: <inttypes.h> only specifies the SCN* macros for
   * the fscanf family.
   */
  sscanf(p, "%" SCNu64 " kB", &rc);

  return rc * 1024;  /* /proc/meminfo reports kilobytes. */
}
762
763
764
/* Amount of memory available for new allocations, in bytes, or 0 when it
 * cannot be determined. Prefers MemAvailable from /proc/meminfo and falls
 * back to sysinfo() on older kernels.
 */
uint64_t uv_get_free_memory(void) {
  struct sysinfo info;
  uint64_t amount;

  amount = uv__read_proc_meminfo("MemAvailable:");
  if (amount != 0)
    return amount;

  if (sysinfo(&info) != 0)
    return 0;

  return (uint64_t) info.freeram * info.mem_unit;
}
778
779
780
/* Total physical memory in bytes, or 0 when it cannot be determined.
 * Prefers MemTotal from /proc/meminfo and falls back to sysinfo().
 */
uint64_t uv_get_total_memory(void) {
  struct sysinfo info;
  uint64_t amount;

  amount = uv__read_proc_meminfo("MemTotal:");
  if (amount != 0)
    return amount;

  if (sysinfo(&info) != 0)
    return 0;

  return (uint64_t) info.totalram * info.mem_unit;
}
794
795
796
/* Read a single uint64 value from /sys/fs/cgroup/<cgroup>/<param>.
 * Returns 0 when the file is absent or unparsable.
 */
static uint64_t uv__read_cgroups_uint64(const char* cgroup, const char* param) {
  char filename[256];
  char buf[32];  /* Large enough to hold an encoded uint64_t. */
  uint64_t rc;

  rc = 0;
  snprintf(filename, sizeof(filename), "/sys/fs/cgroup/%s/%s", cgroup, param);
  if (0 == uv__slurp(filename, buf, sizeof(buf)))
    /* SCNu64, not PRIu64: only the SCN* macros are specified for scanf. */
    sscanf(buf, "%" SCNu64, &rc);

  return rc;
}
808
809
810
/* Memory limit imposed on this process by cgroups (v1), in bytes. */
uint64_t uv_get_constrained_memory(void) {
  /*
   * This might return 0 if there was a problem getting the memory limit from
   * cgroups. This is OK because a return value of 0 signifies that the memory
   * limit is unknown.
   */
  return uv__read_cgroups_uint64("memory", "memory.limit_in_bytes");
}
818
819
820
/* Report the 1-, 5- and 15-minute load averages into avg[0..2].
 * Reads /proc/loadavg when possible, otherwise derives the values from
 * sysinfo(); leaves avg[] untouched when both sources fail.
 */
void uv_loadavg(double avg[3]) {
  struct sysinfo info;
  char buf[128];  /* Large enough to hold all of /proc/loadavg. */
  int i;

  if (0 == uv__slurp("/proc/loadavg", buf, sizeof(buf)))
    if (3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2]))
      return;

  if (sysinfo(&info) < 0)
    return;

  /* sysinfo() loads are fixed-point with a 2^16 scale (SI_LOAD_SHIFT). */
  for (i = 0; i < 3; i++)
    avg[i] = (double) info.loads[i] / 65536.0;
}
835
836