Kitware
GitHub Repository: Kitware/CMake
Path: blob/master/Utilities/cmlibuv/src/uv-common.c
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "uv-common.h"

#include <assert.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h> /* NULL */
#include <stdio.h>
#include <stdlib.h> /* malloc */
#include <string.h> /* memset */

#if defined(_WIN32)
# include <malloc.h> /* malloc */
#else
# include <net/if.h> /* if_nametoindex */
# include <sys/un.h> /* AF_UNIX, sockaddr_un */
#endif

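/* Table of allocation callbacks used for every libuv-internal allocation.
 * It defaults to the C runtime allocator and can be swapped out with
 * uv_replace_allocator() below.
 */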
typedef struct {
  uv_malloc_func local_malloc;
  uv_realloc_func local_realloc;
  uv_calloc_func local_calloc;
  uv_free_func local_free;
} uv__allocator_t;

static uv__allocator_t uv__allocator = {
  malloc,
  realloc,
  calloc,
  free,
};

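/* Allocator-aware counterparts of strdup()/strndup(): they allocate through
 * uv__malloc() so that a replacement allocator sees these copies as well.
 */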
char* uv__strdup(const char* s) {
  size_t len = strlen(s) + 1;
  char* m = uv__malloc(len);
  if (m == NULL)
    return NULL;
  return memcpy(m, s, len);
}

char* uv__strndup(const char* s, size_t n) {
  char* m;
  size_t len = strlen(s);
  if (n < len)
    len = n;
  m = uv__malloc(len + 1);
  if (m == NULL)
    return NULL;
  m[len] = '\0';
  return memcpy(m, s, len);
}

void* uv__malloc(size_t size) {
  if (size > 0)
    return uv__allocator.local_malloc(size);
  return NULL;
}

void uv__free(void* ptr) {
  int saved_errno;

  /* Libuv expects that free() does not clobber errno. The system allocator
   * honors that assumption but custom allocators may not be so careful.
   */
  saved_errno = errno;
  uv__allocator.local_free(ptr);
  errno = saved_errno;
}

void* uv__calloc(size_t count, size_t size) {
  return uv__allocator.local_calloc(count, size);
}

void* uv__realloc(void* ptr, size_t size) {
  if (size > 0)
    return uv__allocator.local_realloc(ptr, size);
  uv__free(ptr);
  return NULL;
}

void* uv__reallocf(void* ptr, size_t size) {
  void* newptr;

  newptr = uv__realloc(ptr, size);
  if (newptr == NULL)
    if (size > 0)
      uv__free(ptr);

  return newptr;
}

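/* Install custom allocation callbacks; all four must be non-NULL or the call
 * fails with UV_EINVAL. For example, with hypothetical my_malloc/my_realloc/
 * my_calloc/my_free wrappers:
 *
 *   uv_replace_allocator(my_malloc, my_realloc, my_calloc, my_free);
 */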
int uv_replace_allocator(uv_malloc_func malloc_func,
                         uv_realloc_func realloc_func,
                         uv_calloc_func calloc_func,
                         uv_free_func free_func) {
  if (malloc_func == NULL || realloc_func == NULL ||
      calloc_func == NULL || free_func == NULL) {
    return UV_EINVAL;
  }

  uv__allocator.local_malloc = malloc_func;
  uv__allocator.local_realloc = realloc_func;
  uv__allocator.local_calloc = calloc_func;
  uv__allocator.local_free = free_func;

  return 0;
}

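/* Map a handle or request type enum to the size of its concrete struct.
 * Unknown types report (size_t)-1.
 */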
#define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t);

size_t uv_handle_size(uv_handle_type type) {
  switch (type) {
    UV_HANDLE_TYPE_MAP(XX)
    default:
      return -1;
  }
}

size_t uv_req_size(uv_req_type type) {
  switch(type) {
    UV_REQ_TYPE_MAP(XX)
    default:
      return -1;
  }
}

#undef XX


size_t uv_loop_size(void) {
  return sizeof(uv_loop_t);
}


uv_buf_t uv_buf_init(char* base, unsigned int len) {
  uv_buf_t buf;
  buf.base = base;
  buf.len = len;
  return buf;
}

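/* Translate libuv error codes into symbolic names and human-readable
 * messages. The *_r variants write into a caller-supplied buffer;
 * uv_err_name() and uv_strerror() return static strings, falling back to a
 * heap-allocated "Unknown system error N" string for unrecognized codes.
 */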
static const char* uv__unknown_err_code(int err) {
  char buf[32];
  char* copy;

  snprintf(buf, sizeof(buf), "Unknown system error %d", err);
  copy = uv__strdup(buf);

  return copy != NULL ? copy : "Unknown system error";
}

#define UV_ERR_NAME_GEN_R(name, _) \
case UV_## name: \
  uv__strscpy(buf, #name, buflen); break;
char* uv_err_name_r(int err, char* buf, size_t buflen) {
  switch (err) {
    UV_ERRNO_MAP(UV_ERR_NAME_GEN_R)
    default: snprintf(buf, buflen, "Unknown system error %d", err);
  }
  return buf;
}
#undef UV_ERR_NAME_GEN_R


#define UV_ERR_NAME_GEN(name, _) case UV_ ## name: return #name;
const char* uv_err_name(int err) {
  switch (err) {
    UV_ERRNO_MAP(UV_ERR_NAME_GEN)
  }
  return uv__unknown_err_code(err);
}
#undef UV_ERR_NAME_GEN


#define UV_STRERROR_GEN_R(name, msg) \
case UV_ ## name: \
  snprintf(buf, buflen, "%s", msg); break;
char* uv_strerror_r(int err, char* buf, size_t buflen) {
  switch (err) {
    UV_ERRNO_MAP(UV_STRERROR_GEN_R)
    default: snprintf(buf, buflen, "Unknown system error %d", err);
  }
  return buf;
}
#undef UV_STRERROR_GEN_R


#define UV_STRERROR_GEN(name, msg) case UV_ ## name: return msg;
const char* uv_strerror(int err) {
  switch (err) {
    UV_ERRNO_MAP(UV_STRERROR_GEN)
  }
  return uv__unknown_err_code(err);
}
#undef UV_STRERROR_GEN

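/* The socket-address helpers and TCP/UDP wrappers below are not needed while
 * bootstrapping CMake on POSIX systems, so they are compiled only for the
 * full build (and always on Windows).
 */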
#if !defined(CMAKE_BOOTSTRAP) || defined(_WIN32)

int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr) {
  memset(addr, 0, sizeof(*addr));
  addr->sin_family = AF_INET;
  addr->sin_port = htons(port);
#ifdef SIN6_LEN
  addr->sin_len = sizeof(*addr);
#endif
  return uv_inet_pton(AF_INET, ip, &(addr->sin_addr.s_addr));
}

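/* Convert a textual IPv6 address to a sockaddr_in6. An optional "%zone"
 * suffix (e.g. "fe80::1%eth0") selects the scope id: the interface index on
 * Unix, a numeric id on Windows.
 */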
int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr) {
  char address_part[40];
  size_t address_part_size;
  const char* zone_index;

  memset(addr, 0, sizeof(*addr));
  addr->sin6_family = AF_INET6;
  addr->sin6_port = htons(port);
#ifdef SIN6_LEN
  addr->sin6_len = sizeof(*addr);
#endif

  zone_index = strchr(ip, '%');
  if (zone_index != NULL) {
    address_part_size = zone_index - ip;
    if (address_part_size >= sizeof(address_part))
      address_part_size = sizeof(address_part) - 1;

    memcpy(address_part, ip, address_part_size);
    address_part[address_part_size] = '\0';
    ip = address_part;

    zone_index++; /* skip '%' */
    /* NOTE: unknown interface (id=0) is silently ignored */
#ifdef _WIN32
    addr->sin6_scope_id = atoi(zone_index);
#else
    addr->sin6_scope_id = if_nametoindex(zone_index);
#endif
  }

  return uv_inet_pton(AF_INET6, ip, &addr->sin6_addr);
}


int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size) {
  return uv_inet_ntop(AF_INET, &src->sin_addr, dst, size);
}


int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size) {
  return uv_inet_ntop(AF_INET6, &src->sin6_addr, dst, size);
}


int uv_ip_name(const struct sockaddr *src, char *dst, size_t size) {
  switch (src->sa_family) {
  case AF_INET:
    return uv_inet_ntop(AF_INET, &((struct sockaddr_in *)src)->sin_addr,
                        dst, size);
  case AF_INET6:
    return uv_inet_ntop(AF_INET6, &((struct sockaddr_in6 *)src)->sin6_addr,
                        dst, size);
  default:
    return UV_EAFNOSUPPORT;
  }
}


int uv_tcp_bind(uv_tcp_t* handle,
                const struct sockaddr* addr,
                unsigned int flags) {
  unsigned int addrlen;

  if (handle->type != UV_TCP)
    return UV_EINVAL;
  if (uv__is_closing(handle)) {
    return UV_EINVAL;
  }
  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  return uv__tcp_bind(handle, addr, addrlen, flags);
}

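/* The flags argument packs an address family into the low byte and option
 * bits above it, e.g. uv_udp_init_ex(loop, &handle, AF_INET6 | UV_UDP_RECVMMSG)
 * creates an IPv6 socket with recvmmsg() batching enabled where available.
 */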
int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned flags) {
  unsigned extra_flags;
  int domain;
  int rc;

  /* Use the lower 8 bits for the domain. */
  domain = flags & 0xFF;
  if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
    return UV_EINVAL;

  /* Use the higher bits for extra flags. */
  extra_flags = flags & ~0xFF;
  if (extra_flags & ~UV_UDP_RECVMMSG)
    return UV_EINVAL;

  rc = uv__udp_init_ex(loop, handle, flags, domain);

  if (rc == 0)
    if (extra_flags & UV_UDP_RECVMMSG)
      handle->flags |= UV_HANDLE_UDP_RECVMMSG;

  return rc;
}


int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
  return uv_udp_init_ex(loop, handle, AF_UNSPEC);
}


int uv_udp_bind(uv_udp_t* handle,
                const struct sockaddr* addr,
                unsigned int flags) {
  unsigned int addrlen;

  if (handle->type != UV_UDP)
    return UV_EINVAL;

  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  return uv__udp_bind(handle, addr, addrlen, flags);
}


int uv_tcp_connect(uv_connect_t* req,
                   uv_tcp_t* handle,
                   const struct sockaddr* addr,
                   uv_connect_cb cb) {
  unsigned int addrlen;

  if (handle->type != UV_TCP)
    return UV_EINVAL;

  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  return uv__tcp_connect(req, handle, addr, addrlen, cb);
}


int uv_udp_connect(uv_udp_t* handle, const struct sockaddr* addr) {
  unsigned int addrlen;

  if (handle->type != UV_UDP)
    return UV_EINVAL;

  /* Disconnect the handle */
  if (addr == NULL) {
    if (!(handle->flags & UV_HANDLE_UDP_CONNECTED))
      return UV_ENOTCONN;

    return uv__udp_disconnect(handle);
  }

  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  if (handle->flags & UV_HANDLE_UDP_CONNECTED)
    return UV_EISCONN;

  return uv__udp_connect(handle, addr, addrlen);
}


int uv__udp_is_connected(uv_udp_t* handle) {
  struct sockaddr_storage addr;
  int addrlen;
  if (handle->type != UV_UDP)
    return 0;

  addrlen = sizeof(addr);
  if (uv_udp_getpeername(handle, (struct sockaddr*) &addr, &addrlen) != 0)
    return 0;

  return addrlen > 0;
}

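/* Shared validation for uv_udp_send() and uv_udp_try_send(): checks that the
 * destination is consistent with the handle's connected state and returns the
 * sockaddr length to use, or a negative UV_* error code.
 */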
int uv__udp_check_before_send(uv_udp_t* handle, const struct sockaddr* addr) {
  unsigned int addrlen;

  if (handle->type != UV_UDP)
    return UV_EINVAL;

  if (addr != NULL && (handle->flags & UV_HANDLE_UDP_CONNECTED))
    return UV_EISCONN;

  if (addr == NULL && !(handle->flags & UV_HANDLE_UDP_CONNECTED))
    return UV_EDESTADDRREQ;

  if (addr != NULL) {
    if (addr->sa_family == AF_INET)
      addrlen = sizeof(struct sockaddr_in);
    else if (addr->sa_family == AF_INET6)
      addrlen = sizeof(struct sockaddr_in6);
#if defined(AF_UNIX) && !defined(_WIN32)
    else if (addr->sa_family == AF_UNIX)
      addrlen = sizeof(struct sockaddr_un);
#endif
    else
      return UV_EINVAL;
  } else {
    addrlen = 0;
  }

  return addrlen;
}


int uv_udp_send(uv_udp_send_t* req,
                uv_udp_t* handle,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                const struct sockaddr* addr,
                uv_udp_send_cb send_cb) {
  int addrlen;

  addrlen = uv__udp_check_before_send(handle, addr);
  if (addrlen < 0)
    return addrlen;

  return uv__udp_send(req, handle, bufs, nbufs, addr, addrlen, send_cb);
}


int uv_udp_try_send(uv_udp_t* handle,
                    const uv_buf_t bufs[],
                    unsigned int nbufs,
                    const struct sockaddr* addr) {
  int addrlen;

  addrlen = uv__udp_check_before_send(handle, addr);
  if (addrlen < 0)
    return addrlen;

  return uv__udp_try_send(handle, bufs, nbufs, addr, addrlen);
}


int uv_udp_recv_start(uv_udp_t* handle,
                      uv_alloc_cb alloc_cb,
                      uv_udp_recv_cb recv_cb) {
  if (handle->type != UV_UDP || alloc_cb == NULL || recv_cb == NULL)
    return UV_EINVAL;
  else
    return uv__udp_recv_start(handle, alloc_cb, recv_cb);
}


int uv_udp_recv_stop(uv_udp_t* handle) {
  if (handle->type != UV_UDP)
    return UV_EINVAL;
  else
    return uv__udp_recv_stop(handle);
}

#endif

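/* Visit every handle registered with the loop, skipping internal ones. The
 * handle queue is drained into a local queue first, so the callback may close
 * handles (or register new ones) without corrupting the iteration.
 */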
void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
  QUEUE queue;
  QUEUE* q;
  uv_handle_t* h;

  QUEUE_MOVE(&loop->handle_queue, &queue);
  while (!QUEUE_EMPTY(&queue)) {
    q = QUEUE_HEAD(&queue);
    h = QUEUE_DATA(q, uv_handle_t, handle_queue);

    QUEUE_REMOVE(q);
    QUEUE_INSERT_TAIL(&loop->handle_queue, q);

    if (h->flags & UV_HANDLE_INTERNAL) continue;
    walk_cb(h, arg);
  }
}

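/* Debug helper that prints one line per handle. The three flag columns mean
 * R (referenced), A (active) and I (internal); they are followed by the
 * handle type and its address.
 */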
static void uv__print_handles(uv_loop_t* loop, int only_active, FILE* stream) {
  const char* type;
  QUEUE* q;
  uv_handle_t* h;

  if (loop == NULL)
    loop = uv_default_loop();

  QUEUE_FOREACH(q, &loop->handle_queue) {
    h = QUEUE_DATA(q, uv_handle_t, handle_queue);

    if (only_active && !uv__is_active(h))
      continue;

    switch (h->type) {
#define X(uc, lc) case UV_##uc: type = #lc; break;
      UV_HANDLE_TYPE_MAP(X)
#undef X
      default: type = "<unknown>";
    }

    fprintf(stream,
            "[%c%c%c] %-8s %p\n",
            "R-"[!(h->flags & UV_HANDLE_REF)],
            "A-"[!(h->flags & UV_HANDLE_ACTIVE)],
            "I-"[!(h->flags & UV_HANDLE_INTERNAL)],
            type,
            (void*)h);
  }
}


void uv_print_all_handles(uv_loop_t* loop, FILE* stream) {
  uv__print_handles(loop, 0, stream);
}


void uv_print_active_handles(uv_loop_t* loop, FILE* stream) {
  uv__print_handles(loop, 1, stream);
}


void uv_ref(uv_handle_t* handle) {
  uv__handle_ref(handle);
}


void uv_unref(uv_handle_t* handle) {
  uv__handle_unref(handle);
}


int uv_has_ref(const uv_handle_t* handle) {
  return uv__has_ref(handle);
}


void uv_stop(uv_loop_t* loop) {
  loop->stop_flag = 1;
}


uint64_t uv_now(const uv_loop_t* loop) {
  return loop->time;
}



size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) {
  unsigned int i;
  size_t bytes;

  bytes = 0;
  for (i = 0; i < nbufs; i++)
    bytes += (size_t) bufs[i].len;

  return bytes;
}

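/* Get or set a socket buffer size: a *value of zero queries the current size,
 * a non-zero *value sets it (see uv__socket_sockopt()).
 */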
int uv_recv_buffer_size(uv_handle_t* handle, int* value) {
  return uv__socket_sockopt(handle, SO_RCVBUF, value);
}

int uv_send_buffer_size(uv_handle_t* handle, int *value) {
  return uv__socket_sockopt(handle, SO_SNDBUF, value);
}

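/* Copy the path being watched into the caller's buffer. When the buffer is
 * too small, *size is set to the required length (including the terminating
 * NUL) and UV_ENOBUFS is returned.
 */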
int uv_fs_event_getpath(uv_fs_event_t* handle, char* buffer, size_t* size) {
  size_t required_len;

  if (!uv__is_active(handle)) {
    *size = 0;
    return UV_EINVAL;
  }

  required_len = strlen(handle->path);
  if (required_len >= *size) {
    *size = required_len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, handle->path, required_len);
  *size = required_len;
  buffer[required_len] = '\0';

  return 0;
}

/* The windows implementation does not have the same structure layout as
 * the unix implementation (nbufs is not directly inside req but is
 * contained in a nested union/struct) so this function locates it.
 */
static unsigned int* uv__get_nbufs(uv_fs_t* req) {
#ifdef _WIN32
  return &req->fs.info.nbufs;
#else
  return &req->nbufs;
#endif
}

/* uv_fs_scandir() uses the system allocator to allocate memory on non-Windows
 * systems. So, the memory should be released using free(). On Windows,
 * uv__malloc() is used, so use uv__free() to free memory.
 */
#ifdef _WIN32
# define uv__fs_scandir_free uv__free
#else
# define uv__fs_scandir_free free
#endif

void uv__fs_scandir_cleanup(uv_fs_t* req) {
  uv__dirent_t** dents;

  unsigned int* nbufs = uv__get_nbufs(req);

  dents = req->ptr;
  if (*nbufs > 0 && *nbufs != (unsigned int) req->result)
    (*nbufs)--;
  for (; *nbufs < (unsigned int) req->result; (*nbufs)++)
    uv__fs_scandir_free(dents[*nbufs]);

  uv__fs_scandir_free(req->ptr);
  req->ptr = NULL;
}


int uv_fs_scandir_next(uv_fs_t* req, uv_dirent_t* ent) {
  uv__dirent_t** dents;
  uv__dirent_t* dent;
  unsigned int* nbufs;

  /* Check to see if req passed */
  if (req->result < 0)
    return req->result;

  /* Ptr will be null if req was canceled or no files found */
  if (!req->ptr)
    return UV_EOF;

  nbufs = uv__get_nbufs(req);
  assert(nbufs);

  dents = req->ptr;

  /* Free previous entity */
  if (*nbufs > 0)
    uv__fs_scandir_free(dents[*nbufs - 1]);

  /* End was already reached */
  if (*nbufs == (unsigned int) req->result) {
    uv__fs_scandir_free(dents);
    req->ptr = NULL;
    return UV_EOF;
  }

  dent = dents[(*nbufs)++];

  ent->name = dent->d_name;
  ent->type = uv__fs_get_dirent_type(dent);

  return 0;
}

uv_dirent_type_t uv__fs_get_dirent_type(uv__dirent_t* dent) {
  uv_dirent_type_t type;

#ifdef HAVE_DIRENT_TYPES
  switch (dent->d_type) {
    case UV__DT_DIR:
      type = UV_DIRENT_DIR;
      break;
    case UV__DT_FILE:
      type = UV_DIRENT_FILE;
      break;
    case UV__DT_LINK:
      type = UV_DIRENT_LINK;
      break;
    case UV__DT_FIFO:
      type = UV_DIRENT_FIFO;
      break;
    case UV__DT_SOCKET:
      type = UV_DIRENT_SOCKET;
      break;
    case UV__DT_CHAR:
      type = UV_DIRENT_CHAR;
      break;
    case UV__DT_BLOCK:
      type = UV_DIRENT_BLOCK;
      break;
    default:
      type = UV_DIRENT_UNKNOWN;
  }
#else
  type = UV_DIRENT_UNKNOWN;
#endif

  return type;
}

void uv__fs_readdir_cleanup(uv_fs_t* req) {
  uv_dir_t* dir;
  uv_dirent_t* dirents;
  int i;

  if (req->ptr == NULL)
    return;

  dir = req->ptr;
  dirents = dir->dirents;
  req->ptr = NULL;

  if (dirents == NULL)
    return;

  for (i = 0; i < req->result; ++i) {
    uv__free((char*) dirents[i].name);
    dirents[i].name = NULL;
  }
}


int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...) {
  va_list ap;
  int err;

  va_start(ap, option);
  /* Any platform-agnostic options should be handled here. */
  err = uv__loop_configure(loop, option, ap);
  va_end(ap);

  return err;
}

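/* The default loop lives in static storage and is initialized lazily;
 * uv_default_loop() returns NULL if that initialization fails.
 */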
static uv_loop_t default_loop_struct;
static uv_loop_t* default_loop_ptr;


uv_loop_t* uv_default_loop(void) {
  if (default_loop_ptr != NULL)
    return default_loop_ptr;

  if (uv_loop_init(&default_loop_struct))
    return NULL;

  default_loop_ptr = &default_loop_struct;
  return default_loop_ptr;
}


uv_loop_t* uv_loop_new(void) {
  uv_loop_t* loop;

  loop = uv__malloc(sizeof(*loop));
  if (loop == NULL)
    return NULL;

  if (uv_loop_init(loop)) {
    uv__free(loop);
    return NULL;
  }

  return loop;
}

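/* A loop may only be closed once every request has finished and every
 * non-internal handle has been closed; otherwise UV_EBUSY is returned. In
 * debug builds the loop memory is poisoned afterwards to flag use-after-close.
 */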
int uv_loop_close(uv_loop_t* loop) {
  QUEUE* q;
  uv_handle_t* h;
#ifndef NDEBUG
  void* saved_data;
#endif

  if (uv__has_active_reqs(loop))
    return UV_EBUSY;

  QUEUE_FOREACH(q, &loop->handle_queue) {
    h = QUEUE_DATA(q, uv_handle_t, handle_queue);
    if (!(h->flags & UV_HANDLE_INTERNAL))
      return UV_EBUSY;
  }

  uv__loop_close(loop);

#ifndef NDEBUG
  saved_data = loop->data;
  memset(loop, -1, sizeof(*loop));
  loop->data = saved_data;
#endif
  if (loop == default_loop_ptr)
    default_loop_ptr = NULL;

  return 0;
}


void uv_loop_delete(uv_loop_t* loop) {
  uv_loop_t* default_loop;
  int err;

  default_loop = default_loop_ptr;

  err = uv_loop_close(loop);
  (void) err; /* Squelch compiler warnings. */
  assert(err == 0);
  if (loop != default_loop)
    uv__free(loop);
}


int uv_read_start(uv_stream_t* stream,
                  uv_alloc_cb alloc_cb,
                  uv_read_cb read_cb) {
  if (stream == NULL || alloc_cb == NULL || read_cb == NULL)
    return UV_EINVAL;

  if (stream->flags & UV_HANDLE_CLOSING)
    return UV_EINVAL;

  if (stream->flags & UV_HANDLE_READING)
    return UV_EALREADY;

  if (!(stream->flags & UV_HANDLE_READABLE))
    return UV_ENOTCONN;

  return uv__read_start(stream, alloc_cb, read_cb);
}


void uv_os_free_environ(uv_env_item_t* envitems, int count) {
  int i;

  for (i = 0; i < count; i++) {
    uv__free(envitems[i].name);
  }

  uv__free(envitems);
}


void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
  int i;

  for (i = 0; i < count; i++)
    uv__free(cpu_infos[i].model);

  uv__free(cpu_infos);
}


/* Also covers __clang__ and __INTEL_COMPILER. Disabled on Windows because
 * threads have already been forcibly terminated by the operating system
 * by the time destructors run, ergo, it's not safe to try to clean them up.
 */
#if defined(__GNUC__) && !defined(_WIN32)
__attribute__((destructor))
#endif
void uv_library_shutdown(void) {
  static int was_shutdown;

  if (uv__load_relaxed(&was_shutdown))
    return;

  uv__process_title_cleanup();
  uv__signal_cleanup();
#ifdef __MVS__
  /* TODO(itodorov) - zos: revisit when Woz compiler is available. */
  uv__os390_cleanup();
#else
  uv__threadpool_cleanup();
#endif
  uv__store_relaxed(&was_shutdown, 1);
}

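/* Idle-time metrics, enabled with uv_loop_configure(loop, UV_METRICS_IDLE_TIME):
 * provider_entry_time is stamped right before the loop blocks in its I/O
 * provider (e.g. epoll or kqueue); when the provider returns, the elapsed time
 * is added to provider_idle_time, and uv_metrics_idle_time() reports the
 * accumulated total.
 */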
void uv__metrics_update_idle_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t entry_time;
  uint64_t exit_time;

  if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
    return;

  loop_metrics = uv__get_loop_metrics(loop);

  /* The thread running uv__metrics_update_idle_time() is always the same
   * thread that sets provider_entry_time. So it's unnecessary to lock before
   * retrieving this value.
   */
  if (loop_metrics->provider_entry_time == 0)
    return;

  exit_time = uv_hrtime();

  uv_mutex_lock(&loop_metrics->lock);
  entry_time = loop_metrics->provider_entry_time;
  loop_metrics->provider_entry_time = 0;
  loop_metrics->provider_idle_time += exit_time - entry_time;
  uv_mutex_unlock(&loop_metrics->lock);
}


void uv__metrics_set_provider_entry_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t now;

  if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
    return;

  now = uv_hrtime();
  loop_metrics = uv__get_loop_metrics(loop);
  uv_mutex_lock(&loop_metrics->lock);
  loop_metrics->provider_entry_time = now;
  uv_mutex_unlock(&loop_metrics->lock);
}


uint64_t uv_metrics_idle_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t entry_time;
  uint64_t idle_time;

  loop_metrics = uv__get_loop_metrics(loop);
  uv_mutex_lock(&loop_metrics->lock);
  idle_time = loop_metrics->provider_idle_time;
  entry_time = loop_metrics->provider_entry_time;
  uv_mutex_unlock(&loop_metrics->lock);

  if (entry_time > 0)
    idle_time += uv_hrtime() - entry_time;
  return idle_time;
}