GitHub Repository: Kitware/CMake
Path: blob/master/Utilities/cmlibuv/src/win/udp.c
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdlib.h>

#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "stream-inl.h"
#include "req-inl.h"


/*
 * Threshold of active udp streams for which to preallocate udp read buffers.
 */
const unsigned int uv_active_udp_streams_threshold = 0;

/* A zero-size buffer for use by uv_udp_read */
static char uv_zero_[] = "";
int uv_udp_getpeername(const uv_udp_t* handle,
                       struct sockaddr* name,
                       int* namelen) {

  return uv__getsockpeername((const uv_handle_t*) handle,
                             getpeername,
                             name,
                             namelen,
                             0);
}


int uv_udp_getsockname(const uv_udp_t* handle,
                       struct sockaddr* name,
                       int* namelen) {

  return uv__getsockpeername((const uv_handle_t*) handle,
                             getsockname,
                             name,
                             namelen,
                             0);
}


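/* Note (added for readability; not part of the upstream sources):
 * uv__udp_set_socket() prepares a newly created or user-supplied SOCKET for
 * use with this loop: it switches the socket to nonblocking mode, makes the
 * handle non-inheritable, associates it with the loop's I/O completion port,
 * and, when no layered service providers are installed, enables the
 * FILE_SKIP_COMPLETION_PORT_ON_SUCCESS fast path together with the
 * WSARecv/WSARecvFrom workaround functions. */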
static int uv__udp_set_socket(uv_loop_t* loop, uv_udp_t* handle, SOCKET socket,
    int family) {
  DWORD yes = 1;
  WSAPROTOCOL_INFOW info;
  int opt_len;

  if (handle->socket != INVALID_SOCKET)
    return UV_EBUSY;

  /* Set the socket to nonblocking mode */
  if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR) {
    return WSAGetLastError();
  }

  /* Make the socket non-inheritable */
  if (!SetHandleInformation((HANDLE)socket, HANDLE_FLAG_INHERIT, 0)) {
    return GetLastError();
  }

  /* Associate it with the I/O completion port. Use uv_handle_t pointer as
   * completion key. */
  if (CreateIoCompletionPort((HANDLE)socket,
                             loop->iocp,
                             (ULONG_PTR)socket,
                             0) == NULL) {
    return GetLastError();
  }

  /* All known Windows that support SetFileCompletionNotificationModes have a
   * bug that makes it impossible to use this function in conjunction with
   * datagram sockets. We can work around that but only if the user is using
   * the default UDP driver (AFD) and has no other LSPs stacked on top. Here
   * we check whether that is the case. */
  opt_len = (int) sizeof info;
  if (getsockopt(
          socket, SOL_SOCKET, SO_PROTOCOL_INFOW, (char*) &info, &opt_len) ==
      SOCKET_ERROR) {
    return GetLastError();
  }

  if (info.ProtocolChain.ChainLen == 1) {
    if (SetFileCompletionNotificationModes(
            (HANDLE) socket,
            FILE_SKIP_SET_EVENT_ON_HANDLE |
                FILE_SKIP_COMPLETION_PORT_ON_SUCCESS)) {
      handle->flags |= UV_HANDLE_SYNC_BYPASS_IOCP;
      handle->func_wsarecv = uv__wsarecv_workaround;
      handle->func_wsarecvfrom = uv__wsarecvfrom_workaround;
    } else if (GetLastError() != ERROR_INVALID_FUNCTION) {
      return GetLastError();
    }
  }

  handle->socket = socket;

  if (family == AF_INET6) {
    handle->flags |= UV_HANDLE_IPV6;
  } else {
    assert(!(handle->flags & UV_HANDLE_IPV6));
  }

  return 0;
}


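/* Note (added): uv__udp_init_ex() initializes the uv_udp_t handle state and,
 * when an address family is given via `domain`, eagerly creates the socket
 * here. With AF_UNSPEC no socket is created yet; uv__udp_maybe_bind() will
 * create it lazily on first use. */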
int uv__udp_init_ex(uv_loop_t* loop,
                    uv_udp_t* handle,
                    unsigned flags,
                    int domain) {
  uv__handle_init(loop, (uv_handle_t*) handle, UV_UDP);
  handle->socket = INVALID_SOCKET;
  handle->reqs_pending = 0;
  handle->activecnt = 0;
  handle->func_wsarecv = WSARecv;
  handle->func_wsarecvfrom = WSARecvFrom;
  handle->send_queue_size = 0;
  handle->send_queue_count = 0;
  UV_REQ_INIT(&handle->recv_req, UV_UDP_RECV);
  handle->recv_req.data = handle;

  /* If anything fails beyond this point we need to remove the handle from
   * the handle queue, since it was added by uv__handle_init.
   */

  if (domain != AF_UNSPEC) {
    SOCKET sock;
    DWORD err;

    sock = socket(domain, SOCK_DGRAM, 0);
    if (sock == INVALID_SOCKET) {
      err = WSAGetLastError();
      QUEUE_REMOVE(&handle->handle_queue);
      return uv_translate_sys_error(err);
    }

    err = uv__udp_set_socket(handle->loop, handle, sock, domain);
    if (err) {
      closesocket(sock);
      QUEUE_REMOVE(&handle->handle_queue);
      return uv_translate_sys_error(err);
    }
  }

  return 0;
}


void uv__udp_close(uv_loop_t* loop, uv_udp_t* handle) {
  uv_udp_recv_stop(handle);
  closesocket(handle->socket);
  handle->socket = INVALID_SOCKET;

  uv__handle_closing(handle);

  if (handle->reqs_pending == 0) {
    uv__want_endgame(loop, (uv_handle_t*) handle);
  }
}


void uv__udp_endgame(uv_loop_t* loop, uv_udp_t* handle) {
  if (handle->flags & UV_HANDLE_CLOSING &&
      handle->reqs_pending == 0) {
    assert(!(handle->flags & UV_HANDLE_CLOSED));
    uv__handle_close(handle);
  }
}


int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
  return 0;
}


static int uv__udp_maybe_bind(uv_udp_t* handle,
                              const struct sockaddr* addr,
                              unsigned int addrlen,
                              unsigned int flags) {
  int r;
  int err;
  DWORD no = 0;

  if (handle->flags & UV_HANDLE_BOUND)
    return 0;

  if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6) {
    /* UV_UDP_IPV6ONLY is supported only for IPV6 sockets */
    return ERROR_INVALID_PARAMETER;
  }

  if (handle->socket == INVALID_SOCKET) {
    SOCKET sock = socket(addr->sa_family, SOCK_DGRAM, 0);
    if (sock == INVALID_SOCKET) {
      return WSAGetLastError();
    }

    err = uv__udp_set_socket(handle->loop, handle, sock, addr->sa_family);
    if (err) {
      closesocket(sock);
      return err;
    }
  }

  if (flags & UV_UDP_REUSEADDR) {
    DWORD yes = 1;
    /* Set SO_REUSEADDR on the socket. */
    if (setsockopt(handle->socket,
                   SOL_SOCKET,
                   SO_REUSEADDR,
                   (char*) &yes,
                   sizeof yes) == SOCKET_ERROR) {
      err = WSAGetLastError();
      return err;
    }
  }

  if (addr->sa_family == AF_INET6)
    handle->flags |= UV_HANDLE_IPV6;

  if (addr->sa_family == AF_INET6 && !(flags & UV_UDP_IPV6ONLY)) {
    /* On windows IPV6ONLY is on by default. If the user doesn't specify it
     * libuv turns it off. */

    /* TODO: how to handle errors? This may fail if there is no ipv4 stack
     * available, or when run on XP/2003 which have no support for dualstack
     * sockets. For now we're silently ignoring the error. */
    setsockopt(handle->socket,
               IPPROTO_IPV6,
               IPV6_V6ONLY,
               (char*) &no,
               sizeof no);
  }

  r = bind(handle->socket, addr, addrlen);
  if (r == SOCKET_ERROR) {
    return WSAGetLastError();
  }

  handle->flags |= UV_HANDLE_BOUND;

  return 0;
}


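/* Note (added): uv__udp_queue_recv() posts the next overlapped receive using
 * one of two strategies. Below uv_active_udp_streams_threshold it asks the
 * user's alloc_cb for a buffer up front and issues WSARecvFrom into it;
 * otherwise it issues a zero-length MSG_PEEK read into uv_zero_ and defers
 * the real, nonblocking read to uv__process_udp_recv_req(). Since the
 * threshold is defined as 0 above, the zero-read path is effectively always
 * taken in this build. */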
static void uv__udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
  uv_req_t* req;
  uv_buf_t buf;
  DWORD bytes, flags;
  int result;

  assert(handle->flags & UV_HANDLE_READING);
  assert(!(handle->flags & UV_HANDLE_READ_PENDING));

  req = &handle->recv_req;
  memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));

  /*
   * Preallocate a read buffer if the number of active streams is below
   * the threshold.
   */
  if (loop->active_udp_streams < uv_active_udp_streams_threshold) {
    handle->flags &= ~UV_HANDLE_ZERO_READ;

    handle->recv_buffer = uv_buf_init(NULL, 0);
    handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &handle->recv_buffer);
    if (handle->recv_buffer.base == NULL || handle->recv_buffer.len == 0) {
      handle->recv_cb(handle, UV_ENOBUFS, &handle->recv_buffer, NULL, 0);
      return;
    }
    assert(handle->recv_buffer.base != NULL);

    buf = handle->recv_buffer;
    memset(&handle->recv_from, 0, sizeof handle->recv_from);
    handle->recv_from_len = sizeof handle->recv_from;
    flags = 0;

    result = handle->func_wsarecvfrom(handle->socket,
                                      (WSABUF*) &buf,
                                      1,
                                      &bytes,
                                      &flags,
                                      (struct sockaddr*) &handle->recv_from,
                                      &handle->recv_from_len,
                                      &req->u.io.overlapped,
                                      NULL);

    if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
      /* Process the req without IOCP. */
      handle->flags |= UV_HANDLE_READ_PENDING;
      req->u.io.overlapped.InternalHigh = bytes;
      handle->reqs_pending++;
      uv__insert_pending_req(loop, req);
    } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
      /* The req will be processed with IOCP. */
      handle->flags |= UV_HANDLE_READ_PENDING;
      handle->reqs_pending++;
    } else {
      /* Make this req pending reporting an error. */
      SET_REQ_ERROR(req, WSAGetLastError());
      uv__insert_pending_req(loop, req);
      handle->reqs_pending++;
    }

  } else {
    handle->flags |= UV_HANDLE_ZERO_READ;

    buf.base = (char*) uv_zero_;
    buf.len = 0;
    flags = MSG_PEEK;

    result = handle->func_wsarecv(handle->socket,
                                  (WSABUF*) &buf,
                                  1,
                                  &bytes,
                                  &flags,
                                  &req->u.io.overlapped,
                                  NULL);

    if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
      /* Process the req without IOCP. */
      handle->flags |= UV_HANDLE_READ_PENDING;
      req->u.io.overlapped.InternalHigh = bytes;
      handle->reqs_pending++;
      uv__insert_pending_req(loop, req);
    } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
      /* The req will be processed with IOCP. */
      handle->flags |= UV_HANDLE_READ_PENDING;
      handle->reqs_pending++;
    } else {
      /* Make this req pending reporting an error. */
      SET_REQ_ERROR(req, WSAGetLastError());
      uv__insert_pending_req(loop, req);
      handle->reqs_pending++;
    }
  }
}


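/* Added illustrative sketch (not part of the upstream sources): how an
 * application typically reaches uv__udp_recv_start() through the public
 * libuv API. The callback names and port number are placeholders; error
 * checking is omitted for brevity. */
#if 0
static void example_alloc_cb(uv_handle_t* handle,
                             size_t suggested_size,
                             uv_buf_t* buf) {
  buf->base = (char*) malloc(suggested_size);
  buf->len = (ULONG) suggested_size;
}

static void example_recv_cb(uv_udp_t* handle,
                            ssize_t nread,
                            const uv_buf_t* buf,
                            const struct sockaddr* addr,
                            unsigned flags) {
  /* nread == 0 with addr == NULL means there is nothing left to read. */
  free(buf->base);
}

static void example_udp_receiver(uv_loop_t* loop) {
  static uv_udp_t handle;  /* must stay valid while the handle is active */
  struct sockaddr_in addr;

  uv_udp_init(loop, &handle);
  uv_ip4_addr("0.0.0.0", 41234, &addr);
  uv_udp_bind(&handle, (const struct sockaddr*) &addr, UV_UDP_REUSEADDR);
  uv_udp_recv_start(&handle, example_alloc_cb, example_recv_cb);
}
#endif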
int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb,
    uv_udp_recv_cb recv_cb) {
  uv_loop_t* loop = handle->loop;
  int err;

  if (handle->flags & UV_HANDLE_READING) {
    return UV_EALREADY;
  }

  err = uv__udp_maybe_bind(handle,
                           (const struct sockaddr*) &uv_addr_ip4_any_,
                           sizeof(uv_addr_ip4_any_),
                           0);
  if (err)
    return uv_translate_sys_error(err);

  handle->flags |= UV_HANDLE_READING;
  INCREASE_ACTIVE_COUNT(loop, handle);
  loop->active_udp_streams++;

  handle->recv_cb = recv_cb;
  handle->alloc_cb = alloc_cb;

  /* If reading was stopped and then started again, there could still be a recv
   * request pending. */
  if (!(handle->flags & UV_HANDLE_READ_PENDING))
    uv__udp_queue_recv(loop, handle);

  return 0;
}


int uv__udp_recv_stop(uv_udp_t* handle) {
  if (handle->flags & UV_HANDLE_READING) {
    handle->flags &= ~UV_HANDLE_READING;
    handle->loop->active_udp_streams--;
    DECREASE_ACTIVE_COUNT(loop, handle);
  }

  return 0;
}


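/* Note (added): uv__send() issues a single overlapped WSASendTo() for the
 * request. If the call completes immediately and IOCP notification is
 * skipped, the req is queued as already pending with queued_bytes == 0;
 * if the kernel queued it, queued_bytes records the buffered byte count so
 * that send_queue_size/send_queue_count stay accurate until the completion
 * is processed in uv__process_udp_send_req(). */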
static int uv__send(uv_udp_send_t* req,
                    uv_udp_t* handle,
                    const uv_buf_t bufs[],
                    unsigned int nbufs,
                    const struct sockaddr* addr,
                    unsigned int addrlen,
                    uv_udp_send_cb cb) {
  uv_loop_t* loop = handle->loop;
  DWORD result, bytes;

  UV_REQ_INIT(req, UV_UDP_SEND);
  req->handle = handle;
  req->cb = cb;
  memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));

  result = WSASendTo(handle->socket,
                     (WSABUF*)bufs,
                     nbufs,
                     &bytes,
                     0,
                     addr,
                     addrlen,
                     &req->u.io.overlapped,
                     NULL);

  if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
    /* Request completed immediately. */
    req->u.io.queued_bytes = 0;
    handle->reqs_pending++;
    handle->send_queue_size += req->u.io.queued_bytes;
    handle->send_queue_count++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    uv__insert_pending_req(loop, (uv_req_t*)req);
  } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
    /* Request queued by the kernel. */
    req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs);
    handle->reqs_pending++;
    handle->send_queue_size += req->u.io.queued_bytes;
    handle->send_queue_count++;
    REGISTER_HANDLE_REQ(loop, handle, req);
  } else {
    /* Send failed due to an error. */
    return WSAGetLastError();
  }

  return 0;
}


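/* Note (added): uv__process_udp_recv_req() runs when a queued receive
 * completes. WSAEMSGSIZE is treated as success with the UV_UDP_PARTIAL flag,
 * WSAECONNRESET/WSAENETRESET (late ICMP errors from an earlier sendto) are
 * swallowed, and any other failure is reported through recv_cb. In zero-read
 * mode it then performs the real, nonblocking WSARecvFrom() into a buffer
 * obtained from alloc_cb. */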
void uv__process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle,
    uv_req_t* req) {
  uv_buf_t buf;
  int partial;

  assert(handle->type == UV_UDP);

  handle->flags &= ~UV_HANDLE_READ_PENDING;

  if (!REQ_SUCCESS(req)) {
    DWORD err = GET_REQ_SOCK_ERROR(req);
    if (err == WSAEMSGSIZE) {
      /* Not a real error, it just indicates that the received packet was
       * bigger than the receive buffer. */
    } else if (err == WSAECONNRESET || err == WSAENETRESET) {
      /* A previous sendto operation failed; ignore this error. If zero-reading
       * we need to call WSARecv/WSARecvFrom _without_ the MSG_PEEK flag to
       * clear out the error queue. For nonzero reads, immediately queue a new
       * receive. */
      if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
        goto done;
      }
    } else {
      /* A real error occurred. Report the error to the user only if we're
       * currently reading. */
      if (handle->flags & UV_HANDLE_READING) {
        uv_udp_recv_stop(handle);
        buf = (handle->flags & UV_HANDLE_ZERO_READ) ?
              uv_buf_init(NULL, 0) : handle->recv_buffer;
        handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0);
      }
      goto done;
    }
  }

  if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
    /* Successful read */
    partial = !REQ_SUCCESS(req);
    handle->recv_cb(handle,
                    req->u.io.overlapped.InternalHigh,
                    &handle->recv_buffer,
                    (const struct sockaddr*) &handle->recv_from,
                    partial ? UV_UDP_PARTIAL : 0);
  } else if (handle->flags & UV_HANDLE_READING) {
    DWORD bytes, err, flags;
    struct sockaddr_storage from;
    int from_len;

    /* Do a nonblocking receive.
     * TODO: try to read multiple datagrams at once. FIONREAD maybe? */
    buf = uv_buf_init(NULL, 0);
    handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
    if (buf.base == NULL || buf.len == 0) {
      handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
      goto done;
    }
    assert(buf.base != NULL);

    memset(&from, 0, sizeof from);
    from_len = sizeof from;

    flags = 0;

    if (WSARecvFrom(handle->socket,
                    (WSABUF*)&buf,
                    1,
                    &bytes,
                    &flags,
                    (struct sockaddr*) &from,
                    &from_len,
                    NULL,
                    NULL) != SOCKET_ERROR) {

      /* Message received */
      handle->recv_cb(handle, bytes, &buf, (const struct sockaddr*) &from, 0);
    } else {
      err = WSAGetLastError();
      if (err == WSAEMSGSIZE) {
        /* Message truncated */
        handle->recv_cb(handle,
                        bytes,
                        &buf,
                        (const struct sockaddr*) &from,
                        UV_UDP_PARTIAL);
      } else if (err == WSAEWOULDBLOCK) {
        /* Kernel buffer empty */
        handle->recv_cb(handle, 0, &buf, NULL, 0);
      } else if (err == WSAECONNRESET || err == WSAENETRESET) {
        /* WSAECONNRESET/WSAENETRESET is ignored because this just indicates
         * that a previous sendto operation failed.
         */
        handle->recv_cb(handle, 0, &buf, NULL, 0);
      } else {
        /* Any other error that we want to report back to the user. */
        uv_udp_recv_stop(handle);
        handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0);
      }
    }
  }

done:
  /* Post another read if still reading and not closing. */
  if ((handle->flags & UV_HANDLE_READING) &&
      !(handle->flags & UV_HANDLE_READ_PENDING)) {
    uv__udp_queue_recv(loop, handle);
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}


void uv__process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
    uv_udp_send_t* req) {
  int err;

  assert(handle->type == UV_UDP);

  assert(handle->send_queue_size >= req->u.io.queued_bytes);
  assert(handle->send_queue_count >= 1);
  handle->send_queue_size -= req->u.io.queued_bytes;
  handle->send_queue_count--;

  UNREGISTER_HANDLE_REQ(loop, handle, req);

  if (req->cb) {
    err = 0;
    if (!REQ_SUCCESS(req)) {
      err = GET_REQ_SOCK_ERROR(req);
    }
    req->cb(req, uv_translate_sys_error(err));
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}


static int uv__udp_set_membership4(uv_udp_t* handle,
                                   const struct sockaddr_in* multicast_addr,
                                   const char* interface_addr,
                                   uv_membership membership) {
  int err;
  int optname;
  struct ip_mreq mreq;

  if (handle->flags & UV_HANDLE_IPV6)
    return UV_EINVAL;

  /* If the socket is unbound, bind to inaddr_any. */
  err = uv__udp_maybe_bind(handle,
                           (const struct sockaddr*) &uv_addr_ip4_any_,
                           sizeof(uv_addr_ip4_any_),
                           UV_UDP_REUSEADDR);
  if (err)
    return uv_translate_sys_error(err);

  memset(&mreq, 0, sizeof mreq);

  if (interface_addr) {
    err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
    if (err)
      return err;
  } else {
    mreq.imr_interface.s_addr = htonl(INADDR_ANY);
  }

  mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;

  switch (membership) {
    case UV_JOIN_GROUP:
      optname = IP_ADD_MEMBERSHIP;
      break;
    case UV_LEAVE_GROUP:
      optname = IP_DROP_MEMBERSHIP;
      break;
    default:
      return UV_EINVAL;
  }

  if (setsockopt(handle->socket,
                 IPPROTO_IP,
                 optname,
                 (char*) &mreq,
                 sizeof mreq) == SOCKET_ERROR) {
    return uv_translate_sys_error(WSAGetLastError());
  }

  return 0;
}


int uv__udp_set_membership6(uv_udp_t* handle,
                            const struct sockaddr_in6* multicast_addr,
                            const char* interface_addr,
                            uv_membership membership) {
  int optname;
  int err;
  struct ipv6_mreq mreq;
  struct sockaddr_in6 addr6;

  if ((handle->flags & UV_HANDLE_BOUND) && !(handle->flags & UV_HANDLE_IPV6))
    return UV_EINVAL;

  err = uv__udp_maybe_bind(handle,
                           (const struct sockaddr*) &uv_addr_ip6_any_,
                           sizeof(uv_addr_ip6_any_),
                           UV_UDP_REUSEADDR);

  if (err)
    return uv_translate_sys_error(err);

  memset(&mreq, 0, sizeof(mreq));

  if (interface_addr) {
    if (uv_ip6_addr(interface_addr, 0, &addr6))
      return UV_EINVAL;
    mreq.ipv6mr_interface = addr6.sin6_scope_id;
  } else {
    mreq.ipv6mr_interface = 0;
  }

  mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr;

  switch (membership) {
    case UV_JOIN_GROUP:
      optname = IPV6_ADD_MEMBERSHIP;
      break;
    case UV_LEAVE_GROUP:
      optname = IPV6_DROP_MEMBERSHIP;
      break;
    default:
      return UV_EINVAL;
  }

  if (setsockopt(handle->socket,
                 IPPROTO_IPV6,
                 optname,
                 (char*) &mreq,
                 sizeof mreq) == SOCKET_ERROR) {
    return uv_translate_sys_error(WSAGetLastError());
  }

  return 0;
}


static int uv__udp_set_source_membership4(uv_udp_t* handle,
                                          const struct sockaddr_in* multicast_addr,
                                          const char* interface_addr,
                                          const struct sockaddr_in* source_addr,
                                          uv_membership membership) {
  struct ip_mreq_source mreq;
  int optname;
  int err;

  if (handle->flags & UV_HANDLE_IPV6)
    return UV_EINVAL;

  /* If the socket is unbound, bind to inaddr_any. */
  err = uv__udp_maybe_bind(handle,
                           (const struct sockaddr*) &uv_addr_ip4_any_,
                           sizeof(uv_addr_ip4_any_),
                           UV_UDP_REUSEADDR);
  if (err)
    return uv_translate_sys_error(err);

  memset(&mreq, 0, sizeof(mreq));

  if (interface_addr != NULL) {
    err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
    if (err)
      return err;
  } else {
    mreq.imr_interface.s_addr = htonl(INADDR_ANY);
  }

  mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
  mreq.imr_sourceaddr.s_addr = source_addr->sin_addr.s_addr;

  if (membership == UV_JOIN_GROUP)
    optname = IP_ADD_SOURCE_MEMBERSHIP;
  else if (membership == UV_LEAVE_GROUP)
    optname = IP_DROP_SOURCE_MEMBERSHIP;
  else
    return UV_EINVAL;

  if (setsockopt(handle->socket,
                 IPPROTO_IP,
                 optname,
                 (char*) &mreq,
                 sizeof(mreq)) == SOCKET_ERROR) {
    return uv_translate_sys_error(WSAGetLastError());
  }

  return 0;
}


int uv__udp_set_source_membership6(uv_udp_t* handle,
                                   const struct sockaddr_in6* multicast_addr,
                                   const char* interface_addr,
                                   const struct sockaddr_in6* source_addr,
                                   uv_membership membership) {
  struct group_source_req mreq;
  struct sockaddr_in6 addr6;
  int optname;
  int err;

  STATIC_ASSERT(sizeof(mreq.gsr_group) >= sizeof(*multicast_addr));
  STATIC_ASSERT(sizeof(mreq.gsr_source) >= sizeof(*source_addr));

  if ((handle->flags & UV_HANDLE_BOUND) && !(handle->flags & UV_HANDLE_IPV6))
    return UV_EINVAL;

  err = uv__udp_maybe_bind(handle,
                           (const struct sockaddr*) &uv_addr_ip6_any_,
                           sizeof(uv_addr_ip6_any_),
                           UV_UDP_REUSEADDR);

  if (err)
    return uv_translate_sys_error(err);

  memset(&mreq, 0, sizeof(mreq));

  if (interface_addr != NULL) {
    err = uv_ip6_addr(interface_addr, 0, &addr6);
    if (err)
      return err;
    mreq.gsr_interface = addr6.sin6_scope_id;
  } else {
    mreq.gsr_interface = 0;
  }

  memcpy(&mreq.gsr_group, multicast_addr, sizeof(*multicast_addr));
  memcpy(&mreq.gsr_source, source_addr, sizeof(*source_addr));

  if (membership == UV_JOIN_GROUP)
    optname = MCAST_JOIN_SOURCE_GROUP;
  else if (membership == UV_LEAVE_GROUP)
    optname = MCAST_LEAVE_SOURCE_GROUP;
  else
    return UV_EINVAL;

  if (setsockopt(handle->socket,
                 IPPROTO_IPV6,
                 optname,
                 (char*) &mreq,
                 sizeof(mreq)) == SOCKET_ERROR) {
    return uv_translate_sys_error(WSAGetLastError());
  }

  return 0;
}


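/* Note (added): uv_udp_set_membership() tries to parse multicast_addr as an
 * IPv4 address first and falls back to IPv6, then dispatches to the matching
 * helper above. A typical (illustrative) call:
 *
 *   uv_udp_set_membership(&handle, "239.255.0.1", NULL, UV_JOIN_GROUP);
 */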
int uv_udp_set_membership(uv_udp_t* handle,
                          const char* multicast_addr,
                          const char* interface_addr,
                          uv_membership membership) {
  struct sockaddr_in addr4;
  struct sockaddr_in6 addr6;

  if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0)
    return uv__udp_set_membership4(handle, &addr4, interface_addr, membership);
  else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0)
    return uv__udp_set_membership6(handle, &addr6, interface_addr, membership);
  else
    return UV_EINVAL;
}


int uv_udp_set_source_membership(uv_udp_t* handle,
                                 const char* multicast_addr,
                                 const char* interface_addr,
                                 const char* source_addr,
                                 uv_membership membership) {
  int err;
  struct sockaddr_storage mcast_addr;
  struct sockaddr_in* mcast_addr4;
  struct sockaddr_in6* mcast_addr6;
  struct sockaddr_storage src_addr;
  struct sockaddr_in* src_addr4;
  struct sockaddr_in6* src_addr6;

  mcast_addr4 = (struct sockaddr_in*)&mcast_addr;
  mcast_addr6 = (struct sockaddr_in6*)&mcast_addr;
  src_addr4 = (struct sockaddr_in*)&src_addr;
  src_addr6 = (struct sockaddr_in6*)&src_addr;

  err = uv_ip4_addr(multicast_addr, 0, mcast_addr4);
  if (err) {
    err = uv_ip6_addr(multicast_addr, 0, mcast_addr6);
    if (err)
      return err;
    err = uv_ip6_addr(source_addr, 0, src_addr6);
    if (err)
      return err;
    return uv__udp_set_source_membership6(handle,
                                          mcast_addr6,
                                          interface_addr,
                                          src_addr6,
                                          membership);
  }

  err = uv_ip4_addr(source_addr, 0, src_addr4);
  if (err)
    return err;
  return uv__udp_set_source_membership4(handle,
                                        mcast_addr4,
                                        interface_addr,
                                        src_addr4,
                                        membership);
}


int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) {
  struct sockaddr_storage addr_st;
  struct sockaddr_in* addr4;
  struct sockaddr_in6* addr6;

  addr4 = (struct sockaddr_in*) &addr_st;
  addr6 = (struct sockaddr_in6*) &addr_st;

  if (!interface_addr) {
    memset(&addr_st, 0, sizeof addr_st);
    if (handle->flags & UV_HANDLE_IPV6) {
      addr_st.ss_family = AF_INET6;
      addr6->sin6_scope_id = 0;
    } else {
      addr_st.ss_family = AF_INET;
      addr4->sin_addr.s_addr = htonl(INADDR_ANY);
    }
  } else if (uv_ip4_addr(interface_addr, 0, addr4) == 0) {
    /* nothing, address was parsed */
  } else if (uv_ip6_addr(interface_addr, 0, addr6) == 0) {
    /* nothing, address was parsed */
  } else {
    return UV_EINVAL;
  }

  if (handle->socket == INVALID_SOCKET)
    return UV_EBADF;

  if (addr_st.ss_family == AF_INET) {
    if (setsockopt(handle->socket,
                   IPPROTO_IP,
                   IP_MULTICAST_IF,
                   (char*) &addr4->sin_addr,
                   sizeof(addr4->sin_addr)) == SOCKET_ERROR) {
      return uv_translate_sys_error(WSAGetLastError());
    }
  } else if (addr_st.ss_family == AF_INET6) {
    if (setsockopt(handle->socket,
                   IPPROTO_IPV6,
                   IPV6_MULTICAST_IF,
                   (char*) &addr6->sin6_scope_id,
                   sizeof(addr6->sin6_scope_id)) == SOCKET_ERROR) {
      return uv_translate_sys_error(WSAGetLastError());
    }
  } else {
    assert(0 && "unexpected address family");
    abort();
  }

  return 0;
}


int uv_udp_set_broadcast(uv_udp_t* handle, int value) {
  BOOL optval = (BOOL) value;

  if (handle->socket == INVALID_SOCKET)
    return UV_EBADF;

  if (setsockopt(handle->socket,
                 SOL_SOCKET,
                 SO_BROADCAST,
                 (char*) &optval,
                 sizeof optval)) {
    return uv_translate_sys_error(WSAGetLastError());
  }

  return 0;
}


int uv__udp_is_bound(uv_udp_t* handle) {
  struct sockaddr_storage addr;
  int addrlen;

  addrlen = sizeof(addr);
  if (uv_udp_getsockname(handle, (struct sockaddr*) &addr, &addrlen) != 0)
    return 0;

  return addrlen > 0;
}


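/* Note (added): uv_udp_open() imports an externally created datagram socket.
 * The address family is detected through SO_PROTOCOL_INFOW, the socket is
 * registered with the loop via uv__udp_set_socket(), and the BOUND/CONNECTED
 * flags are set according to the socket's current state. */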
int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
  WSAPROTOCOL_INFOW protocol_info;
  int opt_len;
  int err;

  /* Detect the address family of the socket. */
  opt_len = (int) sizeof protocol_info;
  if (getsockopt(sock,
                 SOL_SOCKET,
                 SO_PROTOCOL_INFOW,
                 (char*) &protocol_info,
                 &opt_len) == SOCKET_ERROR) {
    return uv_translate_sys_error(GetLastError());
  }

  err = uv__udp_set_socket(handle->loop,
                           handle,
                           sock,
                           protocol_info.iAddressFamily);
  if (err)
    return uv_translate_sys_error(err);

  if (uv__udp_is_bound(handle))
    handle->flags |= UV_HANDLE_BOUND;

  if (uv__udp_is_connected(handle))
    handle->flags |= UV_HANDLE_UDP_CONNECTED;

  return 0;
}


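/* Note (added): each SOCKOPT_SETTER() invocation below expands to a public
 * setter that validates its argument and applies the IPv4 or IPv6 variant of
 * the option depending on the handle's address family. For example,
 *
 *   SOCKOPT_SETTER(ttl, IP_TTL, IPV6_HOPLIMIT, VALIDATE_TTL)
 *
 * defines int uv_udp_set_ttl(uv_udp_t* handle, int value). */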
#define SOCKOPT_SETTER(name, option4, option6, validate)                     \
  int uv_udp_set_##name(uv_udp_t* handle, int value) {                       \
    DWORD optval = (DWORD) value;                                            \
                                                                             \
    if (!(validate(value))) {                                                \
      return UV_EINVAL;                                                      \
    }                                                                        \
                                                                             \
    if (handle->socket == INVALID_SOCKET)                                    \
      return UV_EBADF;                                                       \
                                                                             \
    if (!(handle->flags & UV_HANDLE_IPV6)) {                                 \
      /* Set IPv4 socket option */                                           \
      if (setsockopt(handle->socket,                                         \
                     IPPROTO_IP,                                             \
                     option4,                                                \
                     (char*) &optval,                                        \
                     sizeof optval)) {                                       \
        return uv_translate_sys_error(WSAGetLastError());                    \
      }                                                                      \
    } else {                                                                 \
      /* Set IPv6 socket option */                                           \
      if (setsockopt(handle->socket,                                         \
                     IPPROTO_IPV6,                                           \
                     option6,                                                \
                     (char*) &optval,                                        \
                     sizeof optval)) {                                       \
        return uv_translate_sys_error(WSAGetLastError());                    \
      }                                                                      \
    }                                                                        \
    return 0;                                                                \
  }

#define VALIDATE_TTL(value) ((value) >= 1 && (value) <= 255)
#define VALIDATE_MULTICAST_TTL(value) ((value) >= -1 && (value) <= 255)
#define VALIDATE_MULTICAST_LOOP(value) (1)

SOCKOPT_SETTER(ttl,
               IP_TTL,
               IPV6_HOPLIMIT,
               VALIDATE_TTL)
SOCKOPT_SETTER(multicast_ttl,
               IP_MULTICAST_TTL,
               IPV6_MULTICAST_HOPS,
               VALIDATE_MULTICAST_TTL)
SOCKOPT_SETTER(multicast_loop,
               IP_MULTICAST_LOOP,
               IPV6_MULTICAST_LOOP,
               VALIDATE_MULTICAST_LOOP)

#undef SOCKOPT_SETTER
#undef VALIDATE_TTL
#undef VALIDATE_MULTICAST_TTL
#undef VALIDATE_MULTICAST_LOOP


/* This function is an egress point, i.e. it returns libuv errors rather than
 * system errors.
 */
int uv__udp_bind(uv_udp_t* handle,
                 const struct sockaddr* addr,
                 unsigned int addrlen,
                 unsigned int flags) {
  int err;

  err = uv__udp_maybe_bind(handle, addr, addrlen, flags);
  if (err)
    return uv_translate_sys_error(err);

  return 0;
}


int uv__udp_connect(uv_udp_t* handle,
                    const struct sockaddr* addr,
                    unsigned int addrlen) {
  const struct sockaddr* bind_addr;
  int err;

  if (!(handle->flags & UV_HANDLE_BOUND)) {
    if (addrlen == sizeof(uv_addr_ip4_any_))
      bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_;
    else if (addrlen == sizeof(uv_addr_ip6_any_))
      bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_;
    else
      return UV_EINVAL;

    err = uv__udp_maybe_bind(handle, bind_addr, addrlen, 0);
    if (err)
      return uv_translate_sys_error(err);
  }

  err = connect(handle->socket, addr, addrlen);
  if (err)
    return uv_translate_sys_error(WSAGetLastError());

  handle->flags |= UV_HANDLE_UDP_CONNECTED;

  return 0;
}


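/* Note (added): uv__udp_disconnect() dissolves a previous association by
 * calling connect() with a zeroed sockaddr_storage, i.e. an AF_UNSPEC
 * address, and then clears the UV_HANDLE_UDP_CONNECTED flag. */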
int uv__udp_disconnect(uv_udp_t* handle) {
  int err;
  struct sockaddr_storage addr;

  memset(&addr, 0, sizeof(addr));

  err = connect(handle->socket, (struct sockaddr*) &addr, sizeof(addr));
  if (err)
    return uv_translate_sys_error(WSAGetLastError());

  handle->flags &= ~UV_HANDLE_UDP_CONNECTED;
  return 0;
}


/* This function is an egress point, i.e. it returns libuv errors rather than
 * system errors.
 */
int uv__udp_send(uv_udp_send_t* req,
                 uv_udp_t* handle,
                 const uv_buf_t bufs[],
                 unsigned int nbufs,
                 const struct sockaddr* addr,
                 unsigned int addrlen,
                 uv_udp_send_cb send_cb) {
  const struct sockaddr* bind_addr;
  int err;

  if (!(handle->flags & UV_HANDLE_BOUND)) {
    if (addrlen == sizeof(uv_addr_ip4_any_))
      bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_;
    else if (addrlen == sizeof(uv_addr_ip6_any_))
      bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_;
    else
      return UV_EINVAL;

    err = uv__udp_maybe_bind(handle, bind_addr, addrlen, 0);
    if (err)
      return uv_translate_sys_error(err);
  }

  err = uv__send(req, handle, bufs, nbufs, addr, addrlen, send_cb);
  if (err)
    return uv_translate_sys_error(err);

  return 0;
}


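/* Note (added): uv__udp_try_send() is the synchronous fast path. It returns
 * UV_EAGAIN while asynchronous sends are still queued, binds lazily if
 * needed, and otherwise performs a plain (non-overlapped) WSASendTo() on the
 * nonblocking socket, returning the number of bytes sent or a translated
 * error. */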
int uv__udp_try_send(uv_udp_t* handle,
                     const uv_buf_t bufs[],
                     unsigned int nbufs,
                     const struct sockaddr* addr,
                     unsigned int addrlen) {
  DWORD bytes;
  const struct sockaddr* bind_addr;
  struct sockaddr_storage converted;
  int err;

  assert(nbufs > 0);

  if (addr != NULL) {
    err = uv__convert_to_localhost_if_unspecified(addr, &converted);
    if (err)
      return err;
    addr = (const struct sockaddr*) &converted;
  }

  /* Already sending a message. */
  if (handle->send_queue_count != 0)
    return UV_EAGAIN;

  if (!(handle->flags & UV_HANDLE_BOUND)) {
    if (addrlen == sizeof(uv_addr_ip4_any_))
      bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_;
    else if (addrlen == sizeof(uv_addr_ip6_any_))
      bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_;
    else
      return UV_EINVAL;
    err = uv__udp_maybe_bind(handle, bind_addr, addrlen, 0);
    if (err)
      return uv_translate_sys_error(err);
  }

  err = WSASendTo(handle->socket,
                  (WSABUF*)bufs,
                  nbufs,
                  &bytes,
                  0,
                  addr,
                  addrlen,
                  NULL,
                  NULL);

  if (err)
    return uv_translate_sys_error(WSAGetLastError());

  return bytes;
}