Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/java.base/macosx/native/libnet/bsd_close.c
41119 views
1
/*
2
* Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation. Oracle designates this
8
* particular file as subject to the "Classpath" exception as provided
9
* by Oracle in the LICENSE file that accompanied this code.
10
*
11
* This code is distributed in the hope that it will be useful, but WITHOUT
12
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14
* version 2 for more details (a copy is included in the LICENSE file that
15
* accompanied this code).
16
*
17
* You should have received a copy of the GNU General Public License version
18
* 2 along with this work; if not, write to the Free Software Foundation,
19
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20
*
21
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22
* or visit www.oracle.com if you need additional information or have any
23
* questions.
24
*/
25
26
#include <assert.h>
27
#include <limits.h>
28
#include <stdio.h>
29
#include <stdlib.h>
30
#include <sys/param.h>
31
#include <signal.h>
32
#include <pthread.h>
33
#include <sys/types.h>
34
#include <sys/socket.h>
35
#include <sys/select.h>
36
#include <sys/time.h>
37
#include <sys/resource.h>
38
#include <sys/uio.h>
39
#include <unistd.h>
40
#include <errno.h>
41
#include <poll.h>
42
#include "jvm.h"
43
#include "net_util.h"
44
45
/*
 * Stack allocated by a thread while it performs a blocking operation;
 * linked into the owning fd's thread list so closefd() can find and
 * signal the thread.
 */
typedef struct threadEntry {
    pthread_t thr;            /* this thread */
    struct threadEntry *next; /* next thread blocked on the same fd */
    int intr;                 /* set to 1 by closefd(); endOp() turns it into EBADF */
} threadEntry_t;
53
54
/*
 * Heap allocated during initialization - one entry per fd.
 */
typedef struct {
    pthread_mutex_t lock;   /* fd lock; serializes list updates and close/dup2 */
    threadEntry_t *threads; /* threads currently blocked on this fd */
} fdEntry_t;
61
62
/*
 * Signal used to unblock a thread parked in a blocking syscall.
 * A no-op handler is installed for it in init() with sa_flags == 0
 * (no SA_RESTART), so delivery makes the syscall fail with EINTR.
 */
static int sigWakeup = SIGIO;
66
67
/*
68
* fdTable holds one entry per file descriptor, up to a certain
69
* maximum.
70
* Theoretically, the number of possible file descriptors can get
71
* large, though usually it does not. Entries for small value file
72
* descriptors are kept in a simple table, which covers most scenarios.
73
* Entries for large value file descriptors are kept in an overflow
74
* table, which is organized as a sparse two dimensional array whose
75
* slabs are allocated on demand. This covers all corner cases while
76
* keeping memory consumption reasonable.
77
*/
78
79
/* Base table for low value file descriptors */
static fdEntry_t* fdTable = NULL;
/* Maximum size of base table (in number of entries). */
static const int fdTableMaxSize = 0x1000; /* 4K */
/* Actual size of base table (in number of entries) */
static int fdTableLen = 0;
/* Max. theoretical number of file descriptors on system. */
static int fdLimit = 0;

/* Overflow table, should base table not be large enough. Organized as
 * an array of n slabs, each holding 64k entries.
 */
static fdEntry_t** fdOverflowTable = NULL;
/* Number of slabs in the overflow table */
static int fdOverflowTableLen = 0;
/* Number of entries in one slab */
static const int fdOverflowTableSlabSize = 0x10000; /* 64k */
/* Guards lazy allocation of overflow-table slabs in getFdEntry(). */
pthread_mutex_t fdOverflowTableLock = PTHREAD_MUTEX_INITIALIZER;
97
98
/*
 * Null signal handler. Intentionally empty: the handler is installed
 * without SA_RESTART (see init()), so merely delivering sigWakeup
 * interrupts a blocking syscall (EINTR) with no other effect.
 */
static void sig_wakeup(int sig) {
}
103
104
/*
 * Initialization routine (executed when library is loaded).
 * Sizes and allocates the fd tables from RLIMIT_NOFILE and installs
 * the wakeup-signal handler. Aborts the process on any failure, since
 * the library cannot operate without the tables.
 */
static void __attribute((constructor)) init() {
    struct rlimit nbr_files;
    sigset_t sigset;
    struct sigaction sa;
    int i = 0;

    /* Determine the maximum number of possible file descriptors. */
    if (-1 == getrlimit(RLIMIT_NOFILE, &nbr_files)) {
        fprintf(stderr, "library initialization failed - "
                "unable to get max # of allocated fds\n");
        abort();
    }
    if (nbr_files.rlim_max != RLIM_INFINITY) {
        fdLimit = nbr_files.rlim_max;
    } else {
        /* We just do not know. */
        fdLimit = INT_MAX;
    }

    /* Allocate table for low value file descriptors. */
    fdTableLen = fdLimit < fdTableMaxSize ? fdLimit : fdTableMaxSize;
    fdTable = (fdEntry_t*) calloc(fdTableLen, sizeof(fdEntry_t));
    if (fdTable == NULL) {
        fprintf(stderr, "library initialization failed - "
                "unable to allocate file descriptor table - out of memory");
        abort();
    } else {
        for (i = 0; i < fdTableLen; i ++) {
            pthread_mutex_init(&fdTable[i].lock, NULL);
        }
    }

    /* Allocate overflow table (slab pointers only; slabs themselves are
     * allocated lazily in getFdEntry), if needed. */
    if (fdLimit > fdTableMaxSize) {
        fdOverflowTableLen = ((fdLimit - fdTableMaxSize) / fdOverflowTableSlabSize) + 1;
        fdOverflowTable = (fdEntry_t**) calloc(fdOverflowTableLen, sizeof(fdEntry_t*));
        if (fdOverflowTable == NULL) {
            fprintf(stderr, "library initialization failed - "
                    "unable to allocate file descriptor overflow table - out of memory");
            abort();
        }
    }

    /*
     * Setup the signal handler. sa_flags deliberately omits SA_RESTART
     * so that sigWakeup interrupts blocking syscalls with EINTR.
     */
    sa.sa_handler = sig_wakeup;
    sa.sa_flags = 0;
    sigemptyset(&sa.sa_mask);
    sigaction(sigWakeup, &sa, NULL);

    /* Make sure sigWakeup is deliverable even if it was blocked. */
    sigemptyset(&sigset);
    sigaddset(&sigset, sigWakeup);
    sigprocmask(SIG_UNBLOCK, &sigset, NULL);
}
163
164
/*
 * Return the fd table entry for this fd, lazily allocating the
 * overflow-table slab that contains it if necessary.
 * Returns NULL for negative fds.
 */
static inline fdEntry_t *getFdEntry(int fd)
{
    fdEntry_t* result = NULL;

    if (fd < 0) {
        return NULL;
    }

    /* This should not happen. If it does, our assumption about
     * max. fd value was wrong. */
    assert(fd < fdLimit);

    if (fd < fdTableMaxSize) {
        /* fd is in base table. */
        assert(fd < fdTableLen);
        result = &fdTable[fd];
    } else {
        /* fd is in overflow table. */
        const int indexInOverflowTable = fd - fdTableMaxSize;
        const int rootindex = indexInOverflowTable / fdOverflowTableSlabSize;
        const int slabindex = indexInOverflowTable % fdOverflowTableSlabSize;
        fdEntry_t* slab = NULL;
        assert(rootindex < fdOverflowTableLen);
        assert(slabindex < fdOverflowTableSlabSize);
        pthread_mutex_lock(&fdOverflowTableLock);
        /* Allocate new slab in overflow table if needed */
        if (fdOverflowTable[rootindex] == NULL) {
            fdEntry_t* const newSlab =
                (fdEntry_t*)calloc(fdOverflowTableSlabSize, sizeof(fdEntry_t));
            if (newSlab == NULL) {
                fprintf(stderr, "Unable to allocate file descriptor overflow"
                        " table slab - out of memory");
                pthread_mutex_unlock(&fdOverflowTableLock);
                abort();
            } else {
                int i;
                for (i = 0; i < fdOverflowTableSlabSize; i ++) {
                    pthread_mutex_init(&newSlab[i].lock, NULL);
                }
                fdOverflowTable[rootindex] = newSlab;
            }
        }
        pthread_mutex_unlock(&fdOverflowTableLock);
        /* Slabs are never freed, so reading the slab pointer after
         * dropping the lock is safe. */
        slab = fdOverflowTable[rootindex];
        result = &slab[slabindex];
    }

    return result;
}
217
218
219
/*
220
* Start a blocking operation :-
221
* Insert thread onto thread list for the fd.
222
*/
223
static inline void startOp(fdEntry_t *fdEntry, threadEntry_t *self)
224
{
225
self->thr = pthread_self();
226
self->intr = 0;
227
228
pthread_mutex_lock(&(fdEntry->lock));
229
{
230
self->next = fdEntry->threads;
231
fdEntry->threads = self;
232
}
233
pthread_mutex_unlock(&(fdEntry->lock));
234
}
235
236
/*
237
* End a blocking operation :-
238
* Remove thread from thread list for the fd
239
* If fd has been interrupted then set errno to EBADF
240
*/
241
static inline void endOp
242
(fdEntry_t *fdEntry, threadEntry_t *self)
243
{
244
int orig_errno = errno;
245
pthread_mutex_lock(&(fdEntry->lock));
246
{
247
threadEntry_t *curr, *prev=NULL;
248
curr = fdEntry->threads;
249
while (curr != NULL) {
250
if (curr == self) {
251
if (curr->intr) {
252
orig_errno = EBADF;
253
}
254
if (prev == NULL) {
255
fdEntry->threads = curr->next;
256
} else {
257
prev->next = curr->next;
258
}
259
break;
260
}
261
prev = curr;
262
curr = curr->next;
263
}
264
}
265
pthread_mutex_unlock(&(fdEntry->lock));
266
errno = orig_errno;
267
}
268
269
/*
 * Close or dup2 a file descriptor ensuring that all threads blocked on
 * the file descriptor are notified via a wakeup signal.
 *
 *      fd1 < 0  => close(fd2)
 *      fd1 >= 0 => dup2(fd1, fd2)
 *
 * Returns -1 with errno set if operation fails.
 */
static int closefd(int fd1, int fd2) {
    int rv, orig_errno;
    fdEntry_t *fdEntry = getFdEntry(fd2);
    if (fdEntry == NULL) {
        errno = EBADF;
        return -1;
    }

    /*
     * Lock the fd to hold-off additional I/O on this fd.
     */
    pthread_mutex_lock(&(fdEntry->lock));

    {
        /*
         * Send a wakeup signal to all threads blocked on this
         * file descriptor. Each entry is marked interrupted *before*
         * the signal so that endOp() converts the resulting EINTR
         * into EBADF for that thread.
         */
        threadEntry_t *curr = fdEntry->threads;
        while (curr != NULL) {
            curr->intr = 1;
            pthread_kill( curr->thr, sigWakeup );
            curr = curr->next;
        }

        /*
         * And close/dup the file descriptor
         * (restart if interrupted by signal)
         */
        do {
            if (fd1 < 0) {
                rv = close(fd2);
            } else {
                rv = dup2(fd1, fd2);
            }
        } while (rv == -1 && errno == EINTR);
    }

    /*
     * Unlock without destroying errno
     */
    orig_errno = errno;
    pthread_mutex_unlock(&(fdEntry->lock));
    errno = orig_errno;

    return rv;
}
326
327
/*
 * Wrapper for dup2 - same semantics as the dup2 system call except
 * that any threads blocked in an I/O system call on fd2 will be
 * preempted and return -1/EBADF.
 */
int NET_Dup2(int fd, int fd2) {
    if (fd >= 0) {
        return closefd(fd, fd2);
    }
    /* A negative source fd can never be dup'ed. */
    errno = EBADF;
    return -1;
}
339
340
/*
 * Wrapper for close - same semantics as the close system call
 * except that any threads blocked in an I/O on fd will be
 * preempted and the I/O system call will return -1/EBADF.
 */
int NET_SocketClose(int fd) {
    /* A negative first argument tells closefd() to do a plain close(fd). */
    int result = closefd(-1, fd);
    return result;
}
348
349
/************** Basic I/O operations here ***************/
350
351
/*
 * Macro to perform a blocking IO operation. Restarts automatically
 * if interrupted by a signal other than our wakeup signal: when the
 * fd is closed concurrently, endOp() rewrites errno from EINTR to
 * EBADF, so the retry loop exits and -1/EBADF is returned.
 */
#define BLOCKING_IO_RETURN_INT(FD, FUNC) {      \
    int ret;                                    \
    threadEntry_t self;                         \
    fdEntry_t *fdEntry = getFdEntry(FD);        \
    if (fdEntry == NULL) {                      \
        errno = EBADF;                          \
        return -1;                              \
    }                                           \
    do {                                        \
        startOp(fdEntry, &self);                \
        ret = FUNC;                             \
        endOp(fdEntry, &self);                  \
    } while (ret == -1 && errno == EINTR);      \
    return ret;                                 \
}
371
372
/* Interruptible recv(); returns -1/EBADF if s is closed concurrently. */
int NET_Read(int s, void* buf, size_t len) {
    BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, 0) );
}
375
376
/* recv() with MSG_DONTWAIT: never blocks, but still interruptible via closefd(). */
int NET_NonBlockingRead(int s, void* buf, size_t len) {
    BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, MSG_DONTWAIT));
}
379
380
/* Interruptible recvfrom(); returns -1/EBADF if s is closed concurrently. */
int NET_RecvFrom(int s, void *buf, int len, unsigned int flags,
                 struct sockaddr *from, socklen_t *fromlen) {
    BLOCKING_IO_RETURN_INT( s, recvfrom(s, buf, len, flags, from, fromlen) );
}
384
385
/* Interruptible send(); returns -1/EBADF if s is closed concurrently. */
int NET_Send(int s, void *msg, int len, unsigned int flags) {
    BLOCKING_IO_RETURN_INT( s, send(s, msg, len, flags) );
}
388
389
/* Interruptible sendto(); returns -1/EBADF if s is closed concurrently. */
int NET_SendTo(int s, const void *msg, int len, unsigned int
               flags, const struct sockaddr *to, int tolen) {
    BLOCKING_IO_RETURN_INT( s, sendto(s, msg, len, flags, to, tolen) );
}
393
394
/* Interruptible accept(); returns -1/EBADF if s is closed concurrently. */
int NET_Accept(int s, struct sockaddr *addr, socklen_t *addrlen) {
    BLOCKING_IO_RETURN_INT( s, accept(s, addr, addrlen) );
}
397
398
/* Interruptible connect(); returns -1/EBADF if s is closed concurrently. */
int NET_Connect(int s, struct sockaddr *addr, int addrlen) {
    BLOCKING_IO_RETURN_INT( s, connect(s, addr, addrlen) );
}
401
402
/* Interruptible poll(). NOTE: only ufds[0].fd is registered for wakeup;
 * closing one of the other polled fds will not preempt this call. */
int NET_Poll(struct pollfd *ufds, unsigned int nfds, int timeout) {
    BLOCKING_IO_RETURN_INT( ufds[0].fd, poll(ufds, nfds, timeout) );
}
405
406
/*
 * Wrapper for select(s, timeout). We are using select() on Mac OS due to Bug 7131399.
 * Auto restarts with adjusted timeout if interrupted by a signal other
 * than our wakeup signal.
 *
 * timeout > 0: wait at most 'timeout' ms; timeout == 0: poll;
 * timeout < 0: block indefinitely.
 * Returns select()'s result (>0 readable, 0 timed out), or -1 with
 * errno set (EBADF if the fd was closed while we were blocked).
 */
int NET_Timeout(JNIEnv *env, int s, long timeout, jlong nanoTimeStamp) {
    struct timeval t, *tp = &t;
    fd_set fds;
    fd_set* fdsp = NULL;
    int allocated = 0;
    threadEntry_t self;
    fdEntry_t *fdEntry = getFdEntry(s);

    /*
     * Check that fd hasn't been closed.
     */
    if (fdEntry == NULL) {
        errno = EBADF;
        return -1;
    }

    /*
     * Pick up current time as may need to adjust timeout
     */
    if (timeout > 0) {
        /* Timed */
        t.tv_sec = timeout / 1000;
        t.tv_usec = (timeout % 1000) * 1000;
    } else if (timeout < 0) {
        /* Blocking: NULL timeval makes select() wait indefinitely */
        tp = 0;
    } else {
        /* Poll */
        t.tv_sec = 0;
        t.tv_usec = 0;
    }

    /* fds beyond FD_SETSIZE need a heap-allocated fd_set big enough to
     * hold bit s; howmany() rounds s+1 bits up to whole NFDBITS words. */
    if (s < FD_SETSIZE) {
        fdsp = &fds;
        FD_ZERO(fdsp);
    } else {
        int length = (howmany(s+1, NFDBITS)) * sizeof(int);
        fdsp = (fd_set *) calloc(1, length);
        if (fdsp == NULL) {
            return -1; // errno will be set to ENOMEM
        }
        allocated = 1;
    }
    FD_SET(s, fdsp);

    /* Track remaining time in nanoseconds so repeated EINTR restarts
     * don't extend the overall deadline. */
    jlong prevNanoTime = nanoTimeStamp;
    jlong nanoTimeout = (jlong) timeout * NET_NSEC_PER_MSEC;
    for(;;) {
        int rv;

        /*
         * call select on the fd. If interrupted by our wakeup signal
         * errno will be set to EBADF (by endOp).
         */
        startOp(fdEntry, &self);
        rv = select(s+1, fdsp, 0, 0, tp);
        endOp(fdEntry, &self);

        /*
         * If interrupted then adjust timeout. If timeout
         * has expired return 0 (indicating timeout expired).
         */
        if (rv < 0 && errno == EINTR) {
            if (timeout > 0) {
                jlong newNanoTime = JVM_NanoTime(env, 0);
                nanoTimeout -= newNanoTime - prevNanoTime;
                if (nanoTimeout < NET_NSEC_PER_MSEC) {
                    /* Less than 1ms left: treat as expired. */
                    if (allocated != 0)
                        free(fdsp);
                    return 0;
                }
                prevNanoTime = newNanoTime;
                /* Rearm the timeval with the remaining time. */
                t.tv_sec = nanoTimeout / NET_NSEC_PER_SEC;
                t.tv_usec = (nanoTimeout % NET_NSEC_PER_SEC) / NET_NSEC_PER_USEC;
            } else {
                continue; // timeout is -1, so loop again.
            }
        } else {
            if (allocated != 0)
                free(fdsp);
            return rv;
        }
    }
}
496
497