Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/jdk/src/aix/native/java/net/aix_close.c
47458 views
1
/*
2
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
3
* Copyright (c) 2016, SAP SE and/or its affiliates. All rights reserved.
4
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5
*
6
* This code is free software; you can redistribute it and/or modify it
7
* under the terms of the GNU General Public License version 2 only, as
8
* published by the Free Software Foundation. Oracle designates this
9
* particular file as subject to the "Classpath" exception as provided
10
* by Oracle in the LICENSE file that accompanied this code.
11
*
12
* This code is distributed in the hope that it will be useful, but WITHOUT
13
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15
* version 2 for more details (a copy is included in the LICENSE file that
16
* accompanied this code).
17
*
18
* You should have received a copy of the GNU General Public License version
19
* 2 along with this work; if not, write to the Free Software Foundation,
20
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21
*
22
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
23
* or visit www.oracle.com if you need additional information or have any
24
* questions.
25
*/
26
27
/*
28
* This file contains implementations of NET_... functions. The NET_.. functions are
29
* wrappers for common file- and socket functions plus provisions for non-blocking IO.
30
*
31
* (basically, the layers remember all file descriptors waiting for a particular fd;
32
* all threads waiting on a certain fd can be woken up by sending them a signal; this
33
* is done e.g. when the fd is closed.)
34
*
35
* This was originally copied from the linux_close.c implementation.
36
*
37
* Side Note: This coding needs initialization. Under Linux this is done
38
* automatically via __attribute((constructor)), on AIX this is done manually
39
* (see aix_close_init).
40
*
41
*/
42
43
#include <assert.h>
44
#include <limits.h>
45
#include <stdio.h>
46
#include <stdlib.h>
47
#include <signal.h>
48
#include <pthread.h>
49
#include <sys/types.h>
50
#include <sys/socket.h>
51
#include <sys/time.h>
52
#include <sys/resource.h>
53
#include <sys/uio.h>
54
#include <unistd.h>
55
#include <errno.h>
56
57
#include <sys/poll.h>
58
59
/*
 * Stack allocated by thread when doing blocking operation.
 * Each blocking thread links one of these onto the fd's waiter list
 * (see startOp/endOp below) so that closefd() can find the thread
 * and interrupt it with the wakeup signal.
 */
typedef struct threadEntry {
    pthread_t thr;                      /* this thread */
    struct threadEntry *next;           /* next thread waiting on the same fd */
    int intr;                           /* set to 1 by closefd() when the fd is closed */
} threadEntry_t;
67
68
/*
 * Heap allocated during initialization - one entry per fd
 * (see aix_close_init / getFdEntry).
 */
typedef struct {
    pthread_mutex_t lock;               /* fd lock */
    threadEntry_t *threads;             /* threads blocked on fd */
} fdEntry_t;
75
76
/*
 * Signal used to unblock a thread stuck in a blocking syscall;
 * installed with a no-op handler in aix_close_init().
 */
static int sigWakeup = (SIGRTMAX - 1);

/*
 * fdTable holds one entry per file descriptor, up to a certain
 * maximum.
 * Theoretically, the number of possible file descriptors can get
 * large, though usually it does not. Entries for small value file
 * descriptors are kept in a simple table, which covers most scenarios.
 * Entries for large value file descriptors are kept in an overflow
 * table, which is organized as a sparse two dimensional array whose
 * slabs are allocated on demand. This covers all corner cases while
 * keeping memory consumption reasonable.
 */

/* Base table for low value file descriptors */
static fdEntry_t* fdTable = NULL;
/* Maximum size of base table (in number of entries). */
static const int fdTableMaxSize = 0x1000; /* 4K */
/* Actual size of base table (in number of entries) */
static int fdTableLen = 0;
/* Max. theoretical number of file descriptors on system. */
static int fdLimit = 0;

/* Overflow table, should base table not be large enough. Organized as
 * an array of n slabs, each holding 64k entries.
 */
static fdEntry_t** fdOverflowTable = NULL;
/* Number of slabs in the overflow table */
static int fdOverflowTableLen = 0;
/* Number of entries in one slab */
static const int fdOverflowTableSlabSize = 0x10000; /* 64k */
/* Guards lazy allocation of overflow slabs in getFdEntry().
 * NOTE(review): unlike the other file-scope variables this one is not
 * 'static' - confirm no other translation unit references it before
 * narrowing its linkage. */
pthread_mutex_t fdOverflowTableLock = PTHREAD_MUTEX_INITIALIZER;
111
112
/*
 * No-op handler for the wakeup signal. Its sole purpose is to make a
 * blocked system call in the target thread return with EINTR; the
 * handler itself has nothing to do.
 */
static void sig_wakeup(int signo) {
    /* deliberately empty - signal delivery is the whole effect */
}
117
118
/*
119
* Initialization routine (executed when library is loaded)
120
* Allocate fd tables and sets up signal handler.
121
*
122
* On AIX we don't have __attribute((constructor)) so we need to initialize
123
* manually (from JNI_OnLoad() in 'src/share/native/java/net/net_util.c')
124
*/
125
void aix_close_init() {
126
struct rlimit nbr_files;
127
sigset_t sigset;
128
struct sigaction sa;
129
int i = 0;
130
131
/* Determine the maximum number of possible file descriptors. */
132
if (-1 == getrlimit(RLIMIT_NOFILE, &nbr_files)) {
133
fprintf(stderr, "library initialization failed - "
134
"unable to get max # of allocated fds\n");
135
abort();
136
}
137
if (nbr_files.rlim_max != RLIM_INFINITY) {
138
fdLimit = nbr_files.rlim_max;
139
} else {
140
/* We just do not know. */
141
fdLimit = INT_MAX;
142
}
143
144
/* Allocate table for low value file descriptors. */
145
fdTableLen = fdLimit < fdTableMaxSize ? fdLimit : fdTableMaxSize;
146
fdTable = (fdEntry_t*) calloc(fdTableLen, sizeof(fdEntry_t));
147
if (fdTable == NULL) {
148
fprintf(stderr, "library initialization failed - "
149
"unable to allocate file descriptor table - out of memory");
150
abort();
151
} else {
152
for (i = 0; i < fdTableLen; i ++) {
153
pthread_mutex_init(&fdTable[i].lock, NULL);
154
}
155
}
156
157
/* Allocate overflow table, if needed */
158
if (fdLimit > fdTableMaxSize) {
159
fdOverflowTableLen = ((fdLimit - fdTableMaxSize) / fdOverflowTableSlabSize) + 1;
160
fdOverflowTable = (fdEntry_t**) calloc(fdOverflowTableLen, sizeof(fdEntry_t*));
161
if (fdOverflowTable == NULL) {
162
fprintf(stderr, "library initialization failed - "
163
"unable to allocate file descriptor overflow table - out of memory");
164
abort();
165
}
166
}
167
168
/*
169
* Setup the signal handler
170
*/
171
sa.sa_handler = sig_wakeup;
172
sa.sa_flags = 0;
173
sigemptyset(&sa.sa_mask);
174
sigaction(sigWakeup, &sa, NULL);
175
176
sigemptyset(&sigset);
177
sigaddset(&sigset, sigWakeup);
178
sigprocmask(SIG_UNBLOCK, &sigset, NULL);
179
}
180
181
/*
 * Return the fd table entry for this fd, or NULL for a negative fd.
 * Entries for fds below fdTableMaxSize live in the base table; larger
 * fds map into the overflow table, whose 64k-entry slabs are allocated
 * lazily under fdOverflowTableLock on first use. Slabs are never
 * freed, so a slab pointer stays valid after the lock is released.
 */
static inline fdEntry_t *getFdEntry(int fd)
{
    fdEntry_t* result = NULL;

    if (fd < 0) {
        return NULL;
    }

    /* This should not happen. If it does, our assumption about
     * max. fd value was wrong. */
    assert(fd < fdLimit);

    if (fd < fdTableMaxSize) {
        /* fd is in base table. */
        assert(fd < fdTableLen);
        result = &fdTable[fd];
    } else {
        /* fd is in overflow table. */
        const int indexInOverflowTable = fd - fdTableMaxSize;
        const int rootindex = indexInOverflowTable / fdOverflowTableSlabSize;
        const int slabindex = indexInOverflowTable % fdOverflowTableSlabSize;
        fdEntry_t* slab = NULL;
        assert(rootindex < fdOverflowTableLen);
        assert(slabindex < fdOverflowTableSlabSize);
        pthread_mutex_lock(&fdOverflowTableLock);
        /* Allocate new slab in overflow table if needed */
        if (fdOverflowTable[rootindex] == NULL) {
            fdEntry_t* const newSlab =
                (fdEntry_t*)calloc(fdOverflowTableSlabSize, sizeof(fdEntry_t));
            if (newSlab == NULL) {
                fprintf(stderr, "Unable to allocate file descriptor overflow"
                        " table slab - out of memory");
                /* unlock before abort so the mutex is not left held */
                pthread_mutex_unlock(&fdOverflowTableLock);
                abort();
            } else {
                int i;
                for (i = 0; i < fdOverflowTableSlabSize; i ++) {
                    pthread_mutex_init(&newSlab[i].lock, NULL);
                }
                /* publish the slab only after it is fully initialized */
                fdOverflowTable[rootindex] = newSlab;
            }
        }
        pthread_mutex_unlock(&fdOverflowTableLock);
        /* Safe to read after unlock: once set, a slab pointer never
         * changes and slabs are never freed. */
        slab = fdOverflowTable[rootindex];
        result = &slab[slabindex];
    }

    return result;

}
234
235
236
/*
237
* Start a blocking operation :-
238
* Insert thread onto thread list for the fd.
239
*/
240
static inline void startOp(fdEntry_t *fdEntry, threadEntry_t *self)
241
{
242
self->thr = pthread_self();
243
self->intr = 0;
244
245
pthread_mutex_lock(&(fdEntry->lock));
246
{
247
self->next = fdEntry->threads;
248
fdEntry->threads = self;
249
}
250
pthread_mutex_unlock(&(fdEntry->lock));
251
}
252
253
/*
254
* End a blocking operation :-
255
* Remove thread from thread list for the fd
256
* If fd has been interrupted then set errno to EBADF
257
*/
258
static inline void endOp
259
(fdEntry_t *fdEntry, threadEntry_t *self)
260
{
261
int orig_errno = errno;
262
pthread_mutex_lock(&(fdEntry->lock));
263
{
264
threadEntry_t *curr, *prev=NULL;
265
curr = fdEntry->threads;
266
while (curr != NULL) {
267
if (curr == self) {
268
if (curr->intr) {
269
orig_errno = EBADF;
270
}
271
if (prev == NULL) {
272
fdEntry->threads = curr->next;
273
} else {
274
prev->next = curr->next;
275
}
276
break;
277
}
278
prev = curr;
279
curr = curr->next;
280
}
281
}
282
pthread_mutex_unlock(&(fdEntry->lock));
283
errno = orig_errno;
284
}
285
286
/*
287
* Close or dup2 a file descriptor ensuring that all threads blocked on
288
* the file descriptor are notified via a wakeup signal.
289
*
290
* fd1 < 0 => close(fd2)
291
* fd1 >= 0 => dup2(fd1, fd2)
292
*
293
* Returns -1 with errno set if operation fails.
294
*/
295
static int closefd(int fd1, int fd2) {
296
int rv, orig_errno;
297
fdEntry_t *fdEntry = getFdEntry(fd2);
298
if (fdEntry == NULL) {
299
errno = EBADF;
300
return -1;
301
}
302
303
/*
304
* Lock the fd to hold-off additional I/O on this fd.
305
*/
306
pthread_mutex_lock(&(fdEntry->lock));
307
308
{
309
/* On fast machines we see that we enter dup2 before the
310
* accepting thread had a chance to get and process the signal.
311
* So in case we woke a thread up, give it some time to cope.
312
* Also see https://bugs.openjdk.java.net/browse/JDK-8006395 */
313
int num_woken = 0;
314
315
/*
316
* Send a wakeup signal to all threads blocked on this
317
* file descriptor.
318
*/
319
threadEntry_t *curr = fdEntry->threads;
320
while (curr != NULL) {
321
curr->intr = 1;
322
pthread_kill( curr->thr, sigWakeup );
323
num_woken ++;
324
curr = curr->next;
325
}
326
327
if (num_woken > 0) {
328
usleep(num_woken * 50);
329
}
330
331
/*
332
* And close/dup the file descriptor
333
* (restart if interrupted by signal)
334
*/
335
do {
336
if (fd1 < 0) {
337
rv = close(fd2);
338
} else {
339
rv = dup2(fd1, fd2);
340
}
341
} while (rv == -1 && errno == EINTR);
342
}
343
344
/*
345
* Unlock without destroying errno
346
*/
347
orig_errno = errno;
348
pthread_mutex_unlock(&(fdEntry->lock));
349
errno = orig_errno;
350
351
return rv;
352
}
353
354
/*
 * Wrapper for dup2 - same semantics as the dup2 system call except
 * that any threads blocked in an I/O system call on fd2 will be
 * preempted and return -1/EBADF.
 */
int NET_Dup2(int fd, int fd2) {
    if (fd >= 0) {
        return closefd(fd, fd2);
    }
    /* a negative source fd can never be dup'ed */
    errno = EBADF;
    return -1;
}
366
367
/*
 * Wrapper for close - same semantics as the close system call
 * except that any threads blocked in an I/O operation on fd will be
 * preempted and their I/O system call will return -1/EBADF.
 */
int NET_SocketClose(int fd) {
    /* a negative first argument makes closefd() close rather than dup2 */
    const int close_only = -1;
    return closefd(close_only, fd);
}
375
376
/************** Basic I/O operations here ***************/

/*
 * Macro to perform a blocking IO operation. Restarts
 * automatically if interrupted by signal (other than
 * our wakeup signal).
 *
 * NOTE: the expansion ends with 'return ret;', so the macro returns
 * from the enclosing function - any statement placed after an
 * invocation of this macro is unreachable.
 */
#define BLOCKING_IO_RETURN_INT(FD, FUNC) {      \
    int ret;                                    \
    threadEntry_t self;                         \
    fdEntry_t *fdEntry = getFdEntry(FD);        \
    if (fdEntry == NULL) {                      \
        errno = EBADF;                          \
        return -1;                              \
    }                                           \
    do {                                        \
        startOp(fdEntry, &self);                \
        ret = FUNC;                             \
        endOp(fdEntry, &self);                  \
    } while (ret == -1 && errno == EINTR);      \
    return ret;                                 \
}
398
399
/*
 * Blocking recv(); returns -1/EBADF if the socket is closed by
 * another thread while this one is blocked (see closefd()).
 */
int NET_Read(int s, void* buf, size_t len) {
    BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, 0) );
}
402
403
/*
 * recv() that does not wait for data.
 * NOTE(review): MSG_NONBLOCK is presumably the AIX per-call
 * non-blocking flag - confirm against the AIX recv() documentation.
 */
int NET_NonBlockingRead(int s, void* buf, size_t len) {
    BLOCKING_IO_RETURN_INT(s, recv(s, buf, len, MSG_NONBLOCK));
}
406
407
/*
 * Blocking readv() with interruptible-close semantics (-1/EBADF on
 * concurrent close of s).
 */
int NET_ReadV(int s, const struct iovec * vector, int count) {
    BLOCKING_IO_RETURN_INT( s, readv(s, vector, count) );
}
410
411
int NET_RecvFrom(int s, void *buf, int len, unsigned int flags,
412
struct sockaddr *from, int *fromlen) {
413
socklen_t socklen = *fromlen;
414
BLOCKING_IO_RETURN_INT( s, recvfrom(s, buf, len, flags, from, &socklen) );
415
*fromlen = socklen;
416
}
417
418
/*
 * Blocking send() with interruptible-close semantics (-1/EBADF on
 * concurrent close of s).
 */
int NET_Send(int s, void *msg, int len, unsigned int flags) {
    BLOCKING_IO_RETURN_INT( s, send(s, msg, len, flags) );
}
421
422
/*
 * Blocking writev() with interruptible-close semantics (-1/EBADF on
 * concurrent close of s).
 */
int NET_WriteV(int s, const struct iovec * vector, int count) {
    BLOCKING_IO_RETURN_INT( s, writev(s, vector, count) );
}
425
426
/*
 * Blocking sendto() with interruptible-close semantics (-1/EBADF on
 * concurrent close of s).
 */
int NET_SendTo(int s, const void *msg, int len, unsigned int
       flags, const struct sockaddr *to, int tolen) {
    BLOCKING_IO_RETURN_INT( s, sendto(s, msg, len, flags, to, tolen) );
}
430
431
int NET_Accept(int s, struct sockaddr *addr, int *addrlen) {
432
socklen_t socklen = *addrlen;
433
BLOCKING_IO_RETURN_INT( s, accept(s, addr, &socklen) );
434
*addrlen = socklen;
435
}
436
437
/*
 * Interruptible connect() wrapper.
 * If connect() is interrupted by a signal, waits for completion with
 * poll(POLLOUT) and then reads SO_ERROR to decide success or failure.
 * Returns 0 on success, -1 with errno set on failure (EBADF if the
 * socket was closed concurrently by another thread).
 */
int NET_Connect(int s, struct sockaddr *addr, int addrlen) {
    int crc = -1, prc = -1;
    threadEntry_t self;
    fdEntry_t* fdEntry = getFdEntry(s);

    if (fdEntry == NULL) {
        errno = EBADF;
        return -1;
    }

    /* On AIX, when the system call connect() is interrupted, the connection
     * is not aborted and it will be established asynchronously by the kernel.
     * Hence, no need to restart connect() when EINTR is received
     */
    startOp(fdEntry, &self);
    crc = connect(s, addr, addrlen);
    endOp(fdEntry, &self);

    if (crc == -1 && errno == EINTR) {
        struct pollfd s_pollfd;
        int sockopt_arg = 0;
        socklen_t len;

        s_pollfd.fd = s;
        s_pollfd.events = POLLOUT | POLLERR;

        /* poll the file descriptor until the in-flight connect resolves */
        do {
            startOp(fdEntry, &self);
            prc = poll(&s_pollfd, 1, -1);
            endOp(fdEntry, &self);
        } while (prc == -1 && errno == EINTR);

        if (prc < 0)
            return prc;

        len = sizeof(sockopt_arg);

        /* Check whether the connection has been established */
        if (getsockopt(s, SOL_SOCKET, SO_ERROR, &sockopt_arg, &len) == -1)
            return -1;

        /* non-zero SO_ERROR means the asynchronous connect failed */
        if (sockopt_arg != 0 ) {
            errno = sockopt_arg;
            return -1;
        }
    } else {
        /* connect() finished synchronously - propagate its result */
        return crc;
    }

    /* At this point, fd is connected. Set successful return code */
    return 0;
}
490
491
#ifndef USE_SELECT
/*
 * Interruptible poll(). Only ufds[0].fd is registered on the waiter
 * list, so close interruption applies to the first descriptor.
 */
int NET_Poll(struct pollfd *ufds, unsigned int nfds, int timeout) {
    BLOCKING_IO_RETURN_INT( ufds[0].fd, poll(ufds, nfds, timeout) );
}
#else
/*
 * Interruptible select(). 's' is select()'s nfds argument (highest
 * fd plus one), hence 's-1' is the descriptor registered for
 * interruption.
 */
int NET_Select(int s, fd_set *readfds, fd_set *writefds,
               fd_set *exceptfds, struct timeval *timeout) {
    BLOCKING_IO_RETURN_INT( s-1,
                            select(s, readfds, writefds, exceptfds, timeout) );
}
#endif
502
503
/*
 * Wrapper for poll(s, timeout).
 * Auto restarts with adjusted timeout if interrupted by
 * signal other than our wakeup signal.
 *
 * Parameters:
 *   s           - socket polled for POLLIN | POLLERR
 *   timeout     - remaining timeout in milliseconds
 *   currentTime - caller-supplied current time in milliseconds
 *                 (assumed to be on the same clock as gettimeofday -
 *                 TODO confirm against callers)
 *
 * Returns poll()'s result: > 0 ready, 0 timed out, -1 error
 * (errno == EBADF if the fd was closed by another thread).
 */
int NET_Timeout0(int s, long timeout, long currentTime) {
    long prevtime = currentTime, newtime;
    struct timeval t;
    fdEntry_t *fdEntry = getFdEntry(s);

    /*
     * Check that fd hasn't been closed.
     */
    if (fdEntry == NULL) {
        errno = EBADF;
        return -1;
    }

    for(;;) {
        struct pollfd pfd;
        int rv;
        threadEntry_t self;

        /*
         * Poll the fd. If interrupted by our wakeup signal
         * errno will be set to EBADF.
         */
        pfd.fd = s;
        pfd.events = POLLIN | POLLERR;

        startOp(fdEntry, &self);
        rv = poll(&pfd, 1, timeout);
        endOp(fdEntry, &self);

        /*
         * If interrupted then adjust timeout. If timeout
         * has expired return 0 (indicating timeout expired).
         */
        if (rv < 0 && errno == EINTR) {
            if (timeout > 0) {
                gettimeofday(&t, NULL);
                /* ms elapsed since prevtime, deducted from remaining timeout */
                newtime = t.tv_sec * 1000 + t.tv_usec / 1000;
                timeout -= newtime - prevtime;
                if (timeout <= 0) {
                    return 0;
                }
                prevtime = newtime;
            }
        } else {
            return rv;
        }

    }
}
557
558