Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp
40931 views
1
/*
2
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
3
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
4
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5
*
6
* This code is free software; you can redistribute it and/or modify it
7
* under the terms of the GNU General Public License version 2 only, as
8
* published by the Free Software Foundation.
9
*
10
* This code is distributed in the hope that it will be useful, but WITHOUT
11
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13
* version 2 for more details (a copy is included in the LICENSE file that
14
* accompanied this code).
15
*
16
* You should have received a copy of the GNU General Public License version
17
* 2 along with this work; if not, write to the Free Software Foundation,
18
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19
*
20
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21
* or visit www.oracle.com if you need additional information or have any
22
* questions.
23
*
24
*/
25
26
// no precompiled headers
27
#include "jvm.h"
28
#include "asm/assembler.inline.hpp"
29
#include "classfile/vmSymbols.hpp"
30
#include "code/icBuffer.hpp"
31
#include "code/vtableStubs.hpp"
32
#include "interpreter/interpreter.hpp"
33
#include "memory/allocation.inline.hpp"
34
#include "nativeInst_zero.hpp"
35
#include "os_share_linux.hpp"
36
#include "prims/jniFastGetField.hpp"
37
#include "prims/jvm_misc.hpp"
38
#include "runtime/arguments.hpp"
39
#include "runtime/frame.inline.hpp"
40
#include "runtime/interfaceSupport.inline.hpp"
41
#include "runtime/java.hpp"
42
#include "runtime/javaCalls.hpp"
43
#include "runtime/mutexLocker.hpp"
44
#include "runtime/osThread.hpp"
45
#include "runtime/sharedRuntime.hpp"
46
#include "runtime/stubRoutines.hpp"
47
#include "runtime/thread.inline.hpp"
48
#include "runtime/timer.hpp"
49
#include "signals_posix.hpp"
50
#include "utilities/align.hpp"
51
#include "utilities/events.hpp"
52
#include "utilities/vmError.hpp"
53
54
address os::current_stack_pointer() {
  // Approximate the stack pointer with this function's own frame
  // address; __builtin_frame_address(0) gives the frame address of the
  // current call, which is close enough for stack-bounds checks.
  return (address)__builtin_frame_address(0);
}
58
59
frame os::get_sender_for_C_frame(frame* fr) {
  // Native C frame walking is not implemented on Zero; this must never
  // be reached (guarded by ShouldNotCallThis()).
  ShouldNotCallThis();
  return frame(NULL, NULL); // silence compile warning.
}
63
64
frame os::current_frame() {
  // The only thing that calls this is the stack printing code in
  // VMError::report:
  //   - Step 110 (printing stack bounds) uses the sp in the frame
  //     to determine the amount of free space on the stack.  We
  //     set the sp to a close approximation of the real value in
  //     order to allow this step to complete.
  //   - Step 120 (printing native stack) tries to walk the stack.
  //     The frame we create has a NULL pc, which is ignored as an
  //     invalid frame.
  frame dummy = frame();
  dummy.set_sp((intptr_t *) current_stack_pointer());
  return dummy;
}
78
79
char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  // This is the value for x86; works pretty well for PPC too.
  return (char *) -1;
}
86
87
address os::Posix::ucontext_get_pc(const ucontext_t* uc) {
  // Not supported on Zero (there is no native pc to extract from a
  // ucontext); must never be called.
  ShouldNotCallThis();
  return NULL; // silence compile warnings
}
91
92
void os::Posix::ucontext_set_pc(ucontext_t * uc, address pc) {
  // Not supported on Zero; must never be called.
  ShouldNotCallThis();
}
95
96
address os::fetch_frame_from_context(const void* ucVoid,
                                     intptr_t** ret_sp,
                                     intptr_t** ret_fp) {
  // Not supported on Zero (no machine context to decode); must never
  // be called.
  ShouldNotCallThis();
  return NULL; // silence compile warnings
}
102
103
frame os::fetch_frame_from_context(const void* ucVoid) {
  // Not supported on Zero; must never be called.
  ShouldNotCallThis();
  return frame(NULL, NULL); // silence compile warnings
}
107
108
// Platform-specific part of HotSpot's signal handler for Zero.
// Returns true if the signal was fully handled here; returning false
// lets the shared handler treat it as a fatal error.
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
                                             ucontext_t* uc, JavaThread* thread) {

  if (info != NULL && thread != NULL) {
    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      address addr = (address) info->si_addr;

      // check if fault address is within thread stack
      if (thread->is_in_full_stack(addr)) {
        StackOverflow* overflow_state = thread->stack_overflow_state();
        // stack overflow
        if (overflow_state->in_stack_yellow_reserved_zone(addr)) {
          // Disarm the yellow zone so subsequent faults are not retriggered;
          // Zero has no continuation stub, hence ShouldNotCallThis().
          overflow_state->disable_stack_yellow_reserved_zone();
          ShouldNotCallThis();
        }
        else if (overflow_state->in_stack_red_zone(addr)) {
          overflow_state->disable_stack_red_zone();
          ShouldNotCallThis();
        }
        else {
          // Accessing stack address below sp may cause SEGV if
          // current thread has MAP_GROWSDOWN stack. This should
          // only happen when current thread was created by user
          // code with MAP_GROWSDOWN flag and then attached to VM.
          // See notes in os_linux.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
            // Mark the expansion in progress so a second fault while
            // expanding is detected as recursion below.
            thread->osthread()->set_expanding_stack();
            if (os::Linux::manually_expand_stack(thread, addr)) {
              thread->osthread()->clear_expanding_stack();
              return true;
            }
            thread->osthread()->clear_expanding_stack();
          }
          else {
            fatal("recursive segv. expanding stack.");
          }
        }
      }
    }

    /*if (thread->thread_state() == _thread_in_Java) {
      ShouldNotCallThis();
    }
    else*/ if ((thread->thread_state() == _thread_in_vm ||
               thread->thread_state() == _thread_in_native) &&
               sig == SIGBUS && thread->doing_unsafe_access()) {
      // SIGBUS during an Unsafe access would normally be routed to a
      // continuation stub; Zero has none, so this is unreachable.
      ShouldNotCallThis();
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC
    // kicks in and the heap gets shrunk before the field access.
    /*if (sig == SIGSEGV || sig == SIGBUS) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }*/
  }

  return false; // Fatal error

}
171
172
void os::Linux::init_thread_fpu_state(void) {
  // Nothing to do — Zero performs no per-thread FPU setup.
}
175
176
int os::Linux::get_fpu_control_word() {
  // Not supported on Zero; must never be called.
  ShouldNotCallThis();
  return -1; // silence compile warnings
}
180
181
void os::Linux::set_fpu_control_word(int fpu) {
  // Not supported on Zero; must never be called.
  ShouldNotCallThis();
}
184
185
///////////////////////////////////////////////////////////////////////////////
186
// thread stack
187
188
// Minimum stack sizes permitted per thread type; all 64K on Zero.
size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K;
size_t os::Posix::_java_thread_min_stack_allowed = 64 * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K;
191
192
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
  // Default stack size used when the caller does not request one.
  // Compiler threads get a larger stack than other thread types,
  // and 64-bit builds get larger stacks than 32-bit builds.
#ifdef _LP64
  if (thr_type == os::compiler_thread) {
    return 4 * M;
  }
  return 1 * M;
#else
  if (thr_type == os::compiler_thread) {
    return 2 * M;
  }
  return 512 * K;
#endif // _LP64
}
200
201
// Computes the usable stack region of the current thread: *bottom is the
// lowest usable address and *size the number of usable bytes.  Guard pages
// reported by pthread are trimmed off, and the primordial thread's
// growable stack is capped at JavaThread::stack_size_at_create().
static void current_stack_region(address *bottom, size_t *size) {
  pthread_attr_t attr;
  int res = pthread_getattr_np(pthread_self(), &attr);
  if (res != 0) {
    if (res == ENOMEM) {
      // glibc reports ENOMEM when it cannot allocate the attr data.
      vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
    }
    else {
      fatal("pthread_getattr_np failed with error = %d", res);
    }
  }

  address stack_bottom;
  size_t stack_bytes;
  res = pthread_attr_getstack(&attr, (void **) &stack_bottom, &stack_bytes);
  if (res != 0) {
    fatal("pthread_attr_getstack failed with error = %d", res);
  }
  address stack_top = stack_bottom + stack_bytes;

  // The block of memory returned by pthread_attr_getstack() includes
  // guard pages where present.  We need to trim these off.
  size_t page_bytes = os::Linux::page_size();
  assert(((intptr_t) stack_bottom & (page_bytes - 1)) == 0, "unaligned stack");

  size_t guard_bytes;
  res = pthread_attr_getguardsize(&attr, &guard_bytes);
  if (res != 0) {
    fatal("pthread_attr_getguardsize failed with errno = %d", res);
  }
  int guard_pages = align_up(guard_bytes, page_bytes) / page_bytes;
  assert(guard_bytes == guard_pages * page_bytes, "unaligned guard");

#ifdef IA64
  // IA64 has two stacks sharing the same area of memory, a normal
  // stack growing downwards and a register stack growing upwards.
  // Guard pages, if present, are in the centre.  This code splits
  // the stack in two even without guard pages, though in theory
  // there's nothing to stop us allocating more to the normal stack
  // or more to the register stack if one or the other were found
  // to grow faster.
  int total_pages = align_down(stack_bytes, page_bytes) / page_bytes;
  stack_bottom += (total_pages - guard_pages) / 2 * page_bytes;
#endif // IA64

  // Skip over the guard pages so callers never touch them.
  stack_bottom += guard_bytes;

  pthread_attr_destroy(&attr);

  // The initial thread has a growable stack, and the size reported
  // by pthread_attr_getstack is the maximum size it could possibly
  // be given what is currently mapped.  This can be huge, so we cap it.
  if (os::is_primordial_thread()) {
    stack_bytes = stack_top - stack_bottom;

    if (stack_bytes > JavaThread::stack_size_at_create())
      stack_bytes = JavaThread::stack_size_at_create();

    stack_bottom = stack_top - stack_bytes;
  }

  // Sanity: the current stack pointer must lie inside the region.
  assert(os::current_stack_pointer() >= stack_bottom, "should do");
  assert(os::current_stack_pointer() < stack_top, "should do");

  *bottom = stack_bottom;
  *size = stack_top - stack_bottom;
}
268
269
address os::current_stack_base() {
  // The stack base is the highest usable address of the current
  // thread's stack: region bottom plus region size.
  address stack_bottom;
  size_t stack_bytes;
  current_stack_region(&stack_bottom, &stack_bytes);
  return stack_bottom + stack_bytes;
}
275
276
size_t os::current_stack_size() {
  // Reported size covers the normal stack plus the HotSpot guard
  // pages; only the pthread guard pages have been trimmed off by
  // current_stack_region().
  address unused_bottom;
  size_t stack_bytes;
  current_stack_region(&unused_bottom, &stack_bytes);
  return stack_bytes;
}
283
284
/////////////////////////////////////////////////////////////////////////////
285
// helper functions for fatal error handler
286
287
void os::print_context(outputStream* st, const void* context) {
  // No machine context to print on Zero; must never be called.
  ShouldNotCallThis();
}
290
291
void os::print_register_info(outputStream *st, const void *context) {
  // No machine registers to print on Zero; must never be called.
  ShouldNotCallThis();
}
294
295
/////////////////////////////////////////////////////////////////////////////
296
// Stubs for things that would be in linux_zero.s if it existed.
297
// You probably want to disassemble these monkeys to check they're ok.
298
299
extern "C" {
300
int SpinPause() {
  // No spin-pause instruction on Zero; the return value is not
  // meaningful here.
  return -1; // silence compile warnings
}
303
304
305
void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
  // Overlap-safe element-wise copy: walk front-to-back when the
  // destination precedes the source, back-to-front when it follows
  // it.  Each jshort is moved with an individual load and store so
  // elements are never torn.
  if (from > to) {
    for (size_t i = 0; i < count; i++) {
      to[i] = from[i];
    }
  }
  else if (from < to) {
    for (size_t i = count; i > 0; i--) {
      to[i - 1] = from[i - 1];
    }
  }
}
319
void _Copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
  // Overlap-safe element-wise copy: forward when the destination is
  // below the source, backward when it is above.  Each jint is moved
  // with an individual load and store so elements are never torn.
  if (from > to) {
    for (size_t i = 0; i < count; i++) {
      to[i] = from[i];
    }
  }
  else if (from < to) {
    for (size_t i = count; i > 0; i--) {
      to[i - 1] = from[i - 1];
    }
  }
}
333
void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
334
if (from > to) {
335
const jlong *end = from + count;
336
while (from < end)
337
os::atomic_copy64(from++, to++);
338
}
339
else if (from < to) {
340
const jlong *end = from;
341
from += count - 1;
342
to += count - 1;
343
while (from >= end)
344
os::atomic_copy64(from--, to--);
345
}
346
}
347
348
void _Copy_arrayof_conjoint_bytes(const HeapWord* from,
                                  HeapWord* to,
                                  size_t count) {
  // count is in bytes; memmove handles overlapping regions.
  memmove(to, from, count);
}
353
void _Copy_arrayof_conjoint_jshorts(const HeapWord* from,
354
HeapWord* to,
355
size_t count) {
356
memmove(to, from, count * 2);
357
}
358
void _Copy_arrayof_conjoint_jints(const HeapWord* from,
359
HeapWord* to,
360
size_t count) {
361
memmove(to, from, count * 4);
362
}
363
void _Copy_arrayof_conjoint_jlongs(const HeapWord* from,
364
HeapWord* to,
365
size_t count) {
366
memmove(to, from, count * 8);
367
}
368
};
369
370
/////////////////////////////////////////////////////////////////////////////
371
// Implementations of atomic operations not supported by processors.
372
// -- http://gcc.gnu.org/onlinedocs/gcc-4.2.1/gcc/Atomic-Builtins.html
373
374
#ifndef _LP64
375
extern "C" {
376
long long unsigned int __sync_val_compare_and_swap_8(
377
volatile void *ptr,
378
long long unsigned int oldval,
379
long long unsigned int newval) {
380
ShouldNotCallThis();
381
return 0; // silence compiler warnings
382
}
383
};
384
#endif // !_LP64
385
386
#ifndef PRODUCT
void os::verify_stack_alignment() {
  // Nothing to verify on Zero.
}
#endif
390
391
int os::extra_bang_size_in_bytes() {
  // Zero does not require an additional stack banging.
  return 0;
}
395
396