GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/runtime/interfaceSupport.hpp
/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
#define SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP

#include "memory/gcLocker.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/top.hpp"

// Wrapper for all entry points to the virtual machine.
// The HandleMarkCleaner is a faster version of HandleMark.
// It relies on the fact that there is a HandleMark further
// down the stack (in JavaCalls::call_helper), and just resets
// to the saved values in that HandleMark.

class HandleMarkCleaner: public StackObj {
 private:
  Thread* _thread;
 public:
  HandleMarkCleaner(Thread* thread) {
    _thread = thread;
    _thread->last_handle_mark()->push();
  }
  ~HandleMarkCleaner() {
    _thread->last_handle_mark()->pop_and_restore();
  }

 private:
  inline void* operator new(size_t size, void* ptr) throw() {
    return ptr;
  }
};

// InterfaceSupport provides functionality used by the VM_LEAF_BASE and
// VM_ENTRY_BASE macros. These macros are used to guard entry points into
// the VM and to perform checks when leaving the VM.

class InterfaceSupport: AllStatic {
# ifdef ASSERT
 public:
  static long _scavenge_alot_counter;
  static long _fullgc_alot_counter;
  static long _number_of_calls;
  static long _fullgc_alot_invocation;

  // tracing
  static void trace(const char* result_type, const char* header);

  // Helper methods used to implement +ScavengeALot and +FullGCALot
  static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
  static void gc_alot();

  static void walk_stack_from(vframe* start_vf);
  static void walk_stack();

# ifdef ENABLE_ZAP_DEAD_LOCALS
  static void zap_dead_locals_old();
# endif

  static void zombieAll();
  static void unlinkSymbols();
  static void deoptimizeAll();
  static void stress_derived_pointers();
  static void verify_stack();
  static void verify_last_frame();
# endif

 public:
  // OS dependent stuff
#ifdef TARGET_OS_FAMILY_linux
# include "interfaceSupport_linux.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "interfaceSupport_solaris.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "interfaceSupport_windows.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "interfaceSupport_aix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "interfaceSupport_bsd.hpp"
#endif

};


// Base class for all thread transition classes.

class ThreadStateTransition : public StackObj {
 protected:
  JavaThread* _thread;
 public:
  ThreadStateTransition(JavaThread *thread) {
    _thread = thread;
    assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
  }

  // Change the thread state in a manner that allows the safepoint code to detect the change.
  // Time-critical: called on exit from every runtime routine.
  static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(from != _thread_in_Java, "use transition_from_java");
    assert(from != _thread_in_native, "use transition_from_native");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == from, "coming from wrong thread state");
    // Change to transition state (assumes total store ordering! -Urs)
    thread->set_thread_state((JavaThreadState)(from + 1));

    // Make sure new state is seen by VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // store to serialize page so VM thread can do pseudo remote membar
        os::write_memory_serialize_page(thread);
      }
    }

    if (SafepointSynchronize::do_call_back()) {
      SafepointSynchronize::block(thread);
    }
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // transition_and_fence must be used on any thread state transition
  // where there might not be a Java call stub on the stack, in
  // particular on Windows where the Structured Exception Handler is
  // set up in the call stub. os::write_memory_serialize_page() can
  // fault and we can't recover from it on Windows without a SEH in
  // place.
  static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(thread->thread_state() == from, "coming from wrong thread state");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    // Change to transition state (assumes total store ordering! -Urs)
    thread->set_thread_state((JavaThreadState)(from + 1));

    // Make sure new state is seen by VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Must use this rather than serialization page in particular on Windows
        InterfaceSupport::serialize_memory(thread);
      }
    }

    if (SafepointSynchronize::do_call_back()) {
      SafepointSynchronize::block(thread);
    }
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // Same as above, but assumes from = _thread_in_Java. This is simpler, since we
  // never block on entry to the VM (blocking here would break the code, since e.g.
  // the preserved arguments have not been set up yet).
  static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
    assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
    thread->set_thread_state(to);
  }

  static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
    assert((to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
    // Change to transition state (assumes total store ordering! -Urs)
    thread->set_thread_state(_thread_in_native_trans);

    // Make sure new state is seen by GC thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Must use this rather than serialization page in particular on Windows
        InterfaceSupport::serialize_memory(thread);
      }
    }

    // We never install asynchronous exceptions when coming (back) in
    // to the runtime from native code because the runtime is not set
    // up to handle exceptions floating around at arbitrary points.
    if (SafepointSynchronize::do_call_back() || thread->is_suspend_after_native()) {
      JavaThread::check_safepoint_and_suspend_for_native_trans(thread);

      // Clear unhandled oops anywhere we could have blocked, even if we did not actually block.
      CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
    }

    thread->set_thread_state(to);
  }
 protected:
  void trans(JavaThreadState from, JavaThreadState to)           { transition(_thread, from, to); }
  void trans_from_java(JavaThreadState to)                       { transition_from_java(_thread, to); }
  void trans_from_native(JavaThreadState to)                     { transition_from_native(_thread, to); }
  void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
};


class ThreadInVMfromJava : public ThreadStateTransition {
 public:
  ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJava() {
    trans(_thread_in_vm, _thread_in_Java);
    // Check for pending async exceptions or suspends.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
  }
};


class ThreadInVMfromUnknown {
 private:
  JavaThread* _thread;
 public:
  ThreadInVMfromUnknown() : _thread(NULL) {
    Thread* t = Thread::current();
    if (t->is_Java_thread()) {
      JavaThread* t2 = (JavaThread*) t;
      if (t2->thread_state() == _thread_in_native) {
        _thread = t2;
        ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
        // Used to have a HandleMarkCleaner but that is dangerous as
        // it could free a handle in our (indirect, nested) caller.
        // We expect any handles will be short lived and figure we
        // don't need an actual HandleMark.
      }
    }
  }
  ~ThreadInVMfromUnknown() {
    if (_thread) {
      ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
    }
  }
};


class ThreadInVMfromNative : public ThreadStateTransition {
 public:
  ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_native(_thread_in_vm);
  }
  ~ThreadInVMfromNative() {
    trans_and_fence(_thread_in_vm, _thread_in_native);
  }
};


class ThreadToNativeFromVM : public ThreadStateTransition {
 public:
  ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
    // We are leaving the VM at this point and going directly to native code.
    // Block, if we are in the middle of a safepoint synchronization.
    assert(!thread->owns_locks(), "must release all locks when leaving VM");
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_in_native);
    // Check for pending async exceptions or suspends.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
  }

  ~ThreadToNativeFromVM() {
    trans_from_native(_thread_in_vm);
    // We don't need to clear_walkable because it will happen automagically when we return to Java.
  }
};


class ThreadBlockInVM : public ThreadStateTransition {
 public:
  ThreadBlockInVM(JavaThread *thread)
  : ThreadStateTransition(thread) {
    // Once we are blocked, the VM expects the stack to be walkable.
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_blocked);
  }
  ~ThreadBlockInVM() {
    trans_and_fence(_thread_blocked, _thread_in_vm);
    // We don't need to clear_walkable because it will happen automagically when we return to Java.
  }
};
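
// Illustrative usage sketch: the transition wrapper classes above are declared as
// stack-allocated guards around the code that runs in the new state. For example, a
// VM-internal routine that is about to block on an OS primitive would typically be
// structured like this (the routine and event names below are hypothetical):
//
//   void wait_for_event(JavaThread* thread, ParkEvent* event) {
//     ThreadBlockInVM tbivm(thread);  // _thread_in_vm -> _thread_blocked, stack made walkable
//     event->park();                  // safe to block here; safepoints are not held up
//   }                                 // destructor: _thread_blocked -> _thread_in_vm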


// This special transition class is only used to prevent asynchronous exceptions
// from being installed on vm exit in situations where we can't tolerate them.
// See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
 public:
  ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJavaNoAsyncException() {
    trans(_thread_in_vm, _thread_in_Java);
    // NOTE: We do not check for pending async exceptions.
    // If we did and moved the pending async exception over into the
    // pending exception field, we would need to deopt (currently C2
    // only). However, to do so would require that we transition back
    // to the _thread_in_vm state. Instead we postpone the handling of
    // the async exception.

    // Check for pending suspends only.
    if (_thread->has_special_runtime_exit_condition())
      _thread->handle_special_runtime_exit_condition(false);
  }
};

// Debug classes instantiated in the JRT_ENTRY and IRT_ENTRY macros.
// Can be used to verify properties on enter/exit of the VM.

#ifdef ASSERT
class VMEntryWrapper {
 public:
  VMEntryWrapper() {
    if (VerifyLastFrame) {
      InterfaceSupport::verify_last_frame();
    }
  }

  ~VMEntryWrapper() {
    InterfaceSupport::check_gc_alot();
    if (WalkStackALot) {
      InterfaceSupport::walk_stack();
    }
#ifdef ENABLE_ZAP_DEAD_LOCALS
    if (ZapDeadLocalsOld) {
      InterfaceSupport::zap_dead_locals_old();
    }
#endif
#ifdef COMPILER2
    // This option is not used by Compiler 1
    if (StressDerivedPointers) {
      InterfaceSupport::stress_derived_pointers();
    }
#endif
    if (DeoptimizeALot || DeoptimizeRandom) {
      InterfaceSupport::deoptimizeAll();
    }
    if (ZombieALot) {
      InterfaceSupport::zombieAll();
    }
    if (UnlinkSymbolsALot) {
      InterfaceSupport::unlinkSymbols();
    }
    // do verification AFTER potential deoptimization
    if (VerifyStack) {
      InterfaceSupport::verify_stack();
    }
  }
};


class VMNativeEntryWrapper {
 public:
  VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }

  ~VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }
};

#endif


// VM-internal runtime interface support

#ifdef ASSERT

class RuntimeHistogramElement : public HistogramElement {
 public:
  RuntimeHistogramElement(const char* name);
};

#define TRACE_CALL(result_type, header)                              \
  InterfaceSupport::_number_of_calls++;                              \
  if (TraceRuntimeCalls)                                             \
    InterfaceSupport::trace(#result_type, #header);                  \
  if (CountRuntimeCalls) {                                           \
    static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
    if (e != NULL) e->increment_count();                             \
  }
#else
#define TRACE_CALL(result_type, header)                              \
  /* do nothing */
#endif


// LEAF routines do not lock, GC or throw exceptions

#define VM_LEAF_BASE(result_type, header)                            \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite,                    \
                                         JavaThread::current()));   \
  os::verify_stack_alignment();                                      \
  /* begin of body */

#define VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)         \
  TRACE_CALL(result_type, header)                                    \
  debug_only(ResetNoHandleMark __rnhm;)                              \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// ENTRY routines may lock, GC and throw exceptions

#define VM_ENTRY_BASE(result_type, header, thread)                   \
  TRACE_CALL(result_type, header)                                    \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// QUICK_ENTRY routines behave like ENTRY but without a handle mark

#define VM_QUICK_ENTRY_BASE(result_type, header, thread)             \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// Definitions for IRT (Interpreter Runtime)
// (thread is an argument passed in to all these routines)

#define IRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define IRT_LEAF(result_type, header)                                \
  result_type header {                                               \
    VM_LEAF_BASE(result_type, header)                                \
    debug_only(No_Safepoint_Verifier __nspv(true);)


#define IRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

#define IRT_END }
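
// Illustrative usage sketch: an interpreter runtime entry point pairs IRT_ENTRY with
// IRT_END. The macros open the function, perform the thread state transition and the
// debug checks, and the body follows as ordinary code (the routine below is hypothetical):
//
//   IRT_ENTRY(void, InterpreterRuntime::example_entry(JavaThread* thread, int index))
//     // body runs in _thread_in_vm and may lock, allocate, GC and throw exceptions
//     if (index < 0) {
//       THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
//     }
//   IRT_END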


// Definitions for JRT (Java (Compiler/Shared) Runtime)

#define JRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread));        \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define JRT_LEAF(result_type, header)                                \
  result_type header {                                               \
    VM_LEAF_BASE(result_type, header)                                \
    debug_only(JRT_Leaf_Verifier __jlv;)


#define JRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread));        \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

// Same as JRT_ENTRY but allows for a return value after the safepoint
// to get back into Java from the VM
#define JRT_BLOCK_ENTRY(result_type, header)                         \
  result_type header {                                               \
    MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread));        \
    TRACE_CALL(result_type, header)                                  \
    HandleMarkCleaner __hm(thread);

#define JRT_BLOCK                                                    \
    {                                                                \
    ThreadInVMfromJava __tiv(thread);                                \
    Thread* THREAD = thread;                                         \
    debug_only(VMEntryWrapper __vew;)

#define JRT_BLOCK_END }

#define JRT_END }
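
// Illustrative usage sketch: JRT_BLOCK_ENTRY defers the thread state transition so
// that a return value can still be produced after the blocking part of the routine,
// for example (the routine and helper names below are hypothetical):
//
//   JRT_BLOCK_ENTRY(address, SharedRuntime::example_resolve(JavaThread* thread))
//     address result = NULL;
//     JRT_BLOCK                      // enter _thread_in_vm; may lock, GC and throw
//       result = do_the_resolution(thread);
//     JRT_BLOCK_END                  // leave the VM state again
//     return result;                 // computed after the safepoint-capable block
//   JRT_END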

// Definitions for JNI

#define JNI_ENTRY(result_type, header)                               \
    JNI_ENTRY_NO_PRESERVE(result_type, header)                       \
    WeakPreserveExceptionMark __wem(thread);

#define JNI_ENTRY_NO_PRESERVE(result_type, header)                   \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread = JavaThread::thread_from_jni_environment(env); \
    assert(!VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread));        \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


// Ensure that the VMNativeEntryWrapper constructor, which can cause
// a GC, is called outside the NoHandleMark (set via VM_QUICK_ENTRY_BASE).
#define JNI_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread = JavaThread::thread_from_jni_environment(env); \
    assert(!VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JNI_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread = JavaThread::thread_from_jni_environment(env); \
    assert(!VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    VM_LEAF_BASE(result_type, header)


// Close the routine and the extern "C"
#define JNI_END } }
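
// Illustrative usage sketch: a JNI implementation function is opened with JNI_ENTRY
// and closed with JNI_END, which also closes the extern "C" block. 'thread' and THREAD
// are set up by the macros (the entry name below is hypothetical):
//
//   JNI_ENTRY(jint, jni_ExampleGetVersion(JNIEnv *env))
//     // runs in _thread_in_vm; handles are cleaned up by HandleMarkCleaner on exit
//     return JNI_VERSION_1_8;
//   JNI_END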


// Definitions for JVM

#define JVM_ENTRY(result_type, header)                               \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread = JavaThread::thread_from_jni_environment(env); \
    MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread));        \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_ENTRY_NO_ENV(result_type, header)                        \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();  \
    MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread));        \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread = JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JVM_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    VM_Exit::block_if_vm_exited();                                   \
    VM_LEAF_BASE(result_type, header)


#define JVM_ENTRY_FROM_LEAF(env, result_type, header)                \
  { {                                                                \
    JavaThread* thread = JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)


#define JVM_END } }
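
// Illustrative usage sketch: JVM_ENTRY is used the same way for the JVM_* interface
// functions exported to the class library (the entry below is hypothetical):
//
//   JVM_ENTRY(jobject, JVM_ExampleIdentity(JNIEnv* env, jobject obj))
//     // runs in _thread_in_vm; oops must be wrapped in Handles across safepoints
//     Handle h(THREAD, JNIHandles::resolve(obj));
//     return JNIHandles::make_local(THREAD, h());
//   JVM_END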

#endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP