GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/ppc/gc/shared/barrierSetNMethod_ppc.cpp
/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/codeBlob.hpp"
#include "code/nativeInst.hpp"
#include "code/nmethod.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/debug.hpp"

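// Note (added): an nmethod entry barrier is a short guard sequence at the entry
// point of compiled code. A concurrent GC "arms" it so that threads entering a
// stale nmethod branch into a slow-path stub; "disarming" patches in the current
// guard value so the fast path falls through.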
class NativeNMethodBarrier: public NativeInstruction {

  address get_barrier_start_address() const {
    return NativeInstruction::addr_at(0);
  }
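
  // Note (added): per the sequence checked in verify(), three fixed instructions
  // (addis, addi, mtctr) precede the patchable load_const32, so it begins at the
  // fourth 32-bit word, i.e. byte offset 3 * 4.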

  NativeMovRegMem* get_patchable_instruction_handle() const {
    // Endianness is handled by NativeMovRegMem
    return reinterpret_cast<NativeMovRegMem*>(get_barrier_start_address() + 3 * 4);
  }

public:
  int get_guard_value() const {
    // Retrieve the guard value (naming of 'offset' function is misleading).
    return get_patchable_instruction_handle()->offset();
  }

  void release_set_guard_value(int value) {
    // Patching is not atomic.
    // Stale observations of the "armed" state are okay, as invoking the barrier stub in that case has no
    // unwanted side effects. Disarming is thus a non-critical operation.
    // The visibility of the "armed" state must be ensured by safepoint/handshake.

    OrderAccess::release(); // Release modified oops
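    // Note (added): on the reader side, the barrier sequence ends in an isync
    // (see verify()); the branch-plus-isync idiom provides the matching acquire.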

    // Set the guard value (naming of 'offset' function is misleading).
    get_patchable_instruction_handle()->set_offset(value);
  }

  void verify() const {
    // Although it's possible to just validate the to-be-patched instruction,
    // all instructions are validated to ensure that the barrier is hit properly - especially since
    // the pattern used in load_const32 is quite a common one.

    uint* current_instruction = reinterpret_cast<uint*>(get_barrier_start_address());

    // calculate_address_from_global_toc (compound instruction)
    verify_op_code_manually(current_instruction, MacroAssembler::is_addis(*current_instruction));
    verify_op_code_manually(current_instruction, MacroAssembler::is_addi(*current_instruction));

    verify_op_code_manually(current_instruction, MacroAssembler::is_mtctr(*current_instruction));

    get_patchable_instruction_handle()->verify();
    current_instruction += 2;

    verify_op_code(current_instruction, Assembler::LWZ_OPCODE);

    // cmpw (mnemonic)
    verify_op_code(current_instruction, Assembler::CMP_OPCODE);

    // bnectrl (mnemonic) (weak check; not checking the exact type)
    verify_op_code(current_instruction, Assembler::BCCTR_OPCODE);

    verify_op_code(current_instruction, Assembler::ISYNC_OPCODE);
  }

private:
  static void verify_op_code_manually(uint*& current_instruction, bool result) {
    assert(result, "illegal instruction sequence for nmethod entry barrier");
    current_instruction++;
  }

  static void verify_op_code(uint*& current_instruction, uint expected,
                             unsigned int mask = 63u << Assembler::OPCODE_SHIFT) {
    // Mask both the current instruction and the expected opcode, as some opcodes in Assembler
    // carry additional information to uniquely identify simplified mnemonics.
    // As long as the caller doesn't provide a custom mask, that additional information is discarded.
    verify_op_code_manually(current_instruction, (*current_instruction & mask) == (expected & mask));
  }
};

static NativeNMethodBarrier* get_nmethod_barrier(nmethod* nm) {
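  // Note (added): verify() checks a 9-instruction sequence (addis, addi, mtctr,
  // a 2-instruction patchable load, lwz, cmpw, bnectrl, isync), hence the barrier
  // starts 9 * 4 bytes before the frame-complete offset.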
  address barrier_address = nm->code_begin() + nm->frame_complete_offset() + (-9 * 4);

  auto barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address);
  debug_only(barrier->verify());
  return barrier;
}

void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
  // As PPC64 always has a valid back chain (unlike other platforms), the stub can simply pop the frame.
  // Thus, there's nothing to do here.
}

void BarrierSetNMethod::disarm(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  NativeNMethodBarrier* barrier = get_nmethod_barrier(nm);
  barrier->release_set_guard_value(disarmed_value());
}

bool BarrierSetNMethod::is_armed(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return false;
  }

  NativeNMethodBarrier* barrier = get_nmethod_barrier(nm);
  return barrier->get_guard_value() != disarmed_value();
}