Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/memory/guardedMemory.hpp
32285 views
1
/*
2
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#ifndef SHARE_VM_MEMORY_GUARDED_MEMORY_HPP
26
#define SHARE_VM_MEMORY_GUARDED_MEMORY_HPP
27
28
#include "memory/allocation.hpp"
29
#include "utilities/globalDefinitions.hpp"
30
31
/**
32
* Guarded memory for detecting buffer overrun.
33
*
34
* Allows allocations to be wrapped with padded bytes of a known byte pattern,
35
* that is a "guard". Guard patterns may be verified to detect buffer overruns.
36
*
37
* Primarily used by "debug malloc" and "checked JNI".
38
*
39
* Memory layout:
40
*
41
* |Offset | Content | Description |
42
* |------------------------------------------------------------
43
* |base_addr | 0xABABABABABABABAB | Head guard |
44
* |+16 | <size_t:user_size> | User data size |
45
* |+sizeof(uintptr_t) | <tag> | Tag word |
46
* |+sizeof(void*) | 0xF1 <user_data> ( | User data |
47
* |+user_size | 0xABABABABABABABAB | Tail guard |
48
* -------------------------------------------------------------
49
*
50
* Where:
51
* - guard padding uses "badResourceValue" (0xAB)
52
* - tag word is general purpose
53
* - user data
54
* -- initially padded with "uninitBlockPad" (0xF1),
55
* -- to "freeBlockPad" (0xBA), when freed
56
*
57
* Usage:
58
*
59
* * Allocations: one may wrap allocations with guard memory:
60
* <code>
61
* Thing* alloc_thing() {
62
* void* mem = user_alloc_fn(GuardedMemory::get_total_size(sizeof(thing)));
63
* GuardedMemory guarded(mem, sizeof(thing));
64
* return (Thing*) guarded.get_user_ptr();
65
* }
66
* </code>
67
 * * Verify: memory guards are still intact
68
* <code>
69
* bool verify_thing(Thing* thing) {
70
* GuardedMemory guarded((void*)thing);
71
* return guarded.verify_guards();
72
* }
73
* </code>
74
* * Free: one may mark bytes as freed (further debugging support)
75
* <code>
76
* void free_thing(Thing* thing) {
77
* GuardedMemory guarded((void*)thing);
78
* assert(guarded.verify_guards(), "Corrupt thing");
79
 *    user_free_fn(guarded.release_for_freeing());
80
* }
81
* </code>
82
*/
83
class GuardedMemory : StackObj { // Wrapper on stack
84
85
// Private inner classes for memory layout...
86
87
protected:
88
89
/**
90
* Guard class for header and trailer known pattern to test for overwrites.
91
*/
92
class Guard { // Class for raw memory (no vtbl allowed)
93
friend class GuardedMemory;
94
protected:
95
enum {
96
GUARD_SIZE = 16
97
};
98
99
u_char _guard[GUARD_SIZE];
100
101
public:
102
103
void build() {
104
u_char* c = _guard; // Possibly unaligned if tail guard
105
u_char* end = c + GUARD_SIZE;
106
while (c < end) {
107
*c = badResourceValue;
108
c++;
109
}
110
}
111
112
bool verify() const {
113
u_char* c = (u_char*) _guard;
114
u_char* end = c + GUARD_SIZE;
115
while (c < end) {
116
if (*c != badResourceValue) {
117
return false;
118
}
119
c++;
120
}
121
return true;
122
}
123
124
}; // GuardedMemory::Guard
125
126
/**
127
* Header guard and size
128
*/
129
class GuardHeader : Guard {
130
friend class GuardedMemory;
131
protected:
132
// Take care in modifying fields here, will effect alignment
133
// e.g. x86 ABI 16 byte stack alignment
134
union {
135
uintptr_t __unused_full_word1;
136
size_t _user_size;
137
};
138
void* _tag;
139
public:
140
void set_user_size(const size_t usz) { _user_size = usz; }
141
size_t get_user_size() const { return _user_size; }
142
143
void set_tag(const void* tag) { _tag = (void*) tag; }
144
void* get_tag() const { return _tag; }
145
146
}; // GuardedMemory::GuardHeader
147
148
// Guarded Memory...
149
150
protected:
151
u_char* _base_addr;
152
153
public:
154
155
/**
156
* Create new guarded memory.
157
*
158
* Wraps, starting at the given "base_ptr" with guards. Use "get_user_ptr()"
159
* to return a pointer suitable for user data.
160
*
161
* @param base_ptr allocation wishing to be wrapped, must be at least "GuardedMemory::get_total_size()" bytes.
162
* @param user_size the size of the user data to be wrapped.
163
* @param tag optional general purpose tag.
164
*/
165
GuardedMemory(void* base_ptr, const size_t user_size, const void* tag = NULL) {
166
wrap_with_guards(base_ptr, user_size, tag);
167
}
168
169
/**
170
* Wrap existing guarded memory.
171
*
172
* To use this constructor, one must have created guarded memory with
173
* "GuardedMemory(void*, size_t, void*)" (or indirectly via helper, e.g. "wrap_copy()").
174
*
175
* @param user_p existing wrapped memory.
176
*/
177
GuardedMemory(void* userp) {
178
u_char* user_ptr = (u_char*) userp;
179
assert((uintptr_t)user_ptr > (sizeof(GuardHeader) + 0x1000), "Invalid pointer");
180
_base_addr = (user_ptr - sizeof(GuardHeader));
181
}
182
183
/**
184
* Create new guarded memory.
185
*
186
* Wraps, starting at the given "base_ptr" with guards. Allows reuse of stack allocated helper.
187
*
188
* @param base_ptr allocation wishing to be wrapped, must be at least "GuardedMemory::get_total_size()" bytes.
189
* @param user_size the size of the user data to be wrapped.
190
* @param tag optional general purpose tag.
191
*
192
* @return user data pointer (inner pointer to supplied "base_ptr").
193
*/
194
void* wrap_with_guards(void* base_ptr, size_t user_size, const void* tag = NULL) {
195
assert(base_ptr != NULL, "Attempt to wrap NULL with memory guard");
196
_base_addr = (u_char*)base_ptr;
197
get_head_guard()->build();
198
get_head_guard()->set_user_size(user_size);
199
get_tail_guard()->build();
200
set_tag(tag);
201
set_user_bytes(uninitBlockPad);
202
assert(verify_guards(), "Expected valid memory guards");
203
return get_user_ptr();
204
}
205
206
/**
207
* Verify head and tail guards.
208
*
209
* @return true if guards are intact, false would indicate a buffer overrun.
210
*/
211
bool verify_guards() const {
212
if (_base_addr != NULL) {
213
return (get_head_guard()->verify() && get_tail_guard()->verify());
214
}
215
return false;
216
}
217
218
/**
219
* Set the general purpose tag.
220
*
221
* @param tag general purpose tag.
222
*/
223
void set_tag(const void* tag) { get_head_guard()->set_tag(tag); }
224
225
/**
226
* Return the general purpose tag.
227
*
228
* @return the general purpose tag, defaults to NULL.
229
*/
230
void* get_tag() const { return get_head_guard()->get_tag(); }
231
232
/**
233
* Return the size of the user data.
234
*
235
* @return the size of the user data.
236
*/
237
size_t get_user_size() const {
238
assert(_base_addr != NULL, "Not wrapping any memory");
239
return get_head_guard()->get_user_size();
240
}
241
242
/**
243
* Return the user data pointer.
244
*
245
* @return the user data pointer.
246
*/
247
u_char* get_user_ptr() const {
248
assert(_base_addr != NULL, "Not wrapping any memory");
249
return _base_addr + sizeof(GuardHeader);
250
}
251
252
/**
253
* Release the wrapped pointer for resource freeing.
254
*
255
* Pads the user data with "freeBlockPad", and dis-associates the helper.
256
*
257
* @return the original base pointer used to wrap the data.
258
*/
259
void* release_for_freeing() {
260
set_user_bytes(freeBlockPad);
261
return release();
262
}
263
264
/**
265
* Dis-associate the help from the original base address.
266
*
267
* @return the original base pointer used to wrap the data.
268
*/
269
void* release() {
270
void* p = (void*) _base_addr;
271
_base_addr = NULL;
272
return p;
273
}
274
275
virtual void print_on(outputStream* st) const;
276
277
protected:
278
GuardHeader* get_head_guard() const { return (GuardHeader*) _base_addr; }
279
Guard* get_tail_guard() const { return (Guard*) (get_user_ptr() + get_user_size()); };
280
void set_user_bytes(u_char ch) {
281
memset(get_user_ptr(), ch, get_user_size());
282
}
283
284
public:
285
/**
286
* Return the total size required for wrapping the given user size.
287
*
288
* @return the total size required for wrapping the given user size.
289
*/
290
static size_t get_total_size(size_t user_size) {
291
size_t total_size = sizeof(GuardHeader) + user_size + sizeof(Guard);
292
assert(total_size > user_size, "Unexpected wrap-around");
293
return total_size;
294
}
295
296
// Helper functions...
297
298
/**
299
* Wrap a copy of size "len" of "ptr".
300
*
301
* @param ptr the memory to be copied
302
* @param len the length of the copy
303
* @param tag optional general purpose tag (see GuardedMemory::get_tag())
304
*
305
* @return guarded wrapped memory pointer to the user area, or NULL if OOM.
306
*/
307
static void* wrap_copy(const void* p, const size_t len, const void* tag = NULL);
308
309
/**
310
* Free wrapped copy.
311
*
312
* Frees memory copied with "wrap_copy()".
313
*
314
* @param p memory returned by "wrap_copy()".
315
*
316
* @return true if guards were verified as intact. false indicates a buffer overrun.
317
*/
318
static bool free_copy(void* p);
319
320
// Testing...
321
#ifndef PRODUCT
322
static void test_guarded_memory(void);
323
#endif
324
}; // GuardedMemory
325
326
#endif // SHARE_VM_MEMORY_GUARDED_MEMORY_HPP
327
328