GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/memory/guardedMemory.hpp
/*
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_MEMORY_GUARDEDMEMORY_HPP
#define SHARE_MEMORY_GUARDEDMEMORY_HPP

#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"

/**
 * Guarded memory for detecting buffer overrun.
 *
 * Allows allocations to be wrapped with padded bytes of a known byte pattern,
 * that is a "guard". Guard patterns may be verified to detect buffer overruns.
 *
 * Primarily used by "debug malloc" and "checked JNI".
 *
 * Memory layout:
 *
 * |Offset             | Content            | Description    |
 * |------------------------------------------------------------
 * |base_addr          | 0xABABABABABABABAB | Head guard     |
 * |+16                | <size_t:user_size> | User data size |
 * |+sizeof(uintptr_t) | <tag>              | Tag word       |
 * |+sizeof(void*)     | 0xF1 <user_data>   | User data      |
 * |+user_size         | 0xABABABABABABABAB | Tail guard     |
 * -------------------------------------------------------------
 *
 * Where:
 *  - guard padding uses "badResourceValue" (0xAB)
 *  - tag word is general purpose
 *  - user data
 *    -- initially padded with "uninitBlockPad" (0xF1),
 *    -- overwritten with "freeBlockPad" (0xBA) when freed
 *
 * Usage:
 *
 * * Allocations: one may wrap allocations with guard memory:
 * <code>
 *   Thing* alloc_thing() {
 *     void* mem = user_alloc_fn(GuardedMemory::get_total_size(sizeof(Thing)));
 *     GuardedMemory guarded(mem, sizeof(Thing));
 *     return (Thing*) guarded.get_user_ptr();
 *   }
 * </code>
 * * Verify: check that the memory guards are still intact
 * <code>
 *   bool verify_thing(Thing* thing) {
 *     GuardedMemory guarded((void*)thing);
 *     return guarded.verify_guards();
 *   }
 * </code>
 * * Free: one may mark bytes as freed (further debugging support)
 * <code>
 *   void free_thing(Thing* thing) {
 *     GuardedMemory guarded((void*)thing);
 *     assert(guarded.verify_guards(), "Corrupt thing");
 *     user_free_fn(guarded.release_for_freeing());
 *   }
 * </code>
 */
class GuardedMemory : StackObj { // Wrapper on stack

  friend class GuardedMemoryTest;
  // Private inner classes for memory layout...

 protected:

  /**
   * Guard class for header and trailer known pattern to test for overwrites.
   */
  class Guard { // Class for raw memory (no vtbl allowed)
    friend class GuardedMemory;
   protected:
    enum {
      GUARD_SIZE = 16
    };

    u_char _guard[GUARD_SIZE];

   public:

    void build() {
      u_char* c = _guard; // Possibly unaligned if tail guard
      u_char* end = c + GUARD_SIZE;
      while (c < end) {
        *c = badResourceValue;
        c++;
      }
    }

    bool verify() const {
      u_char* c = (u_char*) _guard;
      u_char* end = c + GUARD_SIZE;
      while (c < end) {
        if (*c != badResourceValue) {
          return false;
        }
        c++;
      }
      return true;
    }

  }; // GuardedMemory::Guard
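
  // Editor's illustrative sketch (not part of the original header): how a single
  // corrupted byte trips Guard::verify(). Guard is a protected nested class, so in
  // practice only GuardedMemory and the GuardedMemoryTest friend exercise it directly.
  // <code>
  //   Guard g;
  //   g.build();                  // fill all GUARD_SIZE bytes with badResourceValue (0xAB)
  //   assert(g.verify(), "freshly built guard should verify");
  //   ((u_char*) &g)[0] = 0x00;   // simulate an overrun clobbering the first guard byte
  //   assert(!g.verify(), "clobbered guard should fail verification");
  // </code>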

  /**
   * Header guard and size
   */
  class GuardHeader : Guard {
    friend class GuardedMemory;
   protected:
    // Take care when modifying fields here; changes will affect alignment,
    // e.g. the x86 ABI's 16 byte stack alignment.
    union {
      uintptr_t __unused_full_word1;
      size_t _user_size;
    };
    void* _tag;
   public:
    void set_user_size(const size_t usz) { _user_size = usz; }
    size_t get_user_size() const { return _user_size; }

    void set_tag(const void* tag) { _tag = (void*) tag; }
    void* get_tag() const { return _tag; }

  }; // GuardedMemory::GuardHeader
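
  // Editor's note (illustrative, assuming a typical LP64 platform with no extra
  // padding): sizeof(Guard) is 16, the anonymous union and _tag contribute 8 bytes
  // each, so sizeof(GuardHeader) is 32 and the user data begins 32 bytes past the
  // base address, matching the "+16 / +sizeof(uintptr_t) / +sizeof(void*)" offsets
  // in the layout table above.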

  // Guarded Memory...

 protected:
  u_char* _base_addr;

 public:

  /**
   * Create new guarded memory.
   *
   * Wraps the memory starting at the given "base_ptr" with guards. Use
   * "get_user_ptr()" to get a pointer suitable for user data.
   *
   * @param base_ptr  allocation to be wrapped; must be at least "GuardedMemory::get_total_size()" bytes.
   * @param user_size the size of the user data to be wrapped.
   * @param tag       optional general purpose tag.
   */
  GuardedMemory(void* base_ptr, const size_t user_size, const void* tag = NULL) {
    wrap_with_guards(base_ptr, user_size, tag);
  }

  /**
   * Wrap existing guarded memory.
   *
   * To use this constructor, one must have created guarded memory with
   * "GuardedMemory(void*, size_t, void*)" (or indirectly via a helper, e.g. "wrap_copy()").
   *
   * @param userp existing wrapped memory.
   */
  GuardedMemory(void* userp) {
    u_char* user_ptr = (u_char*) userp;
    assert((uintptr_t) user_ptr > (sizeof(GuardHeader) + 0x1000), "Invalid pointer");
    _base_addr = (user_ptr - sizeof(GuardHeader));
  }

  /**
   * Wrap the given memory with guards.
   *
   * Wraps the memory starting at the given "base_ptr" with guards. Allows reuse of a
   * stack allocated helper.
   *
   * @param base_ptr  allocation to be wrapped; must be at least "GuardedMemory::get_total_size()" bytes.
   * @param user_size the size of the user data to be wrapped.
   * @param tag       optional general purpose tag.
   *
   * @return user data pointer (inner pointer into the supplied "base_ptr").
   */
  void* wrap_with_guards(void* base_ptr, size_t user_size, const void* tag = NULL) {
    assert(base_ptr != NULL, "Attempt to wrap NULL with memory guard");
    _base_addr = (u_char*) base_ptr;
    get_head_guard()->build();
    get_head_guard()->set_user_size(user_size);
    get_tail_guard()->build();
    set_tag(tag);
    set_user_bytes(uninitBlockPad);
    assert(verify_guards(), "Expected valid memory guards");
    return get_user_ptr();
  }
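
  // Editor's illustrative sketch (not in the original header), assuming a plain
  // "::malloc" allocation purely for demonstration:
  // <code>
  //   size_t user_size = 64;
  //   void* base = ::malloc(GuardedMemory::get_total_size(user_size));
  //   GuardedMemory guarded(base, user_size);   // builds head and tail guards
  //   void* user = guarded.get_user_ptr();      // == (u_char*) base + sizeof(GuardHeader)
  //   // the user_size bytes at "user" are now padded with uninitBlockPad (0xF1)
  // </code>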

  /**
   * Verify head and tail guards.
   *
   * @return true if guards are intact; false indicates a buffer overrun.
   */
  bool verify_guards() const {
    if (_base_addr != NULL) {
      return (get_head_guard()->verify() && get_tail_guard()->verify());
    }
    return false;
  }
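
  // Editor's illustrative sketch (not in the original header): a write one byte
  // past the user area lands in the tail guard and is caught by verify_guards().
  // "base" and "user_size" are the hypothetical values from the sketch above.
  // <code>
  //   GuardedMemory guarded(base, user_size);
  //   u_char* user = guarded.get_user_ptr();
  //   user[user_size] = 0x00;   // off-by-one overrun into the tail guard
  //   assert(!guarded.verify_guards(), "overrun should be detected");
  // </code>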

  /**
   * Set the general purpose tag.
   *
   * @param tag general purpose tag.
   */
  void set_tag(const void* tag) { get_head_guard()->set_tag(tag); }

  /**
   * Return the general purpose tag.
   *
   * @return the general purpose tag, defaults to NULL.
   */
  void* get_tag() const { return get_head_guard()->get_tag(); }

  /**
   * Return the size of the user data.
   *
   * @return the size of the user data.
   */
  size_t get_user_size() const {
    assert(_base_addr != NULL, "Not wrapping any memory");
    return get_head_guard()->get_user_size();
  }

  /**
   * Return the user data pointer.
   *
   * @return the user data pointer.
   */
  u_char* get_user_ptr() const {
    assert(_base_addr != NULL, "Not wrapping any memory");
    return _base_addr + sizeof(GuardHeader);
  }
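
  // Editor's note (illustrative): get_user_ptr() and the GuardedMemory(void*)
  // constructor are inverses; re-wrapping the user pointer recovers the same block.
  // <code>
  //   GuardedMemory guarded(base, user_size);           // hypothetical wrapped block
  //   GuardedMemory rewrapped(guarded.get_user_ptr());  // _base_addr == base again
  //   assert(rewrapped.verify_guards(), "same guards, same result");
  // </code>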

  /**
   * Release the wrapped pointer for resource freeing.
   *
   * Pads the user data with "freeBlockPad", and disassociates the helper.
   *
   * @return the original base pointer used to wrap the data.
   */
  void* release_for_freeing() {
    set_user_bytes(freeBlockPad);
    return release();
  }

  /**
   * Disassociate the helper from the original base address.
   *
   * @return the original base pointer used to wrap the data.
   */
  void* release() {
    void* p = (void*) _base_addr;
    _base_addr = NULL;
    return p;
  }
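
  // Editor's illustrative sketch (not in the original header), assuming the block
  // was obtained from "::malloc" as in the allocation sketch above:
  // <code>
  //   GuardedMemory guarded(user_ptr);             // re-wrap an existing user pointer
  //   assert(guarded.verify_guards(), "Corrupt block");
  //   void* base = guarded.release_for_freeing();  // user bytes now padded with freeBlockPad (0xBA)
  //   ::free(base);                                // free the original base pointer
  //   // guarded.verify_guards() now returns false: the helper is disassociated
  // </code>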

  virtual void print_on(outputStream* st) const;

 protected:
  GuardHeader* get_head_guard() const { return (GuardHeader*) _base_addr; }
  Guard* get_tail_guard() const { return (Guard*) (get_user_ptr() + get_user_size()); }
  void set_user_bytes(u_char ch) {
    memset(get_user_ptr(), ch, get_user_size());
  }

 public:
  /**
   * Return the total size required for wrapping the given user size.
   *
   * @param user_size the size of the user data.
   *
   * @return the total size required for wrapping the given user size.
   */
  static size_t get_total_size(size_t user_size) {
    size_t total_size = sizeof(GuardHeader) + user_size + sizeof(Guard);
    assert(total_size > user_size, "Unexpected wrap-around");
    return total_size;
  }
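
  // Editor's worked example (illustrative, assuming a typical LP64 platform):
  //   get_total_size(64) == sizeof(GuardHeader) + 64 + sizeof(Guard)
  //                      == 32 + 64 + 16 == 112 bytes.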

  // Helper functions...

  /**
   * Wrap a copy of size "len" of "p".
   *
   * @param p   the memory to be copied
   * @param len the length of the copy
   * @param tag optional general purpose tag (see GuardedMemory::get_tag())
   *
   * @return guarded wrapped memory pointer to the user area, or NULL if OOM.
   */
  static void* wrap_copy(const void* p, const size_t len, const void* tag = NULL);

  /**
   * Free a wrapped copy.
   *
   * Frees memory copied with "wrap_copy()".
   *
   * @param p memory returned by "wrap_copy()".
   *
   * @return true if the guards were verified as intact; false indicates a buffer overrun.
   */
  static bool free_copy(void* p);
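
  // Editor's illustrative sketch (not in the original header): copying and
  // releasing a buffer via the static helpers (the "checked JNI" use case
  // mentioned above).
  // <code>
  //   const char src[] = "hello";
  //   void* copy = GuardedMemory::wrap_copy(src, sizeof(src));  // guarded copy of the data
  //   // ... hand "copy" out, use it, then:
  //   bool ok = GuardedMemory::free_copy(copy);                 // verifies guards, then frees
  //   assert(ok, "buffer overrun detected on wrapped copy");
  // </code>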

}; // GuardedMemory

#endif // SHARE_MEMORY_GUARDEDMEMORY_HPP