GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/services/mallocSiteTable.hpp
/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
#define SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/allocationSite.hpp"
#include "services/mallocTracker.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"

// MallocSite represents a code path that eventually calls
// os::malloc() to allocate memory.
class MallocSite : public AllocationSite<MemoryCounter> {
 public:
  MallocSite() :
    AllocationSite<MemoryCounter>(NativeCallStack::empty_stack(), mtNone) {}

  MallocSite(const NativeCallStack& stack, MEMFLAGS flags) :
    AllocationSite<MemoryCounter>(stack, flags) {}


  void allocate(size_t size)   { data()->allocate(size);   }
  void deallocate(size_t size) { data()->deallocate(size); }

  // Memory allocated from this code path
  size_t size() const { return peek()->size(); }
  // The number of calls that were made from this code path
  size_t count() const { return peek()->count(); }
};
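
// Illustrative use (not part of the original header): the malloc tracker keeps
// one MallocSite per distinct call stack and bumps its counters on every
// recorded allocation from that path. `stack` below is an assumed
// NativeCallStack captured at the allocation site:
//
//   MallocSite site(stack, mtNMT);
//   site.allocate(1024);    // an os::malloc() of 1K attributed to this path
//   site.allocate(512);
//   // site.size()  -> bytes currently recorded against this call site
//   // site.count() -> number of recorded allocation calls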

// Malloc site hashtable entry
class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
 private:
  MallocSite                _malloc_site;
  MallocSiteHashtableEntry* _next;

 public:
  MallocSiteHashtableEntry() : _next(NULL) { }

  MallocSiteHashtableEntry(NativeCallStack stack, MEMFLAGS flags):
    _malloc_site(stack, flags), _next(NULL) {
    assert(flags != mtNone, "Expect a real memory type");
  }

  inline const MallocSiteHashtableEntry* next() const {
    return _next;
  }

  // Insert an entry atomically.
  // Return true if the entry is inserted successfully.
  // The operation can fail due to contention from other threads.
  bool atomic_insert(const MallocSiteHashtableEntry* entry) {
    return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next,
      NULL) == NULL);
  }

  void set_callsite(const MallocSite& site) {
    _malloc_site = site;
  }

  inline const MallocSite* peek() const { return &_malloc_site; }
  inline MallocSite* data()             { return &_malloc_site; }

  inline long hash() const { return _malloc_site.hash(); }
  inline bool equals(const NativeCallStack& stack) const {
    return _malloc_site.equals(stack);
  }
  // Allocation/deallocation on this allocation site
  inline void allocate(size_t size)   { _malloc_site.allocate(size);   }
  inline void deallocate(size_t size) { _malloc_site.deallocate(size); }
  // Memory counters
  inline size_t size()  const { return _malloc_site.size();  }
  inline size_t count() const { return _malloc_site.count(); }
};
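
// Illustrative sketch (not part of the original header): a bucket chain can be
// extended lock-free with atomic_insert(), retrying the walk whenever another
// thread wins the CAS on the same _next field. `head`, `new_entry` and `stack`
// below are hypothetical locals of the caller:
//
//   MallocSiteHashtableEntry* e = head;
//   while (true) {
//     if (e->equals(stack)) return e;          // call site already present
//     if (e->next() == NULL && e->atomic_insert(new_entry)) {
//       return new_entry;                      // appended at the tail
//     }
//     // Lost the race or not at the tail yet; keep walking the chain.
//     e = const_cast<MallocSiteHashtableEntry*>(e->next());
//   }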

// The walker visits every entry in the MallocSiteTable
class MallocSiteWalker : public StackObj {
 public:
  virtual bool do_malloc_site(const MallocSite* e) { return false; }
};
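
// Illustrative sketch (not part of the original header): a concrete walker
// overrides do_malloc_site() and returns true to keep the walk going, e.g. to
// sum the bytes attributed to all call sites (the class name is hypothetical):
//
//   class TotalSizeWalker : public MallocSiteWalker {
//    private:
//     size_t _total;
//    public:
//     TotalSizeWalker() : _total(0) { }
//     virtual bool do_malloc_site(const MallocSite* e) {
//       _total += e->size();
//       return true;    // continue walking
//     }
//     size_t total() const { return _total; }
//   };
//
//   // Usage: TotalSizeWalker w; MallocSiteTable::walk_malloc_site(&w);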

/*
 * Native memory tracking call site table.
 * The table is only needed when detail tracking is enabled.
 */
class MallocSiteTable : AllStatic {
 private:
  // The number of hash buckets in this hashtable. The number should
  // be tuned if malloc activity changes significantly.
  // The statistics data can be obtained via jcmd:
  //   jcmd <pid> VM.native_memory statistics.

  // Currently, the (number of buckets / number of entries) ratio is
  // about 1 / 6
  enum {
    table_base_size = 128,  // The base size is calculated from statistics to give
                            // a table ratio of around 1:6
    table_size = (table_base_size * NMT_TrackingStackDepth - 1)
  };
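
  // For example, assuming the default NMT_TrackingStackDepth of 4 (defined in
  // nmtCommon.hpp), table_size works out to 128 * 4 - 1 = 511 buckets.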

  // This is a very special lock that allows multiple shared accesses (sharedLock), but
  // once exclusive access (exclusiveLock) is requested, all shared accesses are
  // rejected forever.
  class AccessLock : public StackObj {
    enum LockState {
      NoLock,
      SharedLock,
      ExclusiveLock
    };

   private:
    // A very large negative number. The only way to "overflow"
    // this number is to have more than -min_jint threads in
    // this process, which is not going to happen in the foreseeable future.
    const static int _MAGIC_ = min_jint;

    LockState     _lock_state;
    volatile int* _lock;
   public:
    AccessLock(volatile int* lock) :
      _lock(lock), _lock_state(NoLock) {
    }

    ~AccessLock() {
      if (_lock_state == SharedLock) {
        Atomic::dec((volatile jint*)_lock);
      }
    }
    // Acquire shared lock.
    // Return true if shared access is granted.
    inline bool sharedLock() {
      jint res = Atomic::add(1, _lock);
      if (res < 0) {
        Atomic::add(-1, _lock);
        return false;
      }
      _lock_state = SharedLock;
      return true;
    }
    // Acquire exclusive lock
    void exclusiveLock();
  };
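
  // A minimal sketch of how exclusiveLock() can realize the protocol described
  // above (the out-of-line definition lives in the corresponding .cpp file):
  // add _MAGIC_ to the counter so it becomes, and stays, a large negative
  // value, which makes every later sharedLock() attempt fail, then wait for
  // the readers that were already inside to drain out. Hypothetical body:
  //
  //   jint val, target;
  //   do {
  //     val    = *_lock;
  //     target = _MAGIC_ + val;                      // large negative value
  //   } while (Atomic::cmpxchg(target, _lock, val) != val);
  //   while (*_lock != _MAGIC_) { /* spin/sleep */ } // readers still inside
  //   _lock_state = ExclusiveLock;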

 public:
  static bool initialize();
  static void shutdown();

  NOT_PRODUCT(static int access_peak_count() { return _peak_count; })

  // Number of hash buckets
  static inline int hash_buckets() { return (int)table_size; }

  // Access and copy a call stack from this table. A shared lock should be
  // acquired before accessing the entry.
  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
      size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        stack = *site->call_stack();
        return true;
      }
    }
    return false;
  }

  // Record a new allocation from the specified call path.
  // Return true if the allocation is recorded successfully; bucket_idx
  // and pos_idx are also updated to indicate the entry where the allocation
  // information was recorded.
  // Returning false occurs only under rare scenarios:
  //   1. out of memory
  //   2. hash bucket overflow
  static inline bool allocation_at(const NativeCallStack& stack, size_t size,
      size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx, flags);
      if (site != NULL) site->allocate(size);
      return site != NULL;
    }
    return false;
  }

  // Record a memory deallocation. bucket_idx and pos_idx indicate where the allocation
  // information was recorded.
  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        site->deallocate(size);
        return true;
      }
    }
    return false;
  }
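
  // Typical pairing (illustrative only): the caller keeps the bucket_idx/pos_idx
  // returned by allocation_at() alongside the allocation (the malloc tracker
  // stores them in the header it prepends to each tracked block), so the
  // matching free can hand them back to deallocation_at():
  //
  //   size_t bucket_idx, pos_idx;
  //   if (MallocSiteTable::allocation_at(stack, size, &bucket_idx, &pos_idx, flags)) {
  //     // remember bucket_idx/pos_idx with the block ...
  //   }
  //   ...
  //   MallocSiteTable::deallocation_at(size, bucket_idx, pos_idx);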

  // Walk this table.
  static bool walk_malloc_site(MallocSiteWalker* walker);

 private:
  static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key, MEMFLAGS flags);
  static void reset();

  // Delete a bucket's linked list
  static void delete_linked_list(MallocSiteHashtableEntry* head);

  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags);
  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
  static bool walk(MallocSiteWalker* walker);

  static inline unsigned int hash_to_index(unsigned int hash) {
    return (hash % table_size);
  }

  static inline const NativeCallStack* hash_entry_allocation_stack() {
    return (NativeCallStack*)_hash_entry_allocation_stack;
  }

 private:
  // Counter for tracking concurrent accesses
  static volatile int _access_count;

  // The callsite hashtable. It has to be a static table,
  // since malloc calls can come from the C runtime linker.
  static MallocSiteHashtableEntry* _table[table_size];


  // Reserve enough memory up front for placing the following objects.

  // The memory for the hashtable entry allocation stack object
  static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
  // The memory for the hashtable entry allocation callsite object
  static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
  NOT_PRODUCT(static int _peak_count;)
};

#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP