GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/os/linux/os_linux.hpp
/*
 * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_LINUX_OS_LINUX_HPP
#define OS_LINUX_OS_LINUX_HPP

// Linux_OS defines the interface to Linux operating systems

// Information about the protection of the page at address '0' on this os.
static bool zero_page_read_protected() { return true; }

class Linux {
  friend class CgroupSubsystem;
  friend class os;
  friend class OSContainer;
  friend class TestReserveMemorySpecial;

  static int (*_pthread_getcpuclockid)(pthread_t, clockid_t *);
  static int (*_pthread_setname_np)(pthread_t, const char*);

  static address _initial_thread_stack_bottom;
  static uintptr_t _initial_thread_stack_size;

  static const char *_libc_version;
  static const char *_libpthread_version;

  static bool _supports_fast_thread_cpu_time;

  static GrowableArray<int>* _cpu_to_node;
  static GrowableArray<int>* _nindex_to_node;

  static size_t _default_large_page_size;

 protected:

  static julong _physical_memory;
  static pthread_t _main_thread;
  static int _page_size;

  static julong available_memory();
  static julong physical_memory() { return _physical_memory; }
  static void set_physical_memory(julong phys_mem) { _physical_memory = phys_mem; }
  static int active_processor_count();

  static void initialize_system_info();

  static int commit_memory_impl(char* addr, size_t bytes, bool exec);
  static int commit_memory_impl(char* addr, size_t bytes,
                                size_t alignment_hint, bool exec);

  static void set_libc_version(const char *s) { _libc_version = s; }
  static void set_libpthread_version(const char *s) { _libpthread_version = s; }

  static void rebuild_cpu_to_node_map();
  static void rebuild_nindex_to_node_map();
  static GrowableArray<int>* cpu_to_node() { return _cpu_to_node; }
  static GrowableArray<int>* nindex_to_node() { return _nindex_to_node; }

  static size_t default_large_page_size();
  static size_t scan_default_large_page_size();
  static os::PageSizes scan_multiple_page_support();

  static bool setup_large_page_type(size_t page_size);
  static bool transparent_huge_pages_sanity_check(bool warn, size_t pages_size);
  static bool hugetlbfs_sanity_check(bool warn, size_t page_size);
  static bool shm_hugetlbfs_sanity_check(bool warn, size_t page_size);

  static int hugetlbfs_page_size_flag(size_t page_size);

  static char* reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec);
  static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec);
  static bool commit_memory_special(size_t bytes, size_t page_size, char* req_addr, bool exec);

  static bool release_memory_special_impl(char* base, size_t bytes);
  static bool release_memory_special_shm(char* base, size_t bytes);
  static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);

  static void print_process_memory_info(outputStream* st);
  static void print_system_memory_info(outputStream* st);
  static bool print_container_info(outputStream* st);
  static void print_steal_info(outputStream* st);
  static void print_distro_info(outputStream* st);
  static void print_libversion_info(outputStream* st);
  static void print_proc_sys_info(outputStream* st);
  static bool print_ld_preload_file(outputStream* st);
  static void print_uptime_info(outputStream* st);

 public:
  struct CPUPerfTicks {
    uint64_t used;
    uint64_t usedKernel;
    uint64_t total;
    uint64_t steal;
    bool has_steal_ticks;
  };

  // which_logical_cpu=-1 returns accumulated ticks for all cpus.
  static bool get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu);
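
  // Illustrative sketch (not part of HotSpot): CPU load can be derived from two
  // get_tick_information() samples taken some interval apart; the field names are
  // those of CPUPerfTicks above, while the sampling loop itself is hypothetical
  // (a real caller would sleep between the two samples):
  //
  //   os::Linux::CPUPerfTicks t0, t1;
  //   if (os::Linux::get_tick_information(&t0, -1) &&   // -1 == all cpus
  //       os::Linux::get_tick_information(&t1, -1)) {
  //     uint64_t used  = t1.used  - t0.used;
  //     uint64_t total = t1.total - t0.total;
  //     double load = (total != 0) ? (double)used / (double)total : 0.0;
  //   }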
  static bool _stack_is_executable;
  static void *dlopen_helper(const char *name, char *ebuf, int ebuflen);
  static void *dll_load_in_vmthread(const char *name, char *ebuf, int ebuflen);
  static const char *dll_path(void* lib);

  static void init_thread_fpu_state();
  static int get_fpu_control_word();
  static void set_fpu_control_word(int fpu_control);
  static pthread_t main_thread(void) { return _main_thread; }
  // returns kernel thread id (similar to LWP id on Solaris), which can be
  // used to access /proc
  static pid_t gettid();

  static address initial_thread_stack_bottom(void) { return _initial_thread_stack_bottom; }
  static uintptr_t initial_thread_stack_size(void) { return _initial_thread_stack_size; }

  static int page_size(void) { return _page_size; }
  static void set_page_size(int val) { _page_size = val; }

  static intptr_t* ucontext_get_sp(const ucontext_t* uc);
  static intptr_t* ucontext_get_fp(const ucontext_t* uc);

  // GNU libc and libpthread version strings
  static const char *libc_version() { return _libc_version; }
  static const char *libpthread_version() { return _libpthread_version; }

  static void libpthread_init();
  static void sched_getcpu_init();
  static bool libnuma_init();
  static void* libnuma_dlsym(void* handle, const char* name);
  // libnuma v2 (libnuma_1.2) symbols
  static void* libnuma_v2_dlsym(void* handle, const char* name);

  // Return default guard size for the specified thread type
  static size_t default_guard_size(os::ThreadType thr_type);

  static void capture_initial_stack(size_t max_size);

  // Stack overflow handling
  static bool manually_expand_stack(JavaThread * t, address addr);

  // fast POSIX clocks support
  static void fast_thread_clock_init(void);

  static int pthread_getcpuclockid(pthread_t tid, clockid_t *clock_id) {
    return _pthread_getcpuclockid ? _pthread_getcpuclockid(tid, clock_id) : -1;
  }

  static bool supports_fast_thread_cpu_time() {
    return _supports_fast_thread_cpu_time;
  }

  static jlong fast_thread_cpu_time(clockid_t clockid);
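
  // Illustrative sketch (not part of HotSpot): fast_thread_cpu_time() builds on the
  // per-thread CPU-time clock that pthread_getcpuclockid() resolves. Outside the VM
  // the same idea looks roughly like this, assuming a POSIX system where
  // pthread_getcpuclockid and clock_gettime are available:
  //
  //   clockid_t cid;
  //   struct timespec ts;
  //   if (pthread_getcpuclockid(pthread_self(), &cid) == 0 &&
  //       clock_gettime(cid, &ts) == 0) {
  //     // CPU time consumed by this thread, in nanoseconds.
  //     long long cpu_ns = (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
  //   }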

  // Determine if the vmid is the parent pid for a child in a PID namespace.
  // Return the namespace pid if so, otherwise -1.
  static int get_namespace_pid(int vmid);

  // Output structure for query_process_memory_info()
  struct meminfo_t {
    ssize_t vmsize; // current virtual size
    ssize_t vmpeak; // peak virtual size
    ssize_t vmrss; // current resident set size
    ssize_t vmhwm; // peak resident set size
    ssize_t vmswap; // swapped out
    ssize_t rssanon; // resident set size (anonymous mappings, needs 4.5)
    ssize_t rssfile; // resident set size (file mappings, needs 4.5)
    ssize_t rssshmem; // resident set size (shared mappings, needs 4.5)
  };

  // Attempts to query memory information about the current process and return it in the output structure.
  // May fail (returns false) or succeed (returns true) even if not all output fields are available;
  // any unavailable field will contain -1.
  static bool query_process_memory_info(meminfo_t* info);
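
  // Illustrative sketch (not part of HotSpot): a caller is expected to treat -1 as
  // "value not available", since the call can succeed while leaving some fields unset
  // (for example the rss* breakdown needs a 4.5+ kernel). Hypothetical usage:
  //
  //   os::Linux::meminfo_t mi;
  //   if (os::Linux::query_process_memory_info(&mi)) {
  //     if (mi.vmrss != -1) {
  //       // mi.vmrss holds the current resident set size; unavailable fields stay at -1.
  //     }
  //   }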

  // Stack repair handling

  // none present

 private:
  static void numa_init();
  static void expand_stack_to(address bottom);

  typedef int (*sched_getcpu_func_t)(void);
  typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
  typedef int (*numa_node_to_cpus_v2_func_t)(int node, void *mask);
  typedef int (*numa_max_node_func_t)(void);
  typedef int (*numa_num_configured_nodes_func_t)(void);
  typedef int (*numa_available_func_t)(void);
  typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
  typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
  typedef void (*numa_interleave_memory_v2_func_t)(void *start, size_t size, struct bitmask* mask);
  typedef struct bitmask* (*numa_get_membind_func_t)(void);
  typedef struct bitmask* (*numa_get_interleave_mask_func_t)(void);
  typedef long (*numa_move_pages_func_t)(int pid, unsigned long count, void **pages, const int *nodes, int *status, int flags);
  typedef void (*numa_set_preferred_func_t)(int node);
  typedef void (*numa_set_bind_policy_func_t)(int policy);
  typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n);
  typedef int (*numa_distance_func_t)(int node1, int node2);

  static sched_getcpu_func_t _sched_getcpu;
  static numa_node_to_cpus_func_t _numa_node_to_cpus;
  static numa_node_to_cpus_v2_func_t _numa_node_to_cpus_v2;
  static numa_max_node_func_t _numa_max_node;
  static numa_num_configured_nodes_func_t _numa_num_configured_nodes;
  static numa_available_func_t _numa_available;
  static numa_tonode_memory_func_t _numa_tonode_memory;
  static numa_interleave_memory_func_t _numa_interleave_memory;
  static numa_interleave_memory_v2_func_t _numa_interleave_memory_v2;
  static numa_set_bind_policy_func_t _numa_set_bind_policy;
  static numa_bitmask_isbitset_func_t _numa_bitmask_isbitset;
  static numa_distance_func_t _numa_distance;
  static numa_get_membind_func_t _numa_get_membind;
  static numa_get_interleave_mask_func_t _numa_get_interleave_mask;
  static numa_move_pages_func_t _numa_move_pages;
  static numa_set_preferred_func_t _numa_set_preferred;
  static unsigned long* _numa_all_nodes;
  static struct bitmask* _numa_all_nodes_ptr;
  static struct bitmask* _numa_nodes_ptr;
  static struct bitmask* _numa_interleave_bitmask;
  static struct bitmask* _numa_membind_bitmask;

  static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
  static void set_numa_node_to_cpus(numa_node_to_cpus_func_t func) { _numa_node_to_cpus = func; }
  static void set_numa_node_to_cpus_v2(numa_node_to_cpus_v2_func_t func) { _numa_node_to_cpus_v2 = func; }
  static void set_numa_max_node(numa_max_node_func_t func) { _numa_max_node = func; }
  static void set_numa_num_configured_nodes(numa_num_configured_nodes_func_t func) { _numa_num_configured_nodes = func; }
  static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
  static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
  static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
  static void set_numa_interleave_memory_v2(numa_interleave_memory_v2_func_t func) { _numa_interleave_memory_v2 = func; }
  static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
  static void set_numa_bitmask_isbitset(numa_bitmask_isbitset_func_t func) { _numa_bitmask_isbitset = func; }
  static void set_numa_distance(numa_distance_func_t func) { _numa_distance = func; }
  static void set_numa_get_membind(numa_get_membind_func_t func) { _numa_get_membind = func; }
  static void set_numa_get_interleave_mask(numa_get_interleave_mask_func_t func) { _numa_get_interleave_mask = func; }
  static void set_numa_move_pages(numa_move_pages_func_t func) { _numa_move_pages = func; }
  static void set_numa_set_preferred(numa_set_preferred_func_t func) { _numa_set_preferred = func; }
  static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
  static void set_numa_all_nodes_ptr(struct bitmask **ptr) { _numa_all_nodes_ptr = (ptr == NULL ? NULL : *ptr); }
  static void set_numa_nodes_ptr(struct bitmask **ptr) { _numa_nodes_ptr = (ptr == NULL ? NULL : *ptr); }
  static void set_numa_interleave_bitmask(struct bitmask* ptr) { _numa_interleave_bitmask = ptr; }
  static void set_numa_membind_bitmask(struct bitmask* ptr) { _numa_membind_bitmask = ptr; }
  static int sched_getcpu_syscall(void);

  enum NumaAllocationPolicy {
    NotInitialized,
    Membind,
    Interleave
  };
  static NumaAllocationPolicy _current_numa_policy;

#ifdef __GLIBC__
  struct glibc_mallinfo {
    int arena;
    int ordblks;
    int smblks;
    int hblks;
    int hblkhd;
    int usmblks;
    int fsmblks;
    int uordblks;
    int fordblks;
    int keepcost;
  };

  struct glibc_mallinfo2 {
    size_t arena;
    size_t ordblks;
    size_t smblks;
    size_t hblks;
    size_t hblkhd;
    size_t usmblks;
    size_t fsmblks;
    size_t uordblks;
    size_t fordblks;
    size_t keepcost;
  };

  typedef struct glibc_mallinfo (*mallinfo_func_t)(void);
  typedef struct glibc_mallinfo2 (*mallinfo2_func_t)(void);

  static mallinfo_func_t _mallinfo;
  static mallinfo2_func_t _mallinfo2;
#endif

 public:
  static int sched_getcpu() { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
  static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen);
  static int numa_max_node() { return _numa_max_node != NULL ? _numa_max_node() : -1; }
  static int numa_num_configured_nodes() {
    return _numa_num_configured_nodes != NULL ? _numa_num_configured_nodes() : -1;
  }
  static int numa_available() { return _numa_available != NULL ? _numa_available() : -1; }
  static int numa_tonode_memory(void *start, size_t size, int node) {
    return _numa_tonode_memory != NULL ? _numa_tonode_memory(start, size, node) : -1;
  }

  static bool is_running_in_interleave_mode() {
    return _current_numa_policy == Interleave;
  }

  static void set_configured_numa_policy(NumaAllocationPolicy numa_policy) {
    _current_numa_policy = numa_policy;
  }

  static NumaAllocationPolicy identify_numa_policy() {
    for (int node = 0; node <= Linux::numa_max_node(); node++) {
      if (Linux::_numa_bitmask_isbitset(Linux::_numa_interleave_bitmask, node)) {
        return Interleave;
      }
    }
    return Membind;
  }

  static void numa_interleave_memory(void *start, size_t size) {
    // Prefer v2 API
    if (_numa_interleave_memory_v2 != NULL) {
      if (is_running_in_interleave_mode()) {
        _numa_interleave_memory_v2(start, size, _numa_interleave_bitmask);
      } else if (_numa_membind_bitmask != NULL) {
        _numa_interleave_memory_v2(start, size, _numa_membind_bitmask);
      }
    } else if (_numa_interleave_memory != NULL) {
      _numa_interleave_memory(start, size, _numa_all_nodes);
    }
  }
  static void numa_set_preferred(int node) {
    if (_numa_set_preferred != NULL) {
      _numa_set_preferred(node);
    }
  }
  static void numa_set_bind_policy(int policy) {
    if (_numa_set_bind_policy != NULL) {
      _numa_set_bind_policy(policy);
    }
  }
  static int numa_distance(int node1, int node2) {
    return _numa_distance != NULL ? _numa_distance(node1, node2) : -1;
  }
  static long numa_move_pages(int pid, unsigned long count, void **pages, const int *nodes, int *status, int flags) {
    return _numa_move_pages != NULL ? _numa_move_pages(pid, count, pages, nodes, status, flags) : -1;
  }
  static int get_node_by_cpu(int cpu_id);
  static int get_existing_num_nodes();
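
  // Illustrative sketch (not part of HotSpot): the wrappers above degrade gracefully
  // when libnuma was not loaded (they return -1 rather than calling through a NULL
  // function pointer). A hypothetical NUMA-aware placement of a buffer 'buf' of
  // 'len' bytes could therefore look like:
  //
  //   int cpu  = os::Linux::sched_getcpu();            // -1 if the hook is missing
  //   int node = (cpu >= 0) ? os::Linux::get_node_by_cpu(cpu) : -1;
  //   if (node >= 0 && os::Linux::is_node_in_configured_nodes((unsigned int)node)) {
  //     os::Linux::numa_tonode_memory(buf, len, node); // place buf's pages on 'node'
  //   }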
  // Check if numa node is configured (non-zero memory node).
  static bool is_node_in_configured_nodes(unsigned int n) {
    if (_numa_bitmask_isbitset != NULL && _numa_all_nodes_ptr != NULL) {
      return _numa_bitmask_isbitset(_numa_all_nodes_ptr, n);
    } else
      return false;
  }
  // Check if numa node exists in the system (including zero memory nodes).
  static bool is_node_in_existing_nodes(unsigned int n) {
    if (_numa_bitmask_isbitset != NULL && _numa_nodes_ptr != NULL) {
      return _numa_bitmask_isbitset(_numa_nodes_ptr, n);
    } else if (_numa_bitmask_isbitset != NULL && _numa_all_nodes_ptr != NULL) {
      // Not all libnuma API v2 implement numa_nodes_ptr, so it's not possible
      // to trust the API version for checking its absence. On the other hand,
      // numa_nodes_ptr found in libnuma 2.0.9 and above is the only way to get
      // a complete view of all numa nodes in the system, hence numa_nodes_ptr
      // is used to handle CPU and nodes on architectures (like PowerPC) where
      // there can exist nodes with CPUs but no memory or vice-versa and the
      // nodes may be non-contiguous. For most of the architectures, like
      // x86_64, numa_nodes_ptr presents the same node set as found in
      // numa_all_nodes_ptr so it's possible to use numa_all_nodes_ptr as a
      // substitute.
      return _numa_bitmask_isbitset(_numa_all_nodes_ptr, n);
    } else
      return false;
  }
  // Check if node is in bound node set.
  static bool is_node_in_bound_nodes(int node) {
    if (_numa_bitmask_isbitset != NULL) {
      if (is_running_in_interleave_mode()) {
        return _numa_bitmask_isbitset(_numa_interleave_bitmask, node);
      } else {
        return _numa_membind_bitmask != NULL ? _numa_bitmask_isbitset(_numa_membind_bitmask, node) : false;
      }
    }
    return false;
  }
  // Check if bound to only one numa node.
  // Returns true if bound to a single numa node, otherwise returns false.
  static bool is_bound_to_single_node() {
    int nodes = 0;
    unsigned int node = 0;
    unsigned int highest_node_number = 0;

    if (_numa_membind_bitmask != NULL && _numa_max_node != NULL && _numa_bitmask_isbitset != NULL) {
      highest_node_number = _numa_max_node();
    } else {
      return false;
    }

    for (node = 0; node <= highest_node_number; node++) {
      if (_numa_bitmask_isbitset(_numa_membind_bitmask, node)) {
        nodes++;
      }
    }

    if (nodes == 1) {
      return true;
    } else {
      return false;
    }
  }

  static const GrowableArray<int>* numa_nindex_to_node() {
    return _nindex_to_node;
  }
};

#endif // OS_LINUX_OS_LINUX_HPP