GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/memory/allocation.inline.hpp
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
#define SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP

#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"

// Explicit C-heap memory management

void trace_heap_malloc(size_t size, const char* name, void *p);
void trace_heap_free(void *p);

#ifndef PRODUCT
// Increments unsigned long value for statistics (not atomic on MP).
inline void inc_stat_counter(volatile julong* dest, julong add_value) {
#if defined(SPARC) || defined(X86) || defined(AARCH64)
  // Sparc, X86 and AArch64 have atomic jlong (8 bytes) instructions
  julong value = Atomic::load((volatile jlong*)dest);
  value += add_value;
  Atomic::store((jlong)value, (volatile jlong*)dest);
#else
  // possible word-tearing during load/store
  *dest += add_value;
#endif
}
#endif
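
// Usage sketch (illustrative only, not part of the original header): callers
// keep a julong counter and bump it through inc_stat_counter; the counter
// name below is hypothetical.
//
//   volatile julong _chunk_allocations = 0;
//   inc_stat_counter(&_chunk_allocations, 1);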

// allocate using malloc; will fail if no memory available
inline char* AllocateHeap(size_t size, MEMFLAGS flags,
    const NativeCallStack& stack,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  char* p = (char*) os::malloc(size, flags, stack);
  #ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
  #endif
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
  }
  return p;
}

#ifdef __GNUC__
__attribute__((always_inline))
#endif
inline char* AllocateHeap(size_t size, MEMFLAGS flags,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  return AllocateHeap(size, flags, CURRENT_PC, alloc_failmode);
}
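
// Usage sketch (illustrative, not part of the original header): the default
// EXIT_OOM mode aborts the VM on allocation failure, while RETURN_NULL hands
// the failure back to the caller; mtInternal is one of the NMT MEMFLAGS tags.
//
//   char* buf = AllocateHeap(1024, mtInternal);                 // aborts on OOM
//   char* opt = AllocateHeap(1024, mtInternal,
//                            AllocFailStrategy::RETURN_NULL);   // may be NULL
//   if (opt == NULL) { /* degrade gracefully */ }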

#ifdef __GNUC__
__attribute__((always_inline))
#endif
inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  char* p = (char*) os::realloc(old, size, flag, CURRENT_PC);
  #ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
  #endif
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
  }
  return p;
}

inline void FreeHeap(void* p, MEMFLAGS memflags = mtInternal) {
  #ifdef ASSERT
  if (PrintMallocFree) trace_heap_free(p);
  #endif
  os::free(p, memflags);
}
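
// Usage sketch (illustrative, not part of the original header): blocks from
// AllocateHeap/ReallocateHeap must be released through FreeHeap with the
// same MEMFLAGS tag so NMT bookkeeping stays balanced.
//
//   char* p = AllocateHeap(64, mtInternal);
//   p = ReallocateHeap(p, 128, mtInternal);
//   FreeHeap(p, mtInternal);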


template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
    const NativeCallStack& stack) throw() {
  void* p = (void*)AllocateHeap(size, F, stack);
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
  return p;
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size) throw() {
  return CHeapObj<F>::operator new(size, CALLER_PC);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
    const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw() {
  void* p = (void*)AllocateHeap(size, F, stack,
      AllocFailStrategy::RETURN_NULL);
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
  return p;
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
    const std::nothrow_t& nothrow_constant) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
    const NativeCallStack& stack) throw() {
  return CHeapObj<F>::operator new(size, stack);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size)
    throw() {
  return CHeapObj<F>::operator new(size, CALLER_PC);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
    const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, stack);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
    const std::nothrow_t& nothrow_constant) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
}

template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){
  FreeHeap(p, F);
}

template <MEMFLAGS F> void CHeapObj<F>::operator delete [](void* p){
  FreeHeap(p, F);
}
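
// Usage sketch (illustrative, not part of the original header): VM-internal
// classes derive from CHeapObj so plain new/delete route through
// AllocateHeap/FreeHeap under the chosen NMT tag; MyTable is hypothetical.
//
//   class MyTable : public CHeapObj<mtInternal> {
//     int _entries;
//   };
//   MyTable* t = new MyTable();                 // exits VM on OOM
//   MyTable* u = new (std::nothrow) MyTable();  // NULL on OOM
//   delete t;
//   delete u;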

template <class E, MEMFLAGS F>
E* ArrayAllocator<E, F>::allocate(size_t length) {
  assert(_addr == NULL, "Already in use");

  _size = sizeof(E) * length;
  _use_malloc = _size < ArrayAllocatorMallocLimit;

  if (_use_malloc) {
    _addr = AllocateHeap(_size, F);
    if (_addr == NULL && _size >= (size_t)os::vm_allocation_granularity()) {
      // malloc failed; let's try with mmap instead
      _use_malloc = false;
    } else {
      return (E*)_addr;
    }
  }

  int alignment = os::vm_allocation_granularity();
  _size = align_size_up(_size, alignment);

  _addr = os::reserve_memory(_size, NULL, alignment, F MACOS_AARCH64_ONLY(, !ExecMem));
  if (_addr == NULL) {
    vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
  }

  os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)");

  return (E*)_addr;
}

template <class E, MEMFLAGS F>
void ArrayAllocator<E, F>::free() {
  if (_addr != NULL) {
    if (_use_malloc) {
      FreeHeap(_addr, F);
    } else {
      os::release_memory(_addr, _size);
    }
    _addr = NULL;
  }
}
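
// Usage sketch (illustrative, not part of the original header): small
// requests go through malloc, and requests at or above
// ArrayAllocatorMallocLimit (or whose malloc failed) fall back to
// reserved+committed memory; free() releases via the matching path.
//
//   ArrayAllocator<jint, mtInternal> allocator;
//   jint* data = allocator.allocate(1000);   // length in elements, not bytes
//   /* ... use data ... */
//   allocator.free();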

#endif // SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP