Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp
40948 views
1
/*
2
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*/
23
24
#include "precompiled.hpp"
25
#include "gc/z/zGlobals.hpp"
26
#include "gc/z/zGranuleMap.inline.hpp"
27
#include "gc/z/zLargePages.inline.hpp"
28
#include "gc/z/zMapper_windows.hpp"
29
#include "gc/z/zPhysicalMemoryBacking_windows.hpp"
30
#include "logging/log.hpp"
31
#include "runtime/globals.hpp"
32
#include "utilities/debug.hpp"
33
34
// Interface for the platform-specific physical memory backing. Two
// implementations exist below: one for small (paged) pages and one for
// large (AWE/locked) pages. An instance is selected once at startup
// (see select_impl) and accessed through this base class.
class ZPhysicalMemoryBackingImpl : public CHeapObj<mtGC> {
public:
  // Fix: this class is used polymorphically through a base pointer, so it
  // needs a virtual destructor to make delete-through-base well-defined.
  // (The instance is never deleted in this file, but owners elsewhere
  // should not be exposed to UB.)
  virtual ~ZPhysicalMemoryBackingImpl() {}

  // Commit/uncommit physical memory backing [offset, offset + size).
  // Returns the number of bytes actually committed/uncommitted.
  virtual size_t commit(size_t offset, size_t size) = 0;
  virtual size_t uncommit(size_t offset, size_t size) = 0;

  // Map/unmap committed physical memory at offset into the virtual
  // address space at addr.
  virtual void map(uintptr_t addr, size_t size, size_t offset) const = 0;
  virtual void unmap(uintptr_t addr, size_t size) const = 0;
};
41
42
// Implements small pages (paged) support using placeholder reservation.
//
// The backing commits and uncommits physical memory, that can be
// multi-mapped into the virtual address space. To support fine-grained
// committing and uncommitting, each ZGranuleSize'd chunk is mapped to
// a separate paging file mapping.
49
class ZPhysicalMemoryBackingSmallPages : public ZPhysicalMemoryBackingImpl {
50
private:
51
ZGranuleMap<HANDLE> _handles;
52
53
HANDLE get_handle(uintptr_t offset) const {
54
HANDLE const handle = _handles.get(offset);
55
assert(handle != 0, "Should be set");
56
return handle;
57
}
58
59
void put_handle(uintptr_t offset, HANDLE handle) {
60
assert(handle != INVALID_HANDLE_VALUE, "Invalid handle");
61
assert(_handles.get(offset) == 0, "Should be cleared");
62
_handles.put(offset, handle);
63
}
64
65
void clear_handle(uintptr_t offset) {
66
assert(_handles.get(offset) != 0, "Should be set");
67
_handles.put(offset, 0);
68
}
69
70
public:
71
ZPhysicalMemoryBackingSmallPages(size_t max_capacity) :
72
ZPhysicalMemoryBackingImpl(),
73
_handles(max_capacity) {}
74
75
size_t commit(size_t offset, size_t size) {
76
for (size_t i = 0; i < size; i += ZGranuleSize) {
77
HANDLE const handle = ZMapper::create_and_commit_paging_file_mapping(ZGranuleSize);
78
if (handle == 0) {
79
return i;
80
}
81
82
put_handle(offset + i, handle);
83
}
84
85
return size;
86
}
87
88
size_t uncommit(size_t offset, size_t size) {
89
for (size_t i = 0; i < size; i += ZGranuleSize) {
90
HANDLE const handle = get_handle(offset + i);
91
clear_handle(offset + i);
92
ZMapper::close_paging_file_mapping(handle);
93
}
94
95
return size;
96
}
97
98
void map(uintptr_t addr, size_t size, size_t offset) const {
99
assert(is_aligned(offset, ZGranuleSize), "Misaligned");
100
assert(is_aligned(addr, ZGranuleSize), "Misaligned");
101
assert(is_aligned(size, ZGranuleSize), "Misaligned");
102
103
for (size_t i = 0; i < size; i += ZGranuleSize) {
104
HANDLE const handle = get_handle(offset + i);
105
ZMapper::map_view_replace_placeholder(handle, 0 /* offset */, addr + i, ZGranuleSize);
106
}
107
}
108
109
void unmap(uintptr_t addr, size_t size) const {
110
assert(is_aligned(addr, ZGranuleSize), "Misaligned");
111
assert(is_aligned(size, ZGranuleSize), "Misaligned");
112
113
for (size_t i = 0; i < size; i += ZGranuleSize) {
114
ZMapper::unmap_view_preserve_placeholder(addr + i, ZGranuleSize);
115
}
116
}
117
};
118
119
// Implements Large Pages (locked) support using shared AWE physical memory.
//
// Shared AWE physical memory also works with small pages, but it has
// a few drawbacks that make it a no-go to use it at this point:
//
// 1) It seems to use 8 bytes of committed memory per *reserved* memory.
// Given our scheme to use a large address space range this turns out to
// use too much memory.
//
// 2) It requires memory locking privileges, even for small pages. This
// has always been a requirement for large pages, and would be an extra
// restriction for usage with small pages.
//
// Note: The large pages size is tied to our ZGranuleSize.
134
extern HANDLE ZAWESection;
135
136
class ZPhysicalMemoryBackingLargePages : public ZPhysicalMemoryBackingImpl {
137
private:
138
ULONG_PTR* const _page_array;
139
140
static ULONG_PTR* alloc_page_array(size_t max_capacity) {
141
const size_t npages = max_capacity / ZGranuleSize;
142
const size_t array_size = npages * sizeof(ULONG_PTR);
143
144
return (ULONG_PTR*)os::malloc(array_size, mtGC);
145
}
146
147
public:
148
ZPhysicalMemoryBackingLargePages(size_t max_capacity) :
149
ZPhysicalMemoryBackingImpl(),
150
_page_array(alloc_page_array(max_capacity)) {}
151
152
size_t commit(size_t offset, size_t size) {
153
const size_t index = offset >> ZGranuleSizeShift;
154
const size_t npages = size >> ZGranuleSizeShift;
155
156
size_t npages_res = npages;
157
const bool res = AllocateUserPhysicalPages(ZAWESection, &npages_res, &_page_array[index]);
158
if (!res) {
159
fatal("Failed to allocate physical memory " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)",
160
size / M, offset, GetLastError());
161
} else {
162
log_debug(gc)("Allocated physical memory: " SIZE_FORMAT "M @ " PTR_FORMAT, size / M, offset);
163
}
164
165
// AllocateUserPhysicalPages might not be able to allocate the requested amount of memory.
166
// The allocated number of pages are written in npages_res.
167
return npages_res << ZGranuleSizeShift;
168
}
169
170
size_t uncommit(size_t offset, size_t size) {
171
const size_t index = offset >> ZGranuleSizeShift;
172
const size_t npages = size >> ZGranuleSizeShift;
173
174
size_t npages_res = npages;
175
const bool res = FreeUserPhysicalPages(ZAWESection, &npages_res, &_page_array[index]);
176
if (!res) {
177
fatal("Failed to uncommit physical memory " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)",
178
size, offset, GetLastError());
179
}
180
181
return npages_res << ZGranuleSizeShift;
182
}
183
184
void map(uintptr_t addr, size_t size, size_t offset) const {
185
const size_t npages = size >> ZGranuleSizeShift;
186
const size_t index = offset >> ZGranuleSizeShift;
187
188
const bool res = MapUserPhysicalPages((char*)addr, npages, &_page_array[index]);
189
if (!res) {
190
fatal("Failed to map view " PTR_FORMAT " " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)",
191
addr, size / M, offset, GetLastError());
192
}
193
}
194
195
void unmap(uintptr_t addr, size_t size) const {
196
const size_t npages = size >> ZGranuleSizeShift;
197
198
const bool res = MapUserPhysicalPages((char*)addr, npages, NULL);
199
if (!res) {
200
fatal("Failed to unmap view " PTR_FORMAT " " SIZE_FORMAT "M (%d)",
201
addr, size / M, GetLastError());
202
}
203
}
204
};
205
206
// Chooses the backing implementation: AWE-based large pages when large
// pages are enabled, paging-file-backed small pages otherwise.
static ZPhysicalMemoryBackingImpl* select_impl(size_t max_capacity) {
  if (!ZLargePages::is_enabled()) {
    return new ZPhysicalMemoryBackingSmallPages(max_capacity);
  }

  return new ZPhysicalMemoryBackingLargePages(max_capacity);
}
213
214
// Constructs the backing, selecting the page-size-specific implementation
// via select_impl() (large pages when ZLargePages::is_enabled()).
ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) :
    _impl(select_impl(max_capacity)) {}
216
217
// Always reports success - nothing in this backing's construction has a
// detectable failure mode in this file.
bool ZPhysicalMemoryBacking::is_initialized() const {
  return true;
}
220
221
// Intentionally a no-op on Windows - no commit-limit warnings are emitted
// here (presumably other platforms check OS limits; verify against the
// POSIX counterparts if porting logic).
void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
  // Does nothing
}
224
225
// Commits [offset, offset + length) of physical memory. Returns the number
// of bytes actually committed, which may be less than length (see the
// small-pages implementation's partial-commit path).
size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
  log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                      offset / M, (offset + length) / M, length / M);

  return _impl->commit(offset, length);
}
231
232
// Uncommits [offset, offset + length) of physical memory. Returns the
// number of bytes actually uncommitted.
size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
  log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                      offset / M, (offset + length) / M, length / M);

  return _impl->uncommit(offset, length);
}
238
239
// Maps committed physical memory at [offset, offset + size) into the
// virtual address space at addr. All three arguments must be
// ZGranuleSize-aligned.
void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, size_t offset) const {
  assert(is_aligned(offset, ZGranuleSize), "Misaligned: " PTR_FORMAT, offset);
  assert(is_aligned(addr, ZGranuleSize), "Misaligned: " PTR_FORMAT, addr);
  assert(is_aligned(size, ZGranuleSize), "Misaligned: " PTR_FORMAT, size);

  _impl->map(addr, size, offset);
}
246
247
// Unmaps size bytes of previously mapped memory at addr. Both arguments
// must be ZGranuleSize-aligned.
void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
  assert(is_aligned(addr, ZGranuleSize), "Misaligned");
  assert(is_aligned(size, ZGranuleSize), "Misaligned");

  _impl->unmap(addr, size);
}
253
254