GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os/windows/gc/z/zVirtualMemory_windows.cpp
/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "gc/z/zMapper_windows.hpp"
#include "gc/z/zSyscall_windows.hpp"
#include "gc/z/zVirtualMemory.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

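// Windows-specific backing for ZVirtualMemoryManager. Two implementations are
// provided below: a placeholder-based one for small (paged) pages and an
// AWE-based one for large (locked) pages; the active one is picked in
// pd_initialize_before_reserve() based on ZLargePages.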
class ZVirtualMemoryManagerImpl : public CHeapObj<mtGC> {
public:
  virtual void initialize_before_reserve() {}
  virtual void initialize_after_reserve(ZMemoryManager* manager) {}
  virtual bool reserve(uintptr_t addr, size_t size) = 0;
  virtual void unreserve(uintptr_t addr, size_t size) = 0;
};

// Implements small pages (paged) support using placeholder reservation.
class ZVirtualMemoryManagerSmallPages : public ZVirtualMemoryManagerImpl {
private:
  class PlaceholderCallbacks : public AllStatic {
  public:
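    // ZGC multi-maps the heap, so each placeholder operation below is applied
    // to all three address views (marked0, marked1 and remapped) of the same
    // virtual memory range.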
    static void split_placeholder(uintptr_t start, size_t size) {
      ZMapper::split_placeholder(ZAddress::marked0(start), size);
      ZMapper::split_placeholder(ZAddress::marked1(start), size);
      ZMapper::split_placeholder(ZAddress::remapped(start), size);
    }

    static void coalesce_placeholders(uintptr_t start, size_t size) {
      ZMapper::coalesce_placeholders(ZAddress::marked0(start), size);
      ZMapper::coalesce_placeholders(ZAddress::marked1(start), size);
      ZMapper::coalesce_placeholders(ZAddress::remapped(start), size);
    }

    static void split_into_placeholder_granules(uintptr_t start, size_t size) {
      for (uintptr_t addr = start; addr < start + size; addr += ZGranuleSize) {
        split_placeholder(addr, ZGranuleSize);
      }
    }

    static void coalesce_into_one_placeholder(uintptr_t start, size_t size) {
      assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");

      if (size > ZGranuleSize) {
        coalesce_placeholders(start, size);
      }
    }

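    // Callbacks invoked by ZMemoryManager whenever a registered memory area
    // is created, destroyed, grown or shrunk; see register_with() below for
    // how they are installed and why.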
    static void create_callback(const ZMemory* area) {
      assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
      coalesce_into_one_placeholder(area->start(), area->size());
    }

    static void destroy_callback(const ZMemory* area) {
      assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
      // Don't try to split the last granule - VirtualFree will fail
      split_into_placeholder_granules(area->start(), area->size() - ZGranuleSize);
    }

    static void shrink_from_front_callback(const ZMemory* area, size_t size) {
      assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
      split_into_placeholder_granules(area->start(), size);
    }

    static void shrink_from_back_callback(const ZMemory* area, size_t size) {
      assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
      // Don't try to split the last granule - VirtualFree will fail
      split_into_placeholder_granules(area->end() - size, size - ZGranuleSize);
    }

    static void grow_from_front_callback(const ZMemory* area, size_t size) {
      assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
      coalesce_into_one_placeholder(area->start() - size, area->size() + size);
    }

    static void grow_from_back_callback(const ZMemory* area, size_t size) {
      assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
      coalesce_into_one_placeholder(area->start(), area->size() + size);
    }

    static void register_with(ZMemoryManager* manager) {
      // Each reserved virtual memory address area registered in _manager is
      // exactly covered by a single placeholder. Callbacks are installed so
      // that whenever a memory area changes, the corresponding placeholder
      // is adjusted.
      //
      // The create and grow callbacks are called when virtual memory is
      // returned to the memory manager. The new memory area is then covered
      // by a new single placeholder.
      //
      // The destroy and shrink callbacks are called when virtual memory is
      // allocated from the memory manager. The memory area is then split
      // into granule-sized placeholders.
      //
      // See comment in zMapper_windows.cpp explaining why placeholders are
      // split into ZGranuleSize sized placeholders.

      ZMemoryManager::Callbacks callbacks;

      callbacks._create            = &create_callback;
      callbacks._destroy           = &destroy_callback;
      callbacks._shrink_from_front = &shrink_from_front_callback;
      callbacks._shrink_from_back  = &shrink_from_back_callback;
      callbacks._grow_from_front   = &grow_from_front_callback;
      callbacks._grow_from_back    = &grow_from_back_callback;

      manager->register_callbacks(callbacks);
    }
  };

  virtual void initialize_after_reserve(ZMemoryManager* manager) {
    PlaceholderCallbacks::register_with(manager);
  }

  virtual bool reserve(uintptr_t addr, size_t size) {
    const uintptr_t res = ZMapper::reserve(addr, size);

    assert(res == addr || res == NULL, "Should not reserve other memory than requested");
    return res == addr;
  }

  virtual void unreserve(uintptr_t addr, size_t size) {
    ZMapper::unreserve(addr, size);
  }
};

// Implements Large Pages (locked) support using shared AWE physical memory.

// ZPhysicalMemory layer needs access to the section
HANDLE ZAWESection;
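// The section handle is created in initialize_before_reserve() below and used
// for every large-page reservation via ZMapper::reserve_for_shared_awe(); the
// ZPhysicalMemory layer maps AWE physical memory into these reservations.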

class ZVirtualMemoryManagerLargePages : public ZVirtualMemoryManagerImpl {
private:
  virtual void initialize_before_reserve() {
    ZAWESection = ZMapper::create_shared_awe_section();
  }

  virtual bool reserve(uintptr_t addr, size_t size) {
    const uintptr_t res = ZMapper::reserve_for_shared_awe(ZAWESection, addr, size);

    assert(res == addr || res == NULL, "Should not reserve other memory than requested");
    return res == addr;
  }

  virtual void unreserve(uintptr_t addr, size_t size) {
    ZMapper::unreserve_for_shared_awe(addr, size);
  }
};

static ZVirtualMemoryManagerImpl* _impl = NULL;

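// The platform-dependent (pd_) hooks below delegate to the implementation
// selected at initialization time: AWE-backed large pages if enabled,
// otherwise placeholder-based small pages.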
void ZVirtualMemoryManager::pd_initialize_before_reserve() {
  if (ZLargePages::is_enabled()) {
    _impl = new ZVirtualMemoryManagerLargePages();
  } else {
    _impl = new ZVirtualMemoryManagerSmallPages();
  }
  _impl->initialize_before_reserve();
}

void ZVirtualMemoryManager::pd_initialize_after_reserve() {
  _impl->initialize_after_reserve(&_manager);
}

bool ZVirtualMemoryManager::pd_reserve(uintptr_t addr, size_t size) {
  return _impl->reserve(addr, size);
}

void ZVirtualMemoryManager::pd_unreserve(uintptr_t addr, size_t size) {
  _impl->unreserve(addr, size);
}