GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp
/*
 * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zErrno.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
#include "gc/z/zPhysicalMemoryBacking_bsd.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <sys/mman.h>
#include <sys/types.h>

// The backing is represented by a reserved virtual address space, in which
// we commit and uncommit physical memory. Multi-mapping the different heap
// views is done by simply remapping the backing memory using mach_vm_remap().

static int vm_flags_superpage() {
  if (!ZLargePages::is_explicit()) {
    return 0;
  }

  const int page_size_in_megabytes = ZGranuleSize >> 20;
  return page_size_in_megabytes << VM_FLAGS_SUPERPAGE_SHIFT;
}

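// Note on vm_flags_superpage(): <mach/vm_statistics.h> encodes the requested
// superpage size in the bits above VM_FLAGS_SUPERPAGE_SHIFT, so with ZGC's
// default 2M granule the function presumably returns
// (2 << VM_FLAGS_SUPERPAGE_SHIFT), i.e. VM_FLAGS_SUPERPAGE_SIZE_2MB.
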
static ZErrno mremap(uintptr_t from_addr, uintptr_t to_addr, size_t size) {
  mach_vm_address_t remap_addr = to_addr;
  vm_prot_t remap_cur_prot;
  vm_prot_t remap_max_prot;

  // Remap memory to an additional location
  const kern_return_t res = mach_vm_remap(mach_task_self(),
                                          &remap_addr,
                                          size,
                                          0 /* mask */,
                                          VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | vm_flags_superpage(),
                                          mach_task_self(),
                                          from_addr,
                                          FALSE /* copy */,
                                          &remap_cur_prot,
                                          &remap_max_prot,
                                          VM_INHERIT_COPY);

  return (res == KERN_SUCCESS) ? ZErrno(0) : ZErrno(EINVAL);
}

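// Note on the mremap() helper above: this is a local helper, not the Linux
// mremap(2) system call. With copy=FALSE, mach_vm_remap() is expected to make
// to_addr an additional mapping of the same physical pages as from_addr,
// which is how the multiple heap views end up sharing one backing range.
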
ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) :
    _base(0),
    _initialized(false) {

  // Reserve address space for backing memory
  _base = (uintptr_t)os::reserve_memory(max_capacity);
  if (_base == 0) {
    // Failed
    log_error_pd(gc)("Failed to reserve address space for backing memory");
    return;
  }

  // Successfully initialized
  _initialized = true;
}

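// Note on the constructor above: os::reserve_memory() only claims virtual
// address range for the backing; no physical memory is attached at this
// point. Pages are populated later, on demand, via commit() and map() below.
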
bool ZPhysicalMemoryBacking::is_initialized() const {
  return _initialized;
}

void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
  // Does nothing
}

bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
  assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
  assert(is_aligned(length, os::vm_page_size()), "Invalid length");

  log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                      offset / M, (offset + length) / M, length / M);

  const uintptr_t addr = _base + offset;
  const void* const res = mmap((void*)addr, length, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (res == MAP_FAILED) {
    ZErrno err;
    log_error(gc)("Failed to commit memory (%s)", err.to_string());
    return false;
  }

  // Success
  return true;
}

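// Note on commit_inner(): "committing" here is an overmap. MAP_FIXED maps
// fresh anonymous read/write memory over the reserved range, and the kernel
// typically attaches physical pages lazily when they are first touched.
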
size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
  // Try to commit the whole region
  if (commit_inner(offset, length)) {
    // Success
    return length;
  }

  // Failed, try to commit as much as possible
  size_t start = offset;
  size_t end = offset + length;

  for (;;) {
    length = align_down((end - start) / 2, ZGranuleSize);
    if (length == 0) {
      // Done, don't commit more
      return start - offset;
    }

    if (commit_inner(start, length)) {
      // Success, try commit more
      start += length;
    } else {
      // Failed, try commit less
      end -= length;
    }
  }
}

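// Illustrative trace of commit()'s fallback loop (hypothetical numbers): for
// a failed 512M request at offset 0, the first retry is align_down(512M / 2)
// = 256M at offset 0. If that succeeds, start moves to 256M and the next
// attempt is 128M at offset 256M; if it fails instead, end shrinks to 256M
// and the next attempt is 128M at offset 0. The loop stops once the halved
// length rounds down to zero granules and returns how much was committed.
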
size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
  assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
  assert(is_aligned(length, os::vm_page_size()), "Invalid length");

  log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                      offset / M, (offset + length) / M, length / M);

  const uintptr_t start = _base + offset;
  const void* const res = mmap((void*)start, length, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
  if (res == MAP_FAILED) {
    ZErrno err;
    log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
    return 0;
  }

  return length;
}

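// Note on uncommit(): this is the inverse overmap. Re-mapping the range as
// anonymous PROT_NONE | MAP_NORESERVE memory releases the previously
// committed pages back to the OS while the virtual reservation stays intact.
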
void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const {
  const ZErrno err = mremap(_base + offset, addr, size);
  if (err) {
    fatal("Failed to remap memory (%s)", err.to_string());
  }
}

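// Usage sketch for map() (assumed caller behavior, following the file comment
// near the top): since ZGC multi-maps the heap, the same backing offset can
// be passed to map() several times with different view addresses, e.g. the
// marked0, marked1 and remapped views all remapping the range at _base + offset.
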
void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
  // Note that we must keep the address space reservation intact and just detach
  // the backing memory. For this reason we map a new anonymous, non-accessible
  // and non-reserved page over the mapping instead of actually unmapping.
  const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
  if (res == MAP_FAILED) {
    ZErrno err;
    fatal("Failed to map memory (%s)", err.to_string());
  }
}