Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
numba
GitHub Repository: numba/llvmlite
Path: blob/main/ffi/memorymanager.cpp
1154 views
1
//===---- memorymanager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*----===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This file implements the section-based memory manager used by the MCJIT
10
// execution engine and RuntimeDyld
11
//
12
//===----------------------------------------------------------------------===//
13
14
#include "memorymanager.h"
15
#include "llvm/Support/MathExtras.h"
16
#include "llvm/Support/Process.h"
17
18
#define DEBUG_TYPE "llvmlite-memory-manager"
19
20
namespace llvm {
21
22
uint8_t *LlvmliteMemoryManager::allocateDataSection(uintptr_t Size,
23
unsigned Alignment,
24
unsigned SectionID,
25
StringRef SectionName,
26
bool IsReadOnly) {
27
if (IsReadOnly)
28
return allocateSection(LlvmliteMemoryManager::AllocationPurpose::ROData,
29
Size, Alignment);
30
return allocateSection(LlvmliteMemoryManager::AllocationPurpose::RWData,
31
Size, Alignment);
32
}
33
34
uint8_t *LlvmliteMemoryManager::allocateCodeSection(uintptr_t Size,
35
unsigned Alignment,
36
unsigned SectionID,
37
StringRef SectionName) {
38
return allocateSection(LlvmliteMemoryManager::AllocationPurpose::Code, Size,
39
Alignment);
40
}
41
42
// Carves a sub-allocation for one section out of the pre-reserved pool for
// the given purpose (Code / ROData / RWData). Memory is never mapped here:
// a preallocated free block (set up by reserveAllocationSpace) must already
// be large enough, otherwise this function fails.
//
// Returns the aligned start address of the section, or nullptr (after a
// failed assert in debug builds) when no preallocated block can satisfy the
// request.
uint8_t *LlvmliteMemoryManager::allocateSection(
    LlvmliteMemoryManager::AllocationPurpose Purpose, uintptr_t Size,
    unsigned Alignment) {
    LLVM_DEBUG(
        dbgs() << "\nLlvmliteMemoryManager::allocateSection() request:\n");

    LLVM_DEBUG(dbgs() << "Requested size / alignment: "
                      << format_hex(Size, 2, true) << " / " << Alignment
                      << "\n");

    // Chosen to match the stub alignment value used in reserveAllocationSpace()
    if (!Alignment)
        Alignment = 8;

    assert(!(Alignment & (Alignment - 1)) &&
           "Alignment must be a power of two.");

    // Worst-case footprint: the size rounded up to the alignment, plus one
    // extra alignment quantum to cover the padding that aligning the block's
    // base address may consume. Must mirror the per-section estimate used in
    // reserveAllocationSpace().
    uintptr_t RequiredSize =
        Alignment * ((Size + Alignment - 1) / Alignment + 1);
    uintptr_t Addr = 0;

    LLVM_DEBUG(dbgs() << "Allocating " << format_hex(RequiredSize, 2, true)
                      << " bytes for ");

    // Select the memory group matching the requested purpose. An immediately
    // invoked lambda is used so the result can bind to a reference.
    MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
        switch (Purpose) {
        case AllocationPurpose::Code:
            LLVM_DEBUG(dbgs() << "CodeMem at ");
            return CodeMem;
        case AllocationPurpose::ROData:
            LLVM_DEBUG(dbgs() << "RODataMem at ");
            return RODataMem;
        case AllocationPurpose::RWData:
            LLVM_DEBUG(dbgs() << "RWDataMem at ");
            return RWDataMem;
        }
        llvm_unreachable("Unknown LlvmliteMemoryManager::AllocationPurpose");
    }();

    // Look in the list of free memory regions and use a block there if one
    // is available.
    for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
        if (FreeMB.Free.allocatedSize() >= RequiredSize) {
            Addr = (uintptr_t)FreeMB.Free.base();
            uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize();
            // Align the address.
            Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

            if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
                // The part of the block we're giving out to the user is now
                // pending
                MemGroup.PendingMem.push_back(
                    sys::MemoryBlock((void *)Addr, Size));

                // Remember this pending block, such that future allocations can
                // just modify it rather than creating a new one
                FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
            } else {
                // This free block already has a pending prefix: grow that
                // pending block to cover the new allocation instead of
                // recording a separate one.
                sys::MemoryBlock &PendingMB =
                    MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
                PendingMB =
                    sys::MemoryBlock(PendingMB.base(),
                                     Addr + Size - (uintptr_t)PendingMB.base());
            }

            // Remember how much free space is now left in this block
            FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size),
                                           EndOfBlock - Addr - Size);
            LLVM_DEBUG(dbgs() << format_hex(Addr, 18, true) << "\n");
            return (uint8_t *)Addr;
        }
    }

    assert(false && "All memory must be pre-allocated");

    // If asserts are turned off, returning a null pointer in the event of a
    // failure to find a preallocated block large enough should at least lead
    // to a quick crash.
    return nullptr;
}
122
123
bool LlvmliteMemoryManager::hasSpace(const MemoryGroup &MemGroup,
124
uintptr_t Size) const {
125
for (const FreeMemBlock &FreeMB : MemGroup.FreeMem) {
126
if (FreeMB.Free.allocatedSize() >= Size)
127
return true;
128
}
129
return false;
130
}
131
132
void LlvmliteMemoryManager::reserveAllocationSpace(
133
uintptr_t CodeSize, Align CodeAlign, uintptr_t RODataSize,
134
Align RODataAlign, uintptr_t RWDataSize, Align RWDataAlign) {
135
LLVM_DEBUG(
136
dbgs()
137
<< "\nLlvmliteMemoryManager::reserveAllocationSpace() request:\n\n");
138
LLVM_DEBUG(dbgs() << "Code size / align: " << format_hex(CodeSize, 2, true)
139
<< " / " << CodeAlign.value() << "\n");
140
LLVM_DEBUG(dbgs() << "ROData size / align: "
141
<< format_hex(RODataSize, 2, true) << " / "
142
<< RODataAlign.value() << "\n");
143
LLVM_DEBUG(dbgs() << "RWData size / align: "
144
<< format_hex(RWDataSize, 2, true) << " / "
145
<< RWDataAlign.value() << "\n");
146
147
if (CodeSize == 0 && RODataSize == 0 && RWDataSize == 0) {
148
LLVM_DEBUG(dbgs() << "No memory requested - returning early.\n");
149
return;
150
}
151
152
// Code alignment needs to be at least the stub alignment - however, we
153
// don't have an easy way to get that here so as a workaround, we assume
154
// it's 8, which is the largest value I observed across all platforms.
155
constexpr uint64_t StubAlign = 8;
156
157
CodeAlign = Align(std::max(CodeAlign.value(), StubAlign));
158
159
// ROData and RWData may not need to be aligned to the StubAlign, but the
160
// stub alignment seems like a reasonable (if slightly arbitrary) minimum
161
// alignment for them that should not cause any issues on all (i.e. 64-bit)
162
// platforms.
163
RODataAlign = Align(std::max(RODataAlign.value(), StubAlign));
164
RWDataAlign = Align(std::max(RWDataAlign.value(), StubAlign));
165
166
// Get space required for each section. Use the same calculation as
167
// allocateSection because we need to be able to satisfy it.
168
uintptr_t RequiredCodeSize =
169
alignTo(CodeSize, CodeAlign) + CodeAlign.value();
170
uintptr_t RequiredRODataSize =
171
alignTo(RODataSize, RODataAlign) + RODataAlign.value();
172
uintptr_t RequiredRWDataSize =
173
alignTo(RWDataSize, RWDataAlign) + RWDataAlign.value();
174
175
if (hasSpace(CodeMem, RequiredCodeSize) &&
176
hasSpace(RODataMem, RequiredRODataSize) &&
177
hasSpace(RWDataMem, RequiredRWDataSize)) {
178
// Sufficient space in contiguous block already available.
179
LLVM_DEBUG(
180
dbgs() << "Previous preallocation sufficient; reusing it.\n");
181
return;
182
}
183
184
// MemoryManager does not have functions for releasing memory after it's
185
// allocated. Normally it tries to use any excess blocks that were
186
// allocated due to page alignment, but if we have insufficient free memory
187
// for the request this can lead to allocating disparate memory that can
188
// violate the ARM ABI. Clear free memory so only the new allocations are
189
// used, but do not release allocated memory as it may still be in-use.
190
CodeMem.FreeMem.clear();
191
RODataMem.FreeMem.clear();
192
RWDataMem.FreeMem.clear();
193
194
// Round up to the nearest page size. Blocks must be page-aligned.
195
static const size_t PageSize = sys::Process::getPageSizeEstimate();
196
RequiredCodeSize = alignTo(RequiredCodeSize, PageSize);
197
RequiredRODataSize = alignTo(RequiredRODataSize, PageSize);
198
RequiredRWDataSize = alignTo(RequiredRWDataSize, PageSize);
199
uintptr_t RequiredSize =
200
RequiredCodeSize + RequiredRODataSize + RequiredRWDataSize;
201
202
LLVM_DEBUG(dbgs() << "Reserving " << format_hex(RequiredSize, 2, true)
203
<< " bytes\n");
204
205
std::error_code ec;
206
const sys::MemoryBlock *near = nullptr;
207
sys::MemoryBlock MB = MMapper.allocateMappedMemory(
208
AllocationPurpose::RWData, RequiredSize, near,
209
sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
210
if (ec) {
211
assert(false && "Failed to allocate mapped memory");
212
}
213
214
// CodeMem will arbitrarily own this MemoryBlock to handle cleanup.
215
CodeMem.AllocatedMem.push_back(MB);
216
217
uintptr_t Addr = (uintptr_t)MB.base();
218
FreeMemBlock FreeMB;
219
FreeMB.PendingPrefixIndex = (unsigned)-1;
220
221
if (CodeSize > 0) {
222
LLVM_DEBUG(dbgs() << "Code mem starts at " << format_hex(Addr, 18, true)
223
<< ", size " << format_hex(RequiredCodeSize, 2, true)
224
<< "\n");
225
assert(isAddrAligned(Align(CodeAlign), (void *)Addr));
226
FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredCodeSize);
227
CodeMem.FreeMem.push_back(FreeMB);
228
Addr += RequiredCodeSize;
229
}
230
231
if (RODataSize > 0) {
232
LLVM_DEBUG(dbgs() << "ROData mem starts at "
233
<< format_hex(Addr, 18, true) << ", size "
234
<< format_hex(RequiredRODataSize, 2, true) << "\n");
235
assert(isAddrAligned(Align(RODataAlign), (void *)Addr));
236
FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredRODataSize);
237
RODataMem.FreeMem.push_back(FreeMB);
238
Addr += RequiredRODataSize;
239
}
240
241
if (RWDataSize > 0) {
242
LLVM_DEBUG(dbgs() << "RWData mem starts at "
243
<< format_hex(Addr, 18, true) << ", size "
244
<< format_hex(RequiredRWDataSize, 2, true) << "\n");
245
assert(isAddrAligned(Align(RWDataAlign), (void *)Addr));
246
FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredRWDataSize);
247
RWDataMem.FreeMem.push_back(FreeMB);
248
}
249
250
LLVM_DEBUG(dbgs() << "\n");
251
}
252
253
bool LlvmliteMemoryManager::finalizeMemory(std::string *ErrMsg) {
254
// FIXME: Should in-progress permissions be reverted if an error occurs?
255
std::error_code ec;
256
257
// Make code memory executable.
258
ec = applyMemoryGroupPermissions(CodeMem, sys::Memory::MF_READ |
259
sys::Memory::MF_EXEC);
260
if (ec) {
261
if (ErrMsg) {
262
*ErrMsg = ec.message();
263
}
264
return true;
265
}
266
267
// Make read-only data memory read-only.
268
ec = applyMemoryGroupPermissions(RODataMem, sys::Memory::MF_READ);
269
if (ec) {
270
if (ErrMsg) {
271
*ErrMsg = ec.message();
272
}
273
return true;
274
}
275
276
// Read-write data memory already has the correct permissions
277
278
// Some platforms with separate data cache and instruction cache require
279
// explicit cache flush, otherwise JIT code manipulations (like resolved
280
// relocations) will get to the data cache but not to the instruction cache.
281
invalidateInstructionCache();
282
283
return false;
284
}
285
286
// Shrinks M to the largest page-aligned, whole-page sub-block it contains:
// skips any unaligned prefix and drops any partial trailing page. The result
// may be empty (size 0) when M spans less than one full aligned page.
static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
    static const size_t PageSize = sys::Process::getPageSizeEstimate();

    const uintptr_t Base = (uintptr_t)M.base();
    // Bytes between Base and the next page boundary; zero when Base is
    // already page-aligned.
    const size_t Lead = (PageSize - (Base % PageSize)) % PageSize;
    // After skipping the prefix, keep only whole pages.
    const size_t Kept = ((M.allocatedSize() - Lead) / PageSize) * PageSize;

    sys::MemoryBlock Trimmed((void *)(Base + Lead), Kept);

    assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
    assert((Trimmed.allocatedSize() % PageSize) == 0);
    assert(M.base() <= Trimmed.base() &&
           Trimmed.allocatedSize() <= M.allocatedSize());

    return Trimmed;
}
306
307
// Applies the given protection flags to every pending block in MemGroup,
// then retires the pending list and trims the free list down to whole pages
// (a pending block may share a page with the start of a free block, and that
// shared page has just had its permissions changed).
//
// Returns the first error from protectMappedMemory, or success.
std::error_code
LlvmliteMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
                                                   unsigned Permissions) {
    for (sys::MemoryBlock &MB : MemGroup.PendingMem)
        if (std::error_code EC = MMapper.protectMappedMemory(MB, Permissions))
            return EC;

    MemGroup.PendingMem.clear();

    // Now go through free blocks and trim any of them that don't span the
    // entire page because one of the pending blocks may have overlapped it.
    for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
        FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
        // We cleared the PendingMem list, so all these pointers are now invalid
        FreeMB.PendingPrefixIndex = (unsigned)-1;
    }

    // Remove all blocks which are now empty
    erase_if(MemGroup.FreeMem, [](FreeMemBlock &FreeMB) {
        return FreeMB.Free.allocatedSize() == 0;
    });

    return std::error_code();
}
331
332
void LlvmliteMemoryManager::invalidateInstructionCache() {
333
for (sys::MemoryBlock &Block : CodeMem.PendingMem)
334
sys::Memory::InvalidateInstructionCache(Block.base(),
335
Block.allocatedSize());
336
}
337
338
LlvmliteMemoryManager::~LlvmliteMemoryManager() {
    // Hand every mapped slab back to the mapper. The free/pending lists only
    // alias ranges inside these slabs, so releasing AllocatedMem releases all
    // memory this manager ever obtained.
    for (MemoryGroup *Pool : {&CodeMem, &RWDataMem, &RODataMem})
        for (sys::MemoryBlock &Slab : Pool->AllocatedMem)
            MMapper.releaseMappedMemory(Slab);
}
344
345
// Defaulted out-of-line so the interface's destructor is emitted in this
// translation unit rather than in every user of the header.
LlvmliteMemoryManager::MemoryMapper::~MemoryMapper() = default;
346
347
// Deliberately empty. Providing one virtual method out-of-line is LLVM's
// usual "anchor" idiom for pinning the class's vtable to this translation
// unit.
void LlvmliteMemoryManager::anchor() {}
348
349
namespace {
350
// Trivial implementation of LlvmliteMemoryManager::MemoryMapper that just calls
351
// into sys::Memory.
352
class DefaultMMapper final : public LlvmliteMemoryManager::MemoryMapper {
353
public:
354
sys::MemoryBlock
355
allocateMappedMemory(LlvmliteMemoryManager::AllocationPurpose Purpose,
356
size_t NumBytes,
357
const sys::MemoryBlock *const NearBlock,
358
unsigned Flags, std::error_code &EC) override {
359
return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags,
360
EC);
361
}
362
363
std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
364
unsigned Flags) override {
365
return sys::Memory::protectMappedMemory(Block, Flags);
366
}
367
368
std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
369
return sys::Memory::releaseMappedMemory(M);
370
}
371
};
372
373
DefaultMMapper DefaultMMapperInstance;
374
} // namespace
375
376
// Constructs the manager with an optional custom mapper; when MM is null,
// falls back to the file-local DefaultMMapperInstance, which calls
// sys::Memory directly. The mapper is held by reference and must outlive
// this manager.
LlvmliteMemoryManager::LlvmliteMemoryManager(MemoryMapper *MM)
    : MMapper(MM ? *MM : DefaultMMapperInstance) {}
378
379
} // namespace llvm
380
381