Source: GitHub repository Roblox/luau — CodeGen/src/CodeAllocator.cpp (viewer page header removed).
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
2
#include "Luau/CodeAllocator.h"
3
4
#include "Luau/CodeGenCommon.h"
5
6
#include <string.h>
7
8
LUAU_FASTFLAGVARIABLE(LuauCodegenFreeBlocks)
9
LUAU_FASTFLAGVARIABLE(LuauCodegenProtectData)
10
11
#if defined(_WIN32)
12
13
#ifndef WIN32_LEAN_AND_MEAN
14
#define WIN32_LEAN_AND_MEAN
15
#endif
16
#ifndef NOMINMAX
17
#define NOMINMAX
18
#endif
19
#include <windows.h>
20
21
const size_t kPageSize = 4096;
22
#else
23
#include <sys/mman.h>
24
#include <unistd.h>
25
26
#if defined(__FreeBSD__) && !(_POSIX_C_SOURCE >= 200112L)
27
const size_t kPageSize = getpagesize();
28
#else
29
const size_t kPageSize = sysconf(_SC_PAGESIZE);
30
#endif
31
#endif
32
33
#ifdef __APPLE__
34
extern "C" void sys_icache_invalidate(void* start, size_t len);
35
#endif
36
37
38
#if defined(_WIN32)
39
// Reserves and commits a read/write memory region of 'size' bytes.
// 'size' must already be a whole number of pages; returns nullptr on failure.
static uint8_t* allocatePagesImpl(size_t size)
{
    CODEGEN_ASSERT(size == Luau::CodeGen::CodeAllocator::alignToPageSize(size));

    void* pages = VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
    return static_cast<uint8_t*>(pages);
}
45
46
// Releases a region previously returned by allocatePagesImpl.
// 'size' must be page-aligned; VirtualFree with MEM_RELEASE frees the whole
// reservation, so the size argument to VirtualFree itself must be 0.
static void freePagesImpl(uint8_t* mem, size_t size)
{
    CODEGEN_ASSERT(size == Luau::CodeGen::CodeAllocator::alignToPageSize(size));

    if (VirtualFree(mem, 0, MEM_RELEASE) == 0)
        CODEGEN_ASSERT(!"Failed to deallocate block memory"); // message matches the POSIX implementation
}
53
54
// Flips [mem, mem+size) from writable to read+execute (the W^X transition).
// 'mem' must be page-aligned and 'size' a whole number of pages.
[[nodiscard]] static bool makePagesExecutable(uint8_t* mem, size_t size)
{
    CODEGEN_ASSERT((uintptr_t(mem) & (kPageSize - 1)) == 0);
    CODEGEN_ASSERT(size == Luau::CodeGen::CodeAllocator::alignToPageSize(size));

    DWORD previousProtection;
    return VirtualProtect(mem, size, PAGE_EXECUTE_READ, &previousProtection) != 0;
}
62
63
// Reverses makePagesExecutable: the range becomes read+write and loses execute.
// 'mem' must be page-aligned and 'size' a whole number of pages.
[[nodiscard]] static bool makePagesNotExecutable(uint8_t* mem, size_t size)
{
    CODEGEN_ASSERT((uintptr_t(mem) & (kPageSize - 1)) == 0);
    CODEGEN_ASSERT(size == Luau::CodeGen::CodeAllocator::alignToPageSize(size));

    DWORD previousProtection;
    return VirtualProtect(mem, size, PAGE_READWRITE, &previousProtection) != 0;
}
71
72
// Marks the range read-only (used for the data section when it is protected
// separately from code). 'mem' must be page-aligned, 'size' a page multiple.
[[nodiscard]] static bool makePagesReadOnly(uint8_t* mem, size_t size)
{
    CODEGEN_ASSERT((uintptr_t(mem) & (kPageSize - 1)) == 0);
    CODEGEN_ASSERT(size == Luau::CodeGen::CodeAllocator::alignToPageSize(size));

    DWORD previousProtection;
    return VirtualProtect(mem, size, PAGE_READONLY, &previousProtection) != 0;
}
80
81
// Makes freshly written machine code visible to instruction fetch.
// Only available on desktop/app Windows API partitions.
static void flushInstructionCache(uint8_t* mem, size_t size)
{
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP | WINAPI_PARTITION_SYSTEM)
    if (!FlushInstructionCache(GetCurrentProcess(), mem, size))
        CODEGEN_ASSERT(!"Failed to flush instruction cache");
#endif
}
88
#else
89
static uint8_t* allocatePagesImpl(size_t size)
90
{
91
CODEGEN_ASSERT(size == Luau::CodeGen::CodeAllocator::alignToPageSize(size));
92
93
#ifdef __APPLE__
94
void* result = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_JIT, -1, 0);
95
#else
96
void* result = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
97
#endif
98
99
return (result == MAP_FAILED) ? nullptr : static_cast<uint8_t*>(result);
100
}
101
102
static void freePagesImpl(uint8_t* mem, size_t size)
103
{
104
CODEGEN_ASSERT(size == Luau::CodeGen::CodeAllocator::alignToPageSize(size));
105
106
if (munmap(mem, size) != 0)
107
CODEGEN_ASSERT(!"Failed to deallocate block memory");
108
}
109
110
[[nodiscard]] static bool makePagesExecutable(uint8_t* mem, size_t size)
111
{
112
CODEGEN_ASSERT((uintptr_t(mem) & (kPageSize - 1)) == 0);
113
CODEGEN_ASSERT(size == Luau::CodeGen::CodeAllocator::alignToPageSize(size));
114
115
return mprotect(mem, size, PROT_READ | PROT_EXEC) == 0;
116
}
117
118
[[nodiscard]] static bool makePagesNotExecutable(uint8_t* mem, size_t size)
119
{
120
CODEGEN_ASSERT((uintptr_t(mem) & (kPageSize - 1)) == 0);
121
CODEGEN_ASSERT(size == Luau::CodeGen::CodeAllocator::alignToPageSize(size));
122
123
return mprotect(mem, size, PROT_READ | PROT_WRITE) == 0;
124
}
125
126
[[nodiscard]] static bool makePagesReadOnly(uint8_t* mem, size_t size)
127
{
128
CODEGEN_ASSERT((uintptr_t(mem) & (kPageSize - 1)) == 0);
129
CODEGEN_ASSERT(size == Luau::CodeGen::CodeAllocator::alignToPageSize(size));
130
131
return mprotect(mem, size, PROT_READ) == 0;
132
}
133
134
// Makes freshly written machine code visible to instruction fetch.
static void flushInstructionCache(uint8_t* mem, size_t size)
{
#ifdef __EMSCRIPTEN__
    // Wasm has no instruction cache to maintain
    (void)mem;
    (void)size;
#elif defined(__APPLE__)
    sys_icache_invalidate(mem, size);
#else
    __builtin___clear_cache((char*)mem, (char*)mem + size);
#endif
}
143
#endif
144
145
namespace Luau
146
{
147
namespace CodeGen
148
{
149
150
size_t CodeAllocator::alignToPageSize(size_t size)
151
{
152
return (size + kPageSize - 1) & ~(kPageSize - 1);
153
}
154
155
// Convenience constructor: no allocation callback, no callback context.
CodeAllocator::CodeAllocator(size_t blockSize, size_t maxTotalSize)
    : CodeAllocator(blockSize, maxTotalSize, nullptr, nullptr)
{
}
159
160
CodeAllocator::CodeAllocator(size_t blockSize, size_t maxTotalSize, AllocationCallback* allocationCallback, void* allocationCallbackContext)
161
: blockSize{blockSize}
162
, maxTotalSize{maxTotalSize}
163
, allocationCallback{allocationCallback}
164
, allocationCallbackContext{allocationCallbackContext}
165
{
166
CODEGEN_ASSERT(blockSize > kMaxReservedDataSize);
167
CODEGEN_ASSERT(maxTotalSize >= blockSize);
168
}
169
170
// Tears down all per-block unwind registrations, then releases every block.
CodeAllocator::~CodeAllocator()
{
    if (destroyBlockUnwindInfo)
    {
        for (void* info : unwindInfos)
            destroyBlockUnwindInfo(context, info);
    }

    // Under the free-blocks flag, callers must deallocate everything first
    if (FFlag::LuauCodegenFreeBlocks)
        CODEGEN_ASSERT(liveAllocations == 0);

    for (uint8_t* blockStart : blocks)
        freePages(blockStart, blockSize);
}
184
185
bool CodeAllocator::allocate_DEPRECATED(
186
const uint8_t* data,
187
size_t dataSize,
188
const uint8_t* code,
189
size_t codeSize,
190
uint8_t*& result,
191
size_t& resultSize,
192
uint8_t*& resultCodeStart
193
)
194
{
195
CODEGEN_ASSERT(!FFlag::LuauCodegenFreeBlocks);
196
197
// 'Round up' to preserve code alignment
198
size_t alignedDataSize = (dataSize + (kCodeAlignment - 1)) & ~(kCodeAlignment - 1);
199
200
size_t totalSize = alignedDataSize + codeSize;
201
202
// Function has to fit into a single block with unwinding information
203
if (totalSize > blockSize - kMaxReservedDataSize)
204
return false;
205
206
size_t startOffset = 0;
207
208
// We might need a new block
209
if (totalSize > size_t(blockEnd - blockPos))
210
{
211
if (!allocateNewBlock(startOffset))
212
return false;
213
214
CODEGEN_ASSERT(totalSize <= size_t(blockEnd - blockPos));
215
}
216
217
CODEGEN_ASSERT((uintptr_t(blockPos) & (kPageSize - 1)) == 0); // Allocation starts on page boundary
218
219
size_t dataOffset = startOffset + alignedDataSize - dataSize;
220
size_t codeOffset = startOffset + alignedDataSize;
221
222
if (dataSize)
223
memcpy(blockPos + dataOffset, data, dataSize);
224
if (codeSize)
225
memcpy(blockPos + codeOffset, code, codeSize);
226
227
size_t pageAlignedSize = alignToPageSize(startOffset + totalSize);
228
229
if (!makePagesExecutable(blockPos, pageAlignedSize))
230
return false;
231
232
flushInstructionCache(blockPos + codeOffset, codeSize);
233
234
result = blockPos + startOffset;
235
resultSize = totalSize;
236
resultCodeStart = blockPos + codeOffset;
237
238
// Ensure that future allocations from the block start from a page boundary.
239
// This is important since we use W^X, and writing to the previous page would require briefly removing
240
// executable bit from it, which may result in access violations if that code is being executed concurrently.
241
if (pageAlignedSize <= size_t(blockEnd - blockPos))
242
{
243
blockPos += pageAlignedSize;
244
CODEGEN_ASSERT((uintptr_t(blockPos) & (kPageSize - 1)) == 0);
245
CODEGEN_ASSERT(blockPos <= blockEnd);
246
}
247
else
248
{
249
// Future allocations will need to allocate fresh blocks
250
blockPos = blockEnd;
251
}
252
253
return true;
254
}
255
256
// Allocates a data+code region inside the current block (or a fresh one) and
// applies final page protections. When LuauCodegenProtectData is set and a
// data section exists, data and code are placed in separate page ranges so
// data can be made read-only while code is made executable; otherwise both are
// packed together and the whole range becomes executable.
// Returns a zero-initialized CodeAllocationData on failure.
CodeAllocationData CodeAllocator::allocate(const uint8_t* data, size_t dataSize, const uint8_t* code, size_t codeSize)
{
    CODEGEN_ASSERT(FFlag::LuauCodegenFreeBlocks);

    size_t startOffset = 0;
    size_t dataOffset = 0;
    size_t codeOffset = 0;
    size_t totalSize = 0;
    size_t pageAlignedSize = 0;

    if (FFlag::LuauCodegenProtectData && dataSize != 0)
    {
        // Data and code occupy separate page ranges so their protections can be set
        // independently. Code begins on the first page boundary past the unwind
        // header and the data section.

        // A single function plus unwind information must fit in one block
        if (alignToPageSize(kMaxReservedDataSize + dataSize) + codeSize > blockSize)
            return {};

        // Grab a fresh block when the current one has insufficient room
        if (alignToPageSize(dataSize) + codeSize > size_t(blockEnd - blockPos))
        {
            if (!allocateNewBlock(startOffset))
                return {};

            CODEGEN_ASSERT(alignToPageSize(startOffset + dataSize) + codeSize <= size_t(blockEnd - blockPos));
        }

        codeOffset = alignToPageSize(startOffset + dataSize);
        dataOffset = codeOffset - dataSize; // data ends exactly at the code page boundary
        totalSize = alignToPageSize(dataSize) + codeSize;
        pageAlignedSize = alignToPageSize(codeOffset + codeSize);
    }
    else if (FFlag::LuauCodegenProtectData)
    {
        // No data section to protect; code starts right after the unwind header
        totalSize = codeSize;

        if (totalSize > blockSize - kMaxReservedDataSize)
            return {};

        if (totalSize > size_t(blockEnd - blockPos))
        {
            if (!allocateNewBlock(startOffset))
                return {};

            CODEGEN_ASSERT(totalSize <= size_t(blockEnd - blockPos));
        }

        dataOffset = startOffset;
        codeOffset = startOffset;
        pageAlignedSize = alignToPageSize(startOffset + totalSize);
    }
    else
    {
        // Legacy layout: data padded so the code section keeps its alignment
        size_t alignedDataSize = (dataSize + (kCodeAlignment - 1)) & ~(kCodeAlignment - 1);

        totalSize = alignedDataSize + codeSize;

        // A single function plus unwind information must fit in one block
        if (totalSize > blockSize - kMaxReservedDataSize)
            return {};

        if (totalSize > size_t(blockEnd - blockPos))
        {
            if (!allocateNewBlock(startOffset))
                return {};

            CODEGEN_ASSERT(totalSize <= size_t(blockEnd - blockPos));
        }

        dataOffset = startOffset + alignedDataSize - dataSize;
        codeOffset = startOffset + alignedDataSize;
        pageAlignedSize = alignToPageSize(startOffset + totalSize);
    }

    CODEGEN_ASSERT((uintptr_t(blockPos) & (kPageSize - 1)) == 0); // Allocation starts on page boundary

    if (dataSize != 0)
        memcpy(blockPos + dataOffset, data, dataSize);
    if (codeSize != 0)
        memcpy(blockPos + codeOffset, code, codeSize);

    if (FFlag::LuauCodegenProtectData && dataSize != 0)
    {
        // Data pages become read-only and code pages executable, independently
        if (!makePagesReadOnly(blockPos, codeOffset))
            return {};
        if (!makePagesExecutable(blockPos + codeOffset, pageAlignedSize - codeOffset))
            return {};
    }
    else
    {
        // Everything up to pageAlignedSize is treated as one executable range
        if (!makePagesExecutable(blockPos, pageAlignedSize))
            return {};
    }

    liveAllocations++;

    flushInstructionCache(blockPos + codeOffset, codeSize);

    CodeAllocationData result;
    result.start = blockPos + startOffset;
    result.size = totalSize;
    result.codeStart = blockPos + codeOffset;
    result.allocationStart = blockPos;
    result.allocationSize = pageAlignedSize;

    // Ensure that future allocations from the block start from a page boundary.
    // This is important since we use W^X, and writing to the previous page would require briefly removing
    // executable bit from it, which may result in access violations if that code is being executed concurrently.
    if (pageAlignedSize <= size_t(blockEnd - blockPos))
    {
        blockPos += pageAlignedSize;
        CODEGEN_ASSERT((uintptr_t(blockPos) & (kPageSize - 1)) == 0);
        CODEGEN_ASSERT(blockPos <= blockEnd);
    }
    else
    {
        // Future allocations will need to allocate fresh blocks
        blockPos = blockEnd;
    }

    return result;
}
397
398
// Retires an allocation made by allocate(): strips the execute permission from
// its pages and drops the live-allocation count. The pages themselves are not
// yet returned to the block for reuse.
void CodeAllocator::deallocate(CodeAllocationData codeAllocationData)
{
    CODEGEN_ASSERT(FFlag::LuauCodegenFreeBlocks);

    // A default-constructed/failed allocation is a no-op to deallocate
    if (codeAllocationData.allocationStart == nullptr)
        return;

    [[maybe_unused]] bool protectOk = makePagesNotExecutable(codeAllocationData.allocationStart, codeAllocationData.allocationSize);
    CODEGEN_ASSERT(protectOk);

    CODEGEN_ASSERT(liveAllocations != 0);
    liveAllocations--;

    // TODO: new allocations should be able to reuse the freed pages (but note that first block page contains unwind data)
}
413
414
bool CodeAllocator::allocateNewBlock(size_t& unwindInfoSize)
415
{
416
// Stop allocating once we reach a global limit
417
if ((blocks.size() + 1) * blockSize > maxTotalSize)
418
return false;
419
420
uint8_t* block = allocatePages(blockSize);
421
422
if (!block)
423
return false;
424
425
blockPos = block;
426
blockEnd = block + blockSize;
427
428
blocks.push_back(block);
429
430
if (createBlockUnwindInfo)
431
{
432
void* unwindInfo = createBlockUnwindInfo(context, block, blockSize, unwindInfoSize);
433
434
// 'Round up' to preserve alignment of the following data and code
435
unwindInfoSize = (unwindInfoSize + (kCodeAlignment - 1)) & ~(kCodeAlignment - 1);
436
437
CODEGEN_ASSERT(unwindInfoSize <= kMaxReservedDataSize);
438
439
if (!unwindInfo)
440
return false;
441
442
unwindInfos.push_back(unwindInfo);
443
}
444
445
return true;
446
}
447
448
uint8_t* CodeAllocator::allocatePages(size_t size) const
449
{
450
const size_t pageAlignedSize = alignToPageSize(size);
451
452
uint8_t* const mem = allocatePagesImpl(pageAlignedSize);
453
if (mem == nullptr)
454
return nullptr;
455
456
if (allocationCallback)
457
allocationCallback(allocationCallbackContext, nullptr, 0, mem, pageAlignedSize);
458
459
return mem;
460
}
461
462
void CodeAllocator::freePages(uint8_t* mem, size_t size) const
463
{
464
const size_t pageAlignedSize = alignToPageSize(size);
465
466
if (allocationCallback)
467
allocationCallback(allocationCallbackContext, mem, pageAlignedSize, nullptr, 0);
468
469
freePagesImpl(mem, pageAlignedSize);
470
}
471
472
} // namespace CodeGen
473
} // namespace Luau
474
475