Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Roblox
GitHub Repository: Roblox/luau
Path: blob/master/CodeGen/src/CodeGenContext.cpp
2725 views
1
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
2
#include "CodeGenContext.h"
3
4
#include "CodeGenA64.h"
5
#include "CodeGenLower.h"
6
#include "CodeGenX64.h"
7
8
#include "Luau/CodeGenCommon.h"
9
#include "Luau/CodeBlockUnwind.h"
10
#include "Luau/UnwindBuilder.h"
11
#include "Luau/UnwindBuilderDwarf2.h"
12
#include "Luau/UnwindBuilderWin.h"
13
14
#include "lapi.h"
15
16
// Size of each executable code block requested from the code allocator (4 MB).
LUAU_FASTINTVARIABLE(LuauCodeGenBlockSize, 4 * 1024 * 1024)
// Upper bound on total executable memory the code allocator may hold (256 MB).
LUAU_FASTINTVARIABLE(LuauCodeGenMaxTotalSize, 256 * 1024 * 1024)
// When set, code blocks are deallocated when no longer referenced (see dtor below).
LUAU_FASTFLAG(LuauCodegenFreeBlocks)
19
20
namespace Luau
21
{
22
namespace CodeGen
23
{
24
25
// Sentinel instruction installed into Proto::codeentry to signal that the
// function has a native entry point (see bindNativeProtos below).
static const Instruction kCodeEntryInsn = LOP_NATIVECALL;

// From CodeGen.cpp
// Process-wide perf-event logging hook and its opaque user context;
// registered via setPerfLog.
static void* gPerfLogContext = nullptr;
static PerfLogFn gPerfLogFn = nullptr;

// Implemented in the per-architecture CodeGen translation units.
unsigned int getCpuFeaturesA64();
unsigned int getCpuFeaturesX64();
33
34
void setPerfLog(void* context, PerfLogFn logFn)
35
{
36
gPerfLogContext = context;
37
gPerfLogFn = logFn;
38
}
39
40
// Reports one compiled function to the registered perf callback, building a
// human-readable name of the form "<luau> source:line debugname".
static void logPerfFunction(Proto* p, uintptr_t addr, unsigned size)
{
    CODEGEN_ASSERT(p->source);

    const char* source = getstr(p->source);
    // '=' and '@' prefixes mark named chunks (strip the marker); anything else
    // is an anonymous string chunk and gets a placeholder name.
    source = (source[0] == '=' || source[0] == '@') ? source + 1 : "[string]";

    char name[256];
    snprintf(name, sizeof(name), "<luau> %s:%d %s", source, p->linedefined, p->debugname ? getstr(p->debugname) : "");

    if (gPerfLogFn)
        gPerfLogFn(gPerfLogContext, addr, size, name);
}
53
54
// Reports the shared helper region and every compiled function of a module to
// the perf callback. No-op when no callback is registered.
static void logPerfFunctions(
    const std::vector<Proto*>& moduleProtos,
    const uint8_t* nativeModuleBaseAddress,
    const std::vector<NativeProtoExecDataPtr>& nativeProtos
)
{
    if (gPerfLogFn == nullptr)
        return;

    // Everything from the module base up to the first function's entry is the
    // shared helper code emitted ahead of the functions.
    if (nativeProtos.size() > 0)
        gPerfLogFn(
            gPerfLogContext,
            uintptr_t(nativeModuleBaseAddress),
            unsigned(getNativeProtoExecDataHeader(nativeProtos[0].get()).entryOffsetOrAddress - nativeModuleBaseAddress),
            "<luau helpers>"
        );

    auto protoIt = moduleProtos.begin();

    for (const NativeProtoExecDataPtr& nativeProto : nativeProtos)
    {
        const NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeProto.get());

        // Linear merge: advance to the Proto whose bytecode id matches this
        // native proto. Assumes both sequences are ordered consistently by
        // bytecode id — otherwise the assert below fires.
        while (protoIt != moduleProtos.end() && uint32_t((**protoIt).bytecodeid) != header.bytecodeId)
        {
            ++protoIt;
        }

        CODEGEN_ASSERT(protoIt != moduleProtos.end());

        logPerfFunction(*protoIt, uintptr_t(header.entryOffsetOrAddress), uint32_t(header.nativeCodeSize));
    }
}
87
88
// If Release is true, the native proto will be removed from the vector and
// ownership will be assigned to the Proto object (for use with the
// StandaloneCodeContext). If Release is false, the native proto will not be
// removed from the vector (for use with the SharedCodeContext).
//
// Returns the number of Protos that were bound to native code.
template<bool Release, typename NativeProtosVector>
[[nodiscard]] static uint32_t bindNativeProtos(const std::vector<Proto*>& moduleProtos, NativeProtosVector& nativeProtos)
{
    uint32_t protosBound = 0;

    auto protoIt = moduleProtos.begin();

    for (auto& nativeProto : nativeProtos)
    {
        const NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeProto.get());

        // Linear merge by bytecode id; assumes both sequences are ordered
        // consistently (the assert below fires if a match is missing).
        while (protoIt != moduleProtos.end() && uint32_t((**protoIt).bytecodeid) != header.bytecodeId)
        {
            ++protoIt;
        }

        CODEGEN_ASSERT(protoIt != moduleProtos.end());

        // The NativeProtoExecData is now owned by the VM and will be destroyed
        // via onDestroyFunction.
        Proto* proto = *protoIt;

        if constexpr (Release)
        {
            proto->execdata = nativeProto.release();
        }
        else
        {
            proto->execdata = nativeProto.get();
        }

        // Point the VM at the native entry and mark the function as natively
        // callable via the sentinel code-entry instruction.
        proto->exectarget = reinterpret_cast<uintptr_t>(header.entryOffsetOrAddress);
        proto->codeentry = &kCodeEntryInsn;

        ++protosBound;
    }

    return protosBound;
}
131
132
// Sets up the executable-code allocator, the platform-appropriate unwind info
// builder, and the shared native helper context.
BaseCodeGenContext::BaseCodeGenContext(size_t blockSize, size_t maxTotalSize, AllocationCallback* allocationCallback, void* allocationCallbackContext)
    : codeAllocator{blockSize, maxTotalSize, allocationCallback, allocationCallbackContext}
{
    CODEGEN_ASSERT(isSupported());

    // Windows uses its own unwind-info format; everything else uses DWARF2.
#if defined(_WIN32)
    unwindBuilder = std::make_unique<UnwindBuilderWin>();
#else
    unwindBuilder = std::make_unique<UnwindBuilderDwarf2>();
#endif

    // The allocator invokes these callbacks to register/unregister unwind
    // info for each executable block it creates or destroys.
    codeAllocator.context = unwindBuilder.get();
    codeAllocator.createBlockUnwindInfo = createBlockUnwindInfo;
    codeAllocator.destroyBlockUnwindInfo = destroyBlockUnwindInfo;

    initFunctions(context);
}
149
150
BaseCodeGenContext::~BaseCodeGenContext()
{
    // Release the block holding the gate/helper code; only done when block
    // freeing is enabled via the fast flag.
    if (FFlag::LuauCodegenFreeBlocks)
        codeAllocator.deallocate(gateAllocationData);
}
155
156
// Emits the per-architecture entry 'gate' and helper functions into executable
// memory. Returns false if allocation/assembly failed.
[[nodiscard]] bool BaseCodeGenContext::initHeaderFunctions()
{
#if defined(CODEGEN_TARGET_X64)
    if (!X64::initHeaderFunctions(*this))
        return false;
#elif defined(CODEGEN_TARGET_A64)
    if (!A64::initHeaderFunctions(*this))
        return false;
#endif

    // Report the gate region to perf; 4096 is used as the reported size —
    // presumably one page covering the gate code. TODO confirm against the
    // per-architecture emitters.
    if (gPerfLogFn)
        gPerfLogFn(gPerfLogContext, uintptr_t(context.gateEntry), 4096, "<luau gate>");

    return true;
}
171
172
173
// Context owned by a single VM; native code produced here is not shared.
StandaloneCodeGenContext::StandaloneCodeGenContext(
    size_t blockSize,
    size_t maxTotalSize,
    AllocationCallback* allocationCallback,
    void* allocationCallbackContext
)
    : BaseCodeGenContext{blockSize, maxTotalSize, allocationCallback, allocationCallbackContext}
    , sharedAllocator{&codeAllocator}
{
}
183
184
[[nodiscard]] std::optional<ModuleBindResult> StandaloneCodeGenContext::tryBindExistingModule(const ModuleId&, const std::vector<Proto*>&)
185
{
186
// The StandaloneCodeGenContext does not support sharing of native code
187
return {};
188
}
189
190
// Copies the assembled data/code into executable memory and binds each Proto
// to its native entry point. The ModuleId is ignored: standalone contexts do
// not deduplicate modules.
[[nodiscard]] ModuleBindResult StandaloneCodeGenContext::bindModule(
    const std::optional<ModuleId>&,
    const std::vector<Proto*>& moduleProtos,
    std::vector<NativeProtoExecDataPtr> nativeProtos,
    const uint8_t* data,
    size_t dataSize,
    const uint8_t* code,
    size_t codeSize
)
{
    if (FFlag::LuauCodegenFreeBlocks)
    {
        // New path: place the module in the shared allocator so its memory is
        // refcounted and can be freed when the last Proto releases it.
        NativeModuleRef moduleRef = sharedAllocator.insertAnonymousNativeModule(std::move(nativeProtos), data, dataSize, code, codeSize);

        // If we did not get a NativeModule back, allocation failed:
        if (moduleRef.empty())
            return {CodeGenCompilationResult::AllocationFailed};

        logPerfFunctions(moduleProtos, moduleRef->getModuleBaseAddress(), moduleRef->getNativeProtos());

        // Bind the native protos and acquire an owning reference for each:
        const uint32_t protosBound = bindNativeProtos<false>(moduleProtos, moduleRef->getNativeProtos());
        moduleRef->addRefs(protosBound);

        return {CodeGenCompilationResult::Success, protosBound};
    }
    else
    {
        // Legacy path: one-shot allocation, exec data ownership is transferred
        // to the Protos (bindNativeProtos<true> below).
        uint8_t* nativeData = nullptr;
        size_t sizeNativeData = 0;
        uint8_t* codeStart = nullptr;
        if (!codeAllocator.allocate_DEPRECATED(data, int(dataSize), code, int(codeSize), nativeData, sizeNativeData, codeStart))
        {
            return {CodeGenCompilationResult::AllocationFailed};
        }

        // Relocate the entry offsets to their final executable addresses:
        for (const NativeProtoExecDataPtr& nativeProto : nativeProtos)
        {
            NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeProto.get());

            // entryOffsetOrAddress held an offset until now; convert to an address.
            header.entryOffsetOrAddress = codeStart + reinterpret_cast<uintptr_t>(header.entryOffsetOrAddress);
        }

        logPerfFunctions(moduleProtos, codeStart, nativeProtos);

        const uint32_t protosBound = bindNativeProtos<true>(moduleProtos, nativeProtos);

        return {CodeGenCompilationResult::Success, protosBound};
    }
}
241
242
void StandaloneCodeGenContext::onCloseState() noexcept
{
    // The StandaloneCodeGenContext is owned by the one VM that owns it, so when
    // that VM is destroyed, we destroy *this as well:
    delete this;
}
248
249
// Called by the VM when a natively-compiled Proto is garbage collected.
void StandaloneCodeGenContext::onDestroyFunction(void* execdata) noexcept
{
    // Under the new path the exec data is owned by the refcounted NativeModule;
    // under the legacy path the Proto owned it directly and we free it here.
    if (FFlag::LuauCodegenFreeBlocks)
        getNativeProtoExecDataHeader(static_cast<const uint32_t*>(execdata)).nativeModule->release();
    else
        destroyNativeProtoExecData(static_cast<uint32_t*>(execdata));
}
256
257
258
// Context shared between multiple VMs; compiled modules are deduplicated via
// the shared allocator and refcounted.
SharedCodeGenContext::SharedCodeGenContext(
    size_t blockSize,
    size_t maxTotalSize,
    AllocationCallback* allocationCallback,
    void* allocationCallbackContext
)
    : BaseCodeGenContext{blockSize, maxTotalSize, allocationCallback, allocationCallbackContext}
    , sharedAllocator{&codeAllocator}
{
}
268
269
// Attempts to bind this VM's Protos to a module that was already compiled by
// another VM. Returns nullopt when the module is not present yet (the caller
// then compiles and calls bindModule).
[[nodiscard]] std::optional<ModuleBindResult> SharedCodeGenContext::tryBindExistingModule(
    const ModuleId& moduleId,
    const std::vector<Proto*>& moduleProtos
)
{
    NativeModuleRef nativeModule = sharedAllocator.tryGetNativeModule(moduleId);
    if (nativeModule.empty())
    {
        return {};
    }

    // Bind the native protos and acquire an owning reference for each:
    const uint32_t protosBound = bindNativeProtos<false>(moduleProtos, nativeModule->getNativeProtos());
    nativeModule->addRefs(protosBound);

    return {{CodeGenCompilationResult::Success, protosBound}};
}
286
287
// Inserts (or finds) the compiled module in the shared allocator and binds
// each Proto to its native entry point. When a ModuleId is provided, a module
// compiled concurrently by another VM may be reused instead of the one passed in.
[[nodiscard]] ModuleBindResult SharedCodeGenContext::bindModule(
    const std::optional<ModuleId>& moduleId,
    const std::vector<Proto*>& moduleProtos,
    std::vector<NativeProtoExecDataPtr> nativeProtos,
    const uint8_t* data,
    size_t dataSize,
    const uint8_t* code,
    size_t codeSize
)
{
    // .second is true when a new module was inserted (vs reusing an existing one).
    const std::pair<NativeModuleRef, bool> insertionResult = [&]() -> std::pair<NativeModuleRef, bool>
    {
        if (moduleId.has_value())
        {
            return sharedAllocator.getOrInsertNativeModule(*moduleId, std::move(nativeProtos), data, dataSize, code, codeSize);
        }
        else
        {
            // Anonymous modules are never deduplicated, so they always count as new.
            return {sharedAllocator.insertAnonymousNativeModule(std::move(nativeProtos), data, dataSize, code, codeSize), true};
        }
    }();

    // If we did not get a NativeModule back, allocation failed:
    if (insertionResult.first.empty())
        return {CodeGenCompilationResult::AllocationFailed};

    // If we allocated a new module, log the function code ranges for perf:
    if (insertionResult.second)
        logPerfFunctions(moduleProtos, insertionResult.first->getModuleBaseAddress(), insertionResult.first->getNativeProtos());

    // Bind the native protos and acquire an owning reference for each:
    const uint32_t protosBound = bindNativeProtos<false>(moduleProtos, insertionResult.first->getNativeProtos());
    insertionResult.first->addRefs(protosBound);

    return {CodeGenCompilationResult::Success, protosBound};
}
323
324
void SharedCodeGenContext::onCloseState() noexcept
{
    // The lifetime of the SharedCodeGenContext is managed separately from the
    // VMs that use it. When a VM is destroyed, we don't need to do anything
    // here.
}
330
331
// Called when a natively-compiled Proto is garbage collected: drop this
// Proto's owning reference on the refcounted NativeModule.
void SharedCodeGenContext::onDestroyFunction(void* execdata) noexcept
{
    getNativeProtoExecDataHeader(static_cast<const uint32_t*>(execdata)).nativeModule->release();
}
335
336
337
// Convenience overload using the default fast-int sizing and no custom allocator.
[[nodiscard]] UniqueSharedCodeGenContext createSharedCodeGenContext()
{
    return createSharedCodeGenContext(size_t(FInt::LuauCodeGenBlockSize), size_t(FInt::LuauCodeGenMaxTotalSize), nullptr, nullptr);
}
341
342
// Convenience overload using the default fast-int sizing with a caller-supplied
// allocation callback.
[[nodiscard]] UniqueSharedCodeGenContext createSharedCodeGenContext(AllocationCallback* allocationCallback, void* allocationCallbackContext)
{
    return createSharedCodeGenContext(
        size_t(FInt::LuauCodeGenBlockSize), size_t(FInt::LuauCodeGenMaxTotalSize), allocationCallback, allocationCallbackContext
    );
}
348
349
// Creates a SharedCodeGenContext with explicit block sizing and an optional
// allocation callback. Returns an empty pointer if the executable helper/gate
// functions could not be initialized.
[[nodiscard]] UniqueSharedCodeGenContext createSharedCodeGenContext(
    size_t blockSize,
    size_t maxTotalSize,
    AllocationCallback* allocationCallback,
    void* allocationCallbackContext
)
{
    // Forward the caller-provided allocation callback and context. Previously
    // nullptr/nullptr were passed here, silently discarding the callbacks that
    // the forwarding overloads above accept from the caller.
    UniqueSharedCodeGenContext codeGenContext{new SharedCodeGenContext{blockSize, maxTotalSize, allocationCallback, allocationCallbackContext}};

    if (!codeGenContext->initHeaderFunctions())
        return {};

    return codeGenContext;
}
363
364
// Destroys a SharedCodeGenContext previously created by createSharedCodeGenContext.
// All VMs using the context must be closed before calling this.
void destroySharedCodeGenContext(const SharedCodeGenContext* codeGenContext) noexcept
{
    delete codeGenContext;
}
368
369
// Deleter used by UniqueSharedCodeGenContext; delegates to the public destroy function.
void SharedCodeGenContextDeleter::operator()(const SharedCodeGenContext* codeGenContext) const noexcept
{
    destroySharedCodeGenContext(codeGenContext);
}
373
374
375
// Retrieves the codegen context stored in the VM's execution-callback context
// slot; nullptr when codegen was never initialized for this VM.
[[nodiscard]] static BaseCodeGenContext* getCodeGenContext(lua_State* L) noexcept
{
    return static_cast<BaseCodeGenContext*>(L->global->ecb.context);
}
379
380
// VM close hook: notify the context, then clear all execution callbacks so no
// further codegen entry points are invoked on this (dying) VM.
static void onCloseState(lua_State* L)
{
    getCodeGenContext(L)->onCloseState();
    L->global->ecb = lua_ExecutionCallbacks{};
}
385
386
// Proto destruction hook: release the native exec data and revert the Proto
// to plain bytecode execution.
static void onDestroyFunction(lua_State* L, Proto* proto)
{
    getCodeGenContext(L)->onDestroyFunction(proto->execdata);
    proto->execdata = nullptr;
    proto->exectarget = 0;
    proto->codeentry = proto->code;
}
393
394
// Native-execution entry hook: translate the current bytecode pc into a native
// code address via the per-instruction offset table and jump through the gate.
static int onEnter(lua_State* L, Proto* proto)
{
    BaseCodeGenContext* codeGenContext = getCodeGenContext(L);

    CODEGEN_ASSERT(proto->execdata);
    CODEGEN_ASSERT(L->ci->savedpc >= proto->code && L->ci->savedpc < proto->code + proto->sizecode);

    // execdata starts with one uint32_t offset per bytecode instruction.
    uintptr_t target = proto->exectarget + static_cast<uint32_t*>(proto->execdata)[L->ci->savedpc - proto->code];

    // Returns 1 to finish the function in the VM
    return GateFn(codeGenContext->context.gateEntry)(L, proto, target, &codeGenContext->context);
}
406
407
// Entry hook installed when native execution is disabled: fall back to the VM.
static int onEnterDisabled(lua_State* L, Proto* proto)
{
    // If the function wasn't entered natively, it cannot be resumed natively later
    L->ci->flags &= ~LUA_CALLINFO_NATIVE;

    return 1;
}
414
415
// Defined in CodeGen.cpp
void onDisable(lua_State* L, Proto* proto);

// Reports the native-codegen memory attributable to one Proto (exec data plus
// its share of native code) for the VM's memory accounting.
static size_t getMemorySize(lua_State* L, Proto* proto)
{
    const NativeProtoExecDataHeader& execDataHeader = getNativeProtoExecDataHeader(static_cast<const uint32_t*>(proto->execdata));

    const size_t execDataSize = sizeof(NativeProtoExecDataHeader) + execDataHeader.bytecodeInstructionCount * sizeof(Instruction);

    // While execDataSize is exactly the size of the allocation we made and hold for 'execdata' field, the code size is approximate
    // This is because code+data page is shared and owned by all Proto from a single module and each one can keep the whole region alive
    // So individual Proto being freed by GC will not reflect memory use by native code correctly
    return execDataSize + execDataHeader.nativeCodeSize;
}
429
430
// Returns a pointer to the extra native data stored after the per-instruction
// offset table in the Proto's exec data, and the number of entries via 'count'.
static char* getCounterData(lua_State* L, Proto* proto, size_t* count)
{
    CODEGEN_ASSERT(count != nullptr);

    const NativeProtoExecDataHeader& execDataHeader = getNativeProtoExecDataHeader(static_cast<const uint32_t*>(proto->execdata));

    // extraDataCount is in uint32_t units; the divide by 4 presumably converts
    // to a wider per-counter granularity expected by the caller — TODO confirm
    // against the consumer of getcounterdata.
    *count = execDataHeader.extraDataCount / 4;
    // Extra data begins right after the sizecode instruction offsets.
    return reinterpret_cast<char*>(static_cast<uint32_t*>(proto->execdata) + proto->sizecode);
}
439
440
// Wires the VM's execution callbacks to this codegen context, enabling native
// entry, teardown, and memory-accounting hooks.
static void initializeExecutionCallbacks(lua_State* L, BaseCodeGenContext* codeGenContext) noexcept
{
    CODEGEN_ASSERT(codeGenContext != nullptr);

    lua_ExecutionCallbacks* ecb = &L->global->ecb;

    ecb->context = codeGenContext;
    ecb->close = onCloseState;
    ecb->destroy = onDestroyFunction;
    ecb->enter = onEnter;
    ecb->disable = onDisable;
    ecb->getmemorysize = getMemorySize;
    ecb->getcounterdata = getCounterData;
}
454
455
// Enables native codegen for a VM with default sizing and no custom allocator.
void create(lua_State* L)
{
    return create(L, size_t(FInt::LuauCodeGenBlockSize), size_t(FInt::LuauCodeGenMaxTotalSize), nullptr, nullptr);
}
459
460
// Enables native codegen for a VM with default sizing and a custom allocation callback.
void create(lua_State* L, AllocationCallback* allocationCallback, void* allocationCallbackContext)
{
    return create(L, size_t(FInt::LuauCodeGenBlockSize), size_t(FInt::LuauCodeGenMaxTotalSize), allocationCallback, allocationCallbackContext);
}
464
465
// Enables native codegen for a VM with a dedicated StandaloneCodeGenContext.
// Silently leaves the VM without codegen if helper initialization fails.
void create(lua_State* L, size_t blockSize, size_t maxTotalSize, AllocationCallback* allocationCallback, void* allocationCallbackContext)
{
    std::unique_ptr<StandaloneCodeGenContext> codeGenContext =
        std::make_unique<StandaloneCodeGenContext>(blockSize, maxTotalSize, allocationCallback, allocationCallbackContext);

    if (!codeGenContext->initHeaderFunctions())
        return;

    // Ownership passes to the VM; the context deletes itself in onCloseState.
    initializeExecutionCallbacks(L, codeGenContext.release());
}
475
476
// Enables native codegen for a VM using an externally-owned SharedCodeGenContext.
void create(lua_State* L, SharedCodeGenContext* codeGenContext)
{
    initializeExecutionCallbacks(L, codeGenContext);
}
480
481
// Builds the per-Proto exec data: a per-bytecode-instruction table of native
// code offsets (relative to the function entry), followed by any extra native
// data, plus a filled-in header. Offsets are relative until bindModule
// relocates or rebases them.
[[nodiscard]] static NativeProtoExecDataPtr createNativeProtoExecData(Proto* proto, const IrBuilder& ir)
{
    uint32_t extraDataCount = uint32_t(ir.function.extraNativeData.size());

    NativeProtoExecDataPtr nativeExecData = createNativeProtoExecData(proto->sizecode, extraDataCount);

    uint32_t instTarget = ir.function.entryLocation;
    // Instructions with no assigned native location map past the end of the function.
    uint32_t unassignedOffset = ir.function.endLocation - instTarget;

    for (int i = 0; i < proto->sizecode; ++i)
    {
        const BytecodeMapping& bcMapping = ir.function.bcMapping[i];

        CODEGEN_ASSERT(bcMapping.asmLocation >= instTarget);

        // ~0u marks a bytecode instruction with no generated native code.
        if (bcMapping.asmLocation != ~0u)
            nativeExecData[i] = bcMapping.asmLocation - instTarget;
        else
            nativeExecData[i] = unassignedOffset;
    }

    // After the instruction offsets, custom native data is placed
    for (uint32_t i = 0; i < extraDataCount; i++)
        nativeExecData[proto->sizecode + i] = ir.function.extraNativeData[i];

    // Set first instruction offset to 0 so that entering this function still
    // executes any generated entry code.
    nativeExecData[0] = 0;

    NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeExecData.get());
    // Stored as an offset for now; converted to a real address at bind time.
    header.entryOffsetOrAddress = reinterpret_cast<const uint8_t*>(static_cast<uintptr_t>(instTarget));
    header.bytecodeId = uint32_t(proto->bytecodeid);
    header.bytecodeInstructionCount = proto->sizecode;
    header.extraDataCount = extraDataCount;

    return nativeExecData;
}
518
519
// Translates one Proto to IR, lowers it into the module's assembly builder,
// and produces its exec data. Returns an empty pointer on failure with the
// reason in 'result'. 'totalIrInstCount' accumulates IR size across the module
// to enforce the global instruction limit.
template<typename AssemblyBuilder>
[[nodiscard]] static NativeProtoExecDataPtr createNativeFunction(
    AssemblyBuilder& build,
    ModuleHelpers& helpers,
    Proto* proto,
    uint32_t& totalIrInstCount,
    const CompilationOptions& options,
    CodeGenCompilationResult& result
)
{
    IrBuilder ir(options.hooks);
    ir.buildFunctionIr(proto);

    unsigned instCount = unsigned(ir.function.instructions.size());

    // Bail out before lowering if this function would push the module over the
    // IR instruction budget.
    if (totalIrInstCount + instCount >= unsigned(FInt::CodegenHeuristicsInstructionLimit.value))
    {
        result = CodeGenCompilationResult::CodeGenOverflowInstructionLimit;
        return {};
    }

    totalIrInstCount += instCount;

    AssemblyOptions assemblyOptions;
    assemblyOptions.compilationOptions = options;

    if (!lowerFunction(ir, build, helpers, proto, assemblyOptions, /* stats */ nullptr, result))
    {
        return {};
    }

    return createNativeProtoExecData(proto, ir);
}
552
553
// Compiles the Lua function at stack index 'idx' (and its inner functions) to
// native code and binds the results into the VM. When 'moduleId' is provided
// and a shared context holds that module already, binding is done without
// recompiling. Per-function failures are recorded in the result; module-wide
// failures abort the whole compilation.
[[nodiscard]] static CompilationResult compileInternal(
    const std::optional<ModuleId>& moduleId,
    lua_State* L,
    int idx,
    const CompilationOptions& options,
    CompilationStats* stats
)
{
    CODEGEN_ASSERT(lua_isLfunction(L, idx));
    const TValue* func = luaA_toobject(L, idx);

    Proto* root = clvalue(func)->l.p;

    // Respect the opt-in flag: only compile modules/functions tagged as native.
    if ((options.flags & CodeGen_OnlyNativeModules) != 0 && (root->flags & LPF_NATIVE_MODULE) == 0 && (root->flags & LPF_NATIVE_FUNCTION) == 0)
        return CompilationResult{CodeGenCompilationResult::NotNativeModule};

    BaseCodeGenContext* codeGenContext = getCodeGenContext(L);
    if (codeGenContext == nullptr)
        return CompilationResult{CodeGenCompilationResult::CodeGenNotInitialized};

    std::vector<Proto*> protos;
    gatherFunctions(protos, root, options.flags, root->flags & LPF_NATIVE_FUNCTION);

    // Skip protos that have been compiled during previous invocations of CodeGen::compile
    protos.erase(
        std::remove_if(
            protos.begin(),
            protos.end(),
            [](Proto* p)
            {
                return p == nullptr || p->execdata != nullptr;
            }
        ),
        protos.end()
    );

    if (protos.empty())
        return CompilationResult{CodeGenCompilationResult::NothingToCompile};

    if (stats != nullptr)
        stats->functionsTotal = uint32_t(protos.size());

    // Fast path: another VM may have already compiled this exact module.
    if (moduleId.has_value())
    {
        if (std::optional<ModuleBindResult> existingModuleBindResult = codeGenContext->tryBindExistingModule(*moduleId, protos))
        {
            if (stats != nullptr)
                stats->functionsBound = existingModuleBindResult->functionsBound;

            return CompilationResult{existingModuleBindResult->compilationResult};
        }
    }

    // CPU feature detection is done once per process (function-local static).
#if defined(CODEGEN_TARGET_A64)
    static unsigned int cpuFeatures = getCpuFeaturesA64();
    A64::AssemblyBuilderA64 build(/* logText= */ false, cpuFeatures);
#else
    static unsigned int cpuFeatures = getCpuFeaturesX64();
    X64::AssemblyBuilderX64 build(/* logText= */ false, cpuFeatures);
#endif

    // Shared helper routines are emitted once at the start of the module.
    ModuleHelpers helpers;
#if defined(CODEGEN_TARGET_A64)
    A64::assembleHelpers(build, helpers);
#else
    X64::assembleHelpers(build, helpers);
#endif

    CompilationResult compilationResult;

    std::vector<NativeProtoExecDataPtr> nativeProtos;
    nativeProtos.reserve(protos.size());

    uint32_t totalIrInstCount = 0;

    // Compile each proto; per-function failures are recorded but do not stop
    // compilation of the remaining functions.
    for (size_t i = 0; i != protos.size(); ++i)
    {
        CodeGenCompilationResult protoResult = CodeGenCompilationResult::Success;

        NativeProtoExecDataPtr nativeExecData = createNativeFunction(build, helpers, protos[i], totalIrInstCount, options, protoResult);
        if (nativeExecData != nullptr)
        {
            nativeProtos.push_back(std::move(nativeExecData));
        }
        else
        {
            compilationResult.protoFailures.push_back(
                {protoResult, protos[i]->debugname ? getstr(protos[i]->debugname) : "", protos[i]->linedefined}
            );
        }
    }

    // Very large modules might result in overflowing a jump offset; in this
    // case we currently abandon the entire module
    if (!build.finalize())
    {
        compilationResult.result = CodeGenCompilationResult::CodeGenAssemblerFinalizationFailure;
        return compilationResult;
    }

    // If no functions were assembled, we don't need to allocate/copy executable pages for helpers
    if (nativeProtos.empty())
        return compilationResult;

    if (stats != nullptr)
    {
        for (const NativeProtoExecDataPtr& nativeExecData : nativeProtos)
        {
            NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeExecData.get());

            stats->bytecodeSizeBytes += header.bytecodeInstructionCount * sizeof(Instruction);

            // Account for the native -> bytecode instruction offsets mapping:
            stats->nativeMetadataSizeBytes += header.bytecodeInstructionCount * sizeof(uint32_t);
        }

        stats->functionsCompiled += uint32_t(nativeProtos.size());
        stats->nativeCodeSizeBytes += build.code.size() * sizeof(build.code[0]);
        stats->nativeDataSizeBytes += build.data.size();
    }

    // Compute each function's native code size as the distance to the next
    // function's entry (or to the end of the emitted code for the last one).
    for (size_t i = 0; i < nativeProtos.size(); ++i)
    {
        NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeProtos[i].get());

        uint32_t begin = uint32_t(reinterpret_cast<uintptr_t>(header.entryOffsetOrAddress));
        uint32_t end = i + 1 < nativeProtos.size() ? uint32_t(uintptr_t(getNativeProtoExecDataHeader(nativeProtos[i + 1].get()).entryOffsetOrAddress))
                                                   : uint32_t(build.code.size() * sizeof(build.code[0]));

        CODEGEN_ASSERT(begin < end);

        header.nativeCodeSize = end - begin;
    }

    // Copy the assembled code/data into executable memory and attach the
    // native entry points to the Protos.
    const ModuleBindResult bindResult = codeGenContext->bindModule(
        moduleId,
        protos,
        std::move(nativeProtos),
        reinterpret_cast<const uint8_t*>(build.data.data()),
        build.data.size(),
        reinterpret_cast<const uint8_t*>(build.code.data()),
        build.code.size() * sizeof(build.code[0])
    );

    if (stats != nullptr)
        stats->functionsBound = bindResult.functionsBound;

    if (bindResult.compilationResult != CodeGenCompilationResult::Success)
        compilationResult.result = bindResult.compilationResult;

    return compilationResult;
}
705
706
// Public entry point: compile with module-level deduplication via 'moduleId'.
CompilationResult compile(const ModuleId& moduleId, lua_State* L, int idx, const CompilationOptions& options, CompilationStats* stats)
{
    return compileInternal(moduleId, L, idx, options, stats);
}
710
711
// Public entry point: compile without module deduplication.
CompilationResult compile(lua_State* L, int idx, const CompilationOptions& options, CompilationStats* stats)
{
    return compileInternal({}, L, idx, options, stats);
}
715
716
// Legacy flags-based overload without module deduplication.
CompilationResult compile(lua_State* L, int idx, unsigned int flags, CompilationStats* stats)
{
    return compileInternal({}, L, idx, CompilationOptions{flags}, stats);
}
720
721
// Legacy flags-based overload with module-level deduplication.
CompilationResult compile(const ModuleId& moduleId, lua_State* L, int idx, unsigned int flags, CompilationStats* stats)
{
    return compileInternal(moduleId, L, idx, CompilationOptions{flags}, stats);
}
725
726
// True when codegen is initialized for this VM and the native entry hook is
// currently installed (i.e. not disabled via setNativeExecutionEnabled).
[[nodiscard]] bool isNativeExecutionEnabled(lua_State* L)
{
    return getCodeGenContext(L) != nullptr && L->global->ecb.enter == onEnter;
}
730
731
// Toggles native execution by swapping the VM's entry hook; no-op when codegen
// was never initialized for this VM.
void setNativeExecutionEnabled(lua_State* L, bool enabled)
{
    if (getCodeGenContext(L) != nullptr)
        L->global->ecb.enter = enabled ? onEnter : onEnterDisabled;
}
736
737
// Permanently strips native code from the function at the given call-stack
// level, reverting it to bytecode execution.
void disableNativeExecutionForFunction(lua_State* L, const int level) noexcept
{
    CODEGEN_ASSERT(unsigned(level) < unsigned(L->ci - L->base_ci));

    // level counts frames back from the current call.
    const CallInfo* ci = L->ci - level;
    const TValue* o = ci->func;
    CODEGEN_ASSERT(ttisfunction(o));

    Proto* proto = clvalue(o)->l.p;
    CODEGEN_ASSERT(proto);

    // Must currently be natively compiled (codeentry differs from plain bytecode).
    CODEGEN_ASSERT(proto->codeentry != proto->code);
    onDestroyFunction(L, proto);
}
751
752
// Adapter installed as the VM's gettypemapping callback: maps a userdata type
// name to a tagged bytecode type via the user-provided remapper, falling back
// to the generic userdata type for out-of-range indices.
static uint8_t userdataRemapperWrap(lua_State* L, const char* str, size_t len)
{
    if (BaseCodeGenContext* codegenCtx = getCodeGenContext(L))
    {
        uint8_t index = codegenCtx->userdataRemapper(codegenCtx->userdataRemappingContext, str, len);

        // Only indices that fit the tagged-userdata type range are honored.
        if (index < (LBC_TYPE_TAGGED_USERDATA_END - LBC_TYPE_TAGGED_USERDATA_BASE))
            return LBC_TYPE_TAGGED_USERDATA_BASE + index;
    }

    return LBC_TYPE_USERDATA;
}
764
765
// Registers (or clears, when cb is null) the userdata type remapping callback
// for this VM; no-op when codegen was never initialized.
void setUserdataRemapper(lua_State* L, void* context, UserdataRemapperCallback cb)
{
    if (BaseCodeGenContext* codegenCtx = getCodeGenContext(L))
    {
        codegenCtx->userdataRemappingContext = context;
        codegenCtx->userdataRemapper = cb;

        L->global->ecb.gettypemapping = cb ? userdataRemapperWrap : nullptr;
    }
}
775
776
} // namespace CodeGen
777
} // namespace Luau
778
779