Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Roblox
GitHub Repository: Roblox/luau
Path: blob/master/CodeGen/src/IrTranslation.cpp
2725 views
1
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
2
#include "IrTranslation.h"
3
4
#include "Luau/Bytecode.h"
5
#include "Luau/CodeGenOptions.h"
6
#include "Luau/IrBuilder.h"
7
#include "Luau/IrUtils.h"
8
9
#include "IrTranslateBuiltins.h"
10
11
#include "lobject.h"
12
#include "lstate.h"
13
#include "ltm.h"
14
15
LUAU_FASTFLAG(LuauCodegenBlockSafeEnv)
16
LUAU_FASTFLAG(LuauCodegenDseOnCondJump)
17
LUAU_FASTFLAG(LuauCodegenMarkDeadRegisters2)
18
19
namespace Luau
20
{
21
namespace CodeGen
22
{
23
24
// Helper to consistently define a switch to instruction fallback code.
//
// On construction it terminates the currently open block with a jump to 'next'
// and begins emitting into 'fallback'; on destruction it begins the 'next'
// block. Fallback code is therefore emitted inside the scope's lifetime, and
// the main instruction stream resumes at 'next' when the scope ends.
struct FallbackStreamScope
{
    FallbackStreamScope(IrBuilder& build, IrOp fallback, IrOp next)
        : build(build)
        , next(next)
    {
        // Both operands must refer to IR blocks
        CODEGEN_ASSERT(fallback.kind == IrOpKind::Block);
        CODEGEN_ASSERT(next.kind == IrOpKind::Block);

        // Close out the main path before switching emission to the fallback block
        build.inst(IrCmd::JUMP, next);
        build.beginBlock(fallback);
    }

    ~FallbackStreamScope()
    {
        build.beginBlock(next);
    }

    IrBuilder& build;
    IrOp next;
};
46
47
// Lazily create the fallback block for 'pcpos' on first use; later calls reuse it.
static IrOp getInitializedFallback(IrBuilder& build, IrOp& fallback, int pcpos)
{
    if (fallback.kind != IrOpKind::None)
        return fallback;

    fallback = build.fallbackBlock(pcpos);
    return fallback;
}
54
55
// Produce a double operand for 'arg': an inline IR constant when it is a VM
// constant slot, otherwise a LOAD_DOUBLE of the register.
static IrOp loadDoubleOrConstant(IrBuilder& build, IrOp arg)
{
    if (arg.kind != IrOpKind::VmConst)
        return build.inst(IrCmd::LOAD_DOUBLE, arg);

    CODEGEN_ASSERT(build.function.proto);
    TValue slot = build.function.proto->k[vmConstOp(arg)];

    // Callers only pass numeric constants here
    CODEGEN_ASSERT(slot.tt == LUA_TNUMBER);

    return build.constDouble(slot.value.n);
}
69
70
// LOADNIL: only the tag has to be written; the payload is not stored for nil.
void translateInstLoadNil(IrBuilder& build, const Instruction* pc)
{
    int target = LUAU_INSN_A(*pc);

    build.inst(IrCmd::STORE_TAG, build.vmReg(target), build.constTag(LUA_TNIL));
}
76
77
// LOADB: store a boolean constant; a non-zero C operand additionally encodes
// an unconditional skip over the following instruction(s).
void translateInstLoadB(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int target = LUAU_INSN_A(*pc);
    int skip = LUAU_INSN_C(*pc);

    build.inst(IrCmd::STORE_INT, build.vmReg(target), build.constInt(LUAU_INSN_B(*pc)));
    build.inst(IrCmd::STORE_TAG, build.vmReg(target), build.constTag(LUA_TBOOLEAN));

    if (skip != 0)
        build.inst(IrCmd::JUMP, build.blockAtInst(pcpos + 1 + skip));
}
87
88
// LOADN: store a small integer constant (encoded in D) as a double.
void translateInstLoadN(IrBuilder& build, const Instruction* pc)
{
    int target = LUAU_INSN_A(*pc);
    double value = double(LUAU_INSN_D(*pc));

    build.inst(IrCmd::STORE_DOUBLE, build.vmReg(target), build.constDouble(value));
    build.inst(IrCmd::STORE_TAG, build.vmReg(target), build.constTag(LUA_TNUMBER));
}
95
96
// Store constant 'k' from the proto constant table into register 'ra',
// specializing the store by the constant's tag.
static void translateInstLoadConstant(IrBuilder& build, int ra, int k)
{
    TValue protok = build.function.proto->k[k];

    // Compiler only generates LOADK for source-level constants, so dynamic imports are not affected
    switch (protok.tt)
    {
    case LUA_TNIL:
        build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNIL));
        break;
    case LUA_TBOOLEAN:
        build.inst(IrCmd::STORE_INT, build.vmReg(ra), build.constInt(protok.value.b));
        build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TBOOLEAN));
        break;
    case LUA_TNUMBER:
        build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), build.constDouble(protok.value.n));
        build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
        break;
    default:
    {
        // Tag could be LUA_TSTRING or LUA_TVECTOR; for TSTRING we could generate LOAD_POINTER/STORE_POINTER/STORE_TAG, but it's not profitable;
        // however, it's still valuable to preserve the tag throughout the optimization pipeline to eliminate tag checks.
        IrOp load = build.inst(IrCmd::LOAD_TVALUE, build.vmConst(k), build.constInt(0), build.constTag(protok.tt));
        build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), load);
        break;
    }
    }
}
123
124
// LOADK: constant index comes from the D operand.
void translateInstLoadK(IrBuilder& build, const Instruction* pc)
{
    int target = LUAU_INSN_A(*pc);
    int constantIndex = LUAU_INSN_D(*pc);

    translateInstLoadConstant(build, target, constantIndex);
}
128
129
// LOADKX: constant index is too large for the instruction word and is carried in the aux word.
void translateInstLoadKX(IrBuilder& build, const Instruction* pc)
{
    int target = LUAU_INSN_A(*pc);
    int constantIndex = pc[1];

    translateInstLoadConstant(build, target, constantIndex);
}
133
134
// MOVE: copy the full TValue (tag and payload together) between registers.
void translateInstMove(IrBuilder& build, const Instruction* pc)
{
    int dst = LUAU_INSN_A(*pc);
    int src = LUAU_INSN_B(*pc);

    IrOp value = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(src));
    build.inst(IrCmd::STORE_TVALUE, build.vmReg(dst), value);
}
142
143
// JUMP: unconditional forward jump; offset is relative to the next instruction.
void translateInstJump(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int targetPc = pcpos + 1 + LUAU_INSN_D(*pc);

    build.inst(IrCmd::JUMP, build.blockAtInst(targetPc));
}
147
148
// JUMPBACK: backwards jump; an interrupt check is emitted before it.
void translateInstJumpBack(IrBuilder& build, const Instruction* pc, int pcpos)
{
    build.inst(IrCmd::INTERRUPT, build.constUint(pcpos));

    int targetPc = pcpos + 1 + LUAU_INSN_D(*pc);
    build.inst(IrCmd::JUMP, build.blockAtInst(targetPc));
}
153
154
// JUMPIF / JUMPIFNOT: branch on the truthiness of register A.
void translateInstJumpIf(IrBuilder& build, const Instruction* pc, int pcpos, bool not_)
{
    int ra = LUAU_INSN_A(*pc);

    IrOp target = build.blockAtInst(pcpos + 1 + LUAU_INSN_D(*pc));
    IrOp next = build.blockAtInst(pcpos + 1);

    // TODO: falsy/truthy conditions should be deconstructed into more primitive operations
    IrCmd cmd = not_ ? IrCmd::JUMP_IF_FALSY : IrCmd::JUMP_IF_TRUTHY;
    build.inst(cmd, build.vmReg(ra), target, next);

    // Fallthrough in original bytecode is implicit, so we start next internal block here
    if (build.isInternalBlock(next))
        build.beginBlock(next);
}
171
172
// JUMPIFEQ / JUMPIFNOTEQ: branch on equality of two registers (second register
// index is carried in the aux word). 'not_' inverts the branch edges.
void translateInstJumpIfEq(IrBuilder& build, const Instruction* pc, int pcpos, bool not_)
{
    int ra = LUAU_INSN_A(*pc);
    int rb = pc[1];

    IrOp target = build.blockAtInst(pcpos + 1 + LUAU_INSN_D(*pc));
    IrOp next = build.blockAtInst(pcpos + 2);

    BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);

    // fast-path: number (when both operands are expected to be a number or are unknown)
    if (isExpectedOrUnknownBytecodeType(bcTypes.a, LBC_TYPE_NUMBER) && isExpectedOrUnknownBytecodeType(bcTypes.b, LBC_TYPE_NUMBER))
    {
        IrOp fallback = build.fallbackBlock(pcpos);

        // Type info is only a prediction; a tag mismatch falls back to the generic comparison below
        IrOp ta = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra));
        build.inst(IrCmd::CHECK_TAG, ta, build.constTag(LUA_TNUMBER), fallback);

        IrOp tb = build.inst(IrCmd::LOAD_TAG, build.vmReg(rb));
        build.inst(IrCmd::CHECK_TAG, tb, build.constTag(LUA_TNUMBER), fallback);

        IrOp va = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(ra));
        IrOp vb = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(rb));

        // Condition is NotEqual, so the first edge is taken when the values differ; 'not_' swaps the edges
        build.inst(IrCmd::JUMP_CMP_NUM, va, vb, build.cond(IrCondition::NotEqual), not_ ? target : next, not_ ? next : target);

        build.beginBlock(fallback);
    }

    // Generic path (the main path when the number fast-path is not emitted);
    // savedpc is set first so VM state is consistent if CMP_ANY exits to the VM
    build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));

    // CMP_ANY yields a 0/1 integer; branch to 'next' when equal unless 'not_' inverts the edges
    IrOp result = build.inst(IrCmd::CMP_ANY, build.vmReg(ra), build.vmReg(rb), build.cond(IrCondition::Equal));
    build.inst(IrCmd::JUMP_CMP_INT, result, build.constInt(0), build.cond(IrCondition::Equal), not_ ? target : next, not_ ? next : target);

    build.beginBlock(next);
}
208
209
// Fused JUMPIFEQ variant: instead of branching, the comparison result is
// materialized as a boolean into register 'rr' (taken from the instruction at
// pcpos + 2); control always continues at pcpos + 4.
void translateInstJumpIfEqShortcut(IrBuilder& build, const Instruction* pc, int pcpos, bool not_)
{
    int rr = LUAU_INSN_A(pc[2]);

    int ra = LUAU_INSN_A(*pc);
    int rb = pc[1];

    IrOp next = build.blockAtInst(pcpos + 4);
    IrOp fallback;

    BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);

    // fast-path: number (when both operands are expected to be a number or are unknown)
    if (isExpectedOrUnknownBytecodeType(bcTypes.a, LBC_TYPE_NUMBER) && isExpectedOrUnknownBytecodeType(bcTypes.b, LBC_TYPE_NUMBER))
    {
        // When the operand type is statically known to be a number, a failed tag check exits to
        // the VM directly; otherwise a lazily-created fallback block runs the generic path below
        IrOp ta = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra));
        build.inst(
            IrCmd::CHECK_TAG,
            ta,
            build.constTag(LUA_TNUMBER),
            bcTypes.a == LBC_TYPE_NUMBER ? build.vmExit(pcpos) : getInitializedFallback(build, fallback, pcpos)
        );

        IrOp tb = build.inst(IrCmd::LOAD_TAG, build.vmReg(rb));
        build.inst(
            IrCmd::CHECK_TAG,
            tb,
            build.constTag(LUA_TNUMBER),
            bcTypes.b == LBC_TYPE_NUMBER ? build.vmExit(pcpos) : getInitializedFallback(build, fallback, pcpos)
        );

        IrOp va = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(ra));
        IrOp vb = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(rb));

        // Branch-free comparison producing a 0/1 integer; 'not_' is folded into the condition
        IrOp result = build.inst(
            IrCmd::CMP_SPLIT_TVALUE,
            build.constTag(LUA_TNUMBER),
            build.constTag(LUA_TNUMBER),
            va,
            vb,
            build.cond(not_ ? IrCondition::NotEqual : IrCondition::Equal)
        );

        build.inst(IrCmd::STORE_INT, build.vmReg(rr), result);
        build.inst(IrCmd::STORE_TAG, build.vmReg(rr), build.constTag(LUA_TBOOLEAN));
        build.inst(IrCmd::JUMP, next);

        // If we don't need a fallback, we are done
        if (fallback.kind == IrOpKind::None)
            return;

        // Otherwise, start the fallback block
        // Note that if the number fast-path is not taken at all code that would have been in the fallback is actually the main path
        build.beginBlock(fallback);
    }

    // Generic path: savedpc is set first so VM state is consistent if CMP_ANY exits to the VM
    build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));

    IrOp result = build.inst(IrCmd::CMP_ANY, build.vmReg(ra), build.vmReg(rb), build.cond(IrCondition::Equal));

    // CMP_ANY doesn't support NotEqual, but we can compute !result as 1-result
    if (not_)
        result = build.inst(IrCmd::SUB_INT, build.constInt(1), result);

    build.inst(IrCmd::STORE_INT, build.vmReg(rr), result);
    build.inst(IrCmd::STORE_TAG, build.vmReg(rr), build.constTag(LUA_TBOOLEAN));
    build.inst(IrCmd::JUMP, next);
}
277
278
// Relational conditional jumps (Less/LessEqual and their negations); equality
// is handled separately by translateInstJumpIfEq, as asserted below.
void translateInstJumpIfCond(IrBuilder& build, const Instruction* pc, int pcpos, IrCondition cond)
{
    CODEGEN_ASSERT(cond != IrCondition::Equal && cond != IrCondition::NotEqual);

    int ra = LUAU_INSN_A(*pc);
    int rb = pc[1];

    IrOp target = build.blockAtInst(pcpos + 1 + LUAU_INSN_D(*pc));
    IrOp next = build.blockAtInst(pcpos + 2);

    BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);

    // fast-path: number (when both operands are expected to be a number or are unknown)
    if (isExpectedOrUnknownBytecodeType(bcTypes.a, LBC_TYPE_NUMBER) && isExpectedOrUnknownBytecodeType(bcTypes.b, LBC_TYPE_NUMBER))
    {
        IrOp fallback = build.fallbackBlock(pcpos);

        // Type info is only a prediction; a tag mismatch falls back to the generic comparison below
        IrOp ta = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra));
        build.inst(IrCmd::CHECK_TAG, ta, build.constTag(LUA_TNUMBER), fallback);

        IrOp tb = build.inst(IrCmd::LOAD_TAG, build.vmReg(rb));
        build.inst(IrCmd::CHECK_TAG, tb, build.constTag(LUA_TNUMBER), fallback);

        IrOp va = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(ra));
        IrOp vb = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(rb));

        build.inst(IrCmd::JUMP_CMP_NUM, va, vb, build.cond(cond), target, next);

        build.beginBlock(fallback);
    }

    // Generic path: savedpc is set first so VM state is consistent if CMP_ANY exits to the VM
    build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));

    // CMP_ANY only supports the positive conditions (see the 1-result trick in the
    // shortcut translators), so negated conditions are rewritten to their complement
    // and the jump edges are swapped via 'reverse' instead
    bool reverse = false;

    if (cond == IrCondition::NotLessEqual)
    {
        reverse = true;
        cond = IrCondition::LessEqual;
    }
    else if (cond == IrCondition::NotLess)
    {
        reverse = true;
        cond = IrCondition::Less;
    }
    else if (cond == IrCondition::NotEqual)
    {
        // NOTE(review): unreachable while the assert at the top holds (it rejects NotEqual);
        // presumably kept as a defensive mapping for builds where assertions compile out -- confirm
        reverse = true;
        cond = IrCondition::Equal;
    }

    IrOp result = build.inst(IrCmd::CMP_ANY, build.vmReg(ra), build.vmReg(rb), build.cond(cond));
    build.inst(IrCmd::JUMP_CMP_INT, result, build.constInt(0), build.cond(IrCondition::Equal), reverse ? target : next, reverse ? next : target);

    build.beginBlock(next);
}
334
335
// JUMPX: extended-range jump (offset in the E operand); carries an interrupt check.
void translateInstJumpX(IrBuilder& build, const Instruction* pc, int pcpos)
{
    build.inst(IrCmd::INTERRUPT, build.constUint(pcpos));

    int targetPc = pcpos + 1 + LUAU_INSN_E(*pc);
    build.inst(IrCmd::JUMP, build.blockAtInst(targetPc));
}
340
341
// JUMPXEQKNIL: branch on whether register A is (not) nil; only the tag needs
// to be compared. The NOT bit lives in the aux word.
// Consistency fix: decode the NOT bit with LUAU_INSN_AUX_NOT like every other
// JumpxEq* translator in this file, instead of an inline 0x80000000 mask.
void translateInstJumpxEqNil(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);
    uint32_t aux = pc[1];
    bool not_ = LUAU_INSN_AUX_NOT(aux) != 0;

    IrOp target = build.blockAtInst(pcpos + 1 + LUAU_INSN_D(*pc));
    IrOp next = build.blockAtInst(pcpos + 2);

    IrOp ta = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra));
    build.inst(IrCmd::JUMP_EQ_TAG, ta, build.constTag(LUA_TNIL), not_ ? next : target, not_ ? target : next);

    // Fallthrough in original bytecode is implicit, so we start next internal block here
    if (build.isInternalBlock(next))
        build.beginBlock(next);
}
356
357
// Fused JUMPXEQKNIL variant: the nil comparison result is materialized as a
// boolean into 'rr' instead of branching; control continues at pcpos + 4.
void translateInstJumpxEqNilShortcut(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int rr = LUAU_INSN_A(pc[2]);

    int ra = LUAU_INSN_A(*pc);
    uint32_t aux = pc[1];
    bool not_ = LUAU_INSN_AUX_NOT(aux) != 0;

    IrOp next = build.blockAtInst(pcpos + 4);

    // The boolean result comes straight from the tag comparison, no branching required
    IrOp tag = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra));

    IrCondition cond = not_ ? IrCondition::NotEqual : IrCondition::Equal;
    IrOp result = build.inst(IrCmd::CMP_TAG, tag, build.constTag(LUA_TNIL), build.cond(cond));

    build.inst(IrCmd::STORE_TAG, build.vmReg(rr), build.constTag(LUA_TBOOLEAN));
    build.inst(IrCmd::STORE_INT, build.vmReg(rr), result);
    build.inst(IrCmd::JUMP, next);

    // Fallthrough in original bytecode is implicit, so we start next internal block here
    if (build.isInternalBlock(next))
        build.beginBlock(next);
}
379
380
// JUMPXEQKB: branch on equality of register A with a boolean constant from the aux word.
void translateInstJumpxEqB(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);
    uint32_t aux = pc[1];
    bool not_ = LUAU_INSN_AUX_NOT(aux) != 0;

    IrOp target = build.blockAtInst(pcpos + 1 + LUAU_INSN_D(*pc));
    IrOp next = build.blockAtInst(pcpos + 2);
    IrOp checkValue = build.block(IrBlockKind::Internal);

    // Destination blocks for the "values equal" / "values not equal" outcomes
    IrOp onEqual = not_ ? next : target;
    IrOp onNotEqual = not_ ? target : next;

    // A non-boolean can never equal a boolean constant, so a tag mismatch resolves the comparison immediately
    IrOp tag = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra));
    build.inst(IrCmd::JUMP_EQ_TAG, tag, build.constTag(LUA_TBOOLEAN), checkValue, onNotEqual);

    build.beginBlock(checkValue);
    IrOp value = build.inst(IrCmd::LOAD_INT, build.vmReg(ra));

    build.inst(IrCmd::JUMP_CMP_INT, value, build.constInt(LUAU_INSN_AUX_KB(aux)), build.cond(IrCondition::Equal), onEqual, onNotEqual);

    // Fallthrough in original bytecode is implicit, so we start next internal block here
    if (build.isInternalBlock(next))
        build.beginBlock(next);
}
405
406
// Fused JUMPXEQKB variant: the comparison result is materialized as a boolean
// into 'rr' instead of branching; control continues at pcpos + 4.
void translateInstJumpxEqBShortcut(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int rr = LUAU_INSN_A(pc[2]);

    int ra = LUAU_INSN_A(*pc);
    uint32_t aux = pc[1];
    bool not_ = LUAU_INSN_AUX_NOT(aux) != 0;

    IrOp next = build.blockAtInst(pcpos + 4);

    IrOp tag = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra));
    IrOp value = build.inst(IrCmd::LOAD_INT, build.vmReg(ra));
    IrOp constant = build.constInt(LUAU_INSN_AUX_KB(aux));

    // Tag and payload are compared together; the result is already a 0/1 integer
    IrCondition cond = not_ ? IrCondition::NotEqual : IrCondition::Equal;
    IrOp result = build.inst(IrCmd::CMP_SPLIT_TVALUE, tag, build.constTag(LUA_TBOOLEAN), value, constant, build.cond(cond));

    build.inst(IrCmd::STORE_TAG, build.vmReg(rr), build.constTag(LUA_TBOOLEAN));
    build.inst(IrCmd::STORE_INT, build.vmReg(rr), result);
    build.inst(IrCmd::JUMP, next);

    // Fallthrough in original bytecode is implicit, so we start next internal block here
    if (build.isInternalBlock(next))
        build.beginBlock(next);
}
431
432
// JUMPXEQKN: branch on equality of register A with a number constant (index in the aux word).
void translateInstJumpxEqN(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);
    uint32_t aux = pc[1];
    bool not_ = LUAU_INSN_AUX_NOT(aux) != 0;

    IrOp target = build.blockAtInst(pcpos + 1 + LUAU_INSN_D(*pc));
    IrOp next = build.blockAtInst(pcpos + 2);
    IrOp checkValue = build.block(IrBlockKind::Internal);

    // Destination blocks for the "values equal" / "values not equal" outcomes
    IrOp onEqual = not_ ? next : target;
    IrOp onNotEqual = not_ ? target : next;

    // A non-number can never equal a number constant, so a tag mismatch resolves the comparison immediately
    IrOp tag = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra));
    build.inst(IrCmd::JUMP_EQ_TAG, tag, build.constTag(LUA_TNUMBER), checkValue, onNotEqual);

    build.beginBlock(checkValue);
    IrOp value = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(ra));

    CODEGEN_ASSERT(build.function.proto);
    TValue protok = build.function.proto->k[LUAU_INSN_AUX_KV(aux)];

    CODEGEN_ASSERT(protok.tt == LUA_TNUMBER);
    IrOp constant = build.constDouble(protok.value.n);

    // First edge of JUMP_CMP_NUM is taken when the values differ (NotEqual condition)
    build.inst(IrCmd::JUMP_CMP_NUM, value, constant, build.cond(IrCondition::NotEqual), onNotEqual, onEqual);

    // Fallthrough in original bytecode is implicit, so we start next internal block here
    if (build.isInternalBlock(next))
        build.beginBlock(next);
}
461
462
// Fused JUMPXEQKN variant: the comparison result is materialized as a boolean
// into 'rr' instead of branching; control continues at pcpos + 4.
void translateInstJumpxEqNShortcut(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int rr = LUAU_INSN_A(pc[2]);

    int ra = LUAU_INSN_A(*pc);
    uint32_t aux = pc[1];
    bool not_ = LUAU_INSN_AUX_NOT(aux) != 0;

    IrOp next = build.blockAtInst(pcpos + 4);

    IrOp tag = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra));
    IrOp value = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(ra));

    CODEGEN_ASSERT(build.function.proto);
    TValue protok = build.function.proto->k[LUAU_INSN_AUX_KV(aux)];

    CODEGEN_ASSERT(protok.tt == LUA_TNUMBER);
    IrOp constant = build.constDouble(protok.value.n);

    // Tag and payload are compared together; the result is already a 0/1 integer
    IrCondition cond = not_ ? IrCondition::NotEqual : IrCondition::Equal;
    IrOp result = build.inst(IrCmd::CMP_SPLIT_TVALUE, tag, build.constTag(LUA_TNUMBER), value, constant, build.cond(cond));

    build.inst(IrCmd::STORE_TAG, build.vmReg(rr), build.constTag(LUA_TBOOLEAN));
    build.inst(IrCmd::STORE_INT, build.vmReg(rr), result);
    build.inst(IrCmd::JUMP, next);

    // Fallthrough in original bytecode is implicit, so we start next internal block here
    if (build.isInternalBlock(next))
        build.beginBlock(next);
}
492
493
// JUMPXEQKS: branch on equality of register A with a string constant. Once the
// tag matches, equality is a pointer comparison (strings are interned).
void translateInstJumpxEqS(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);
    uint32_t aux = pc[1];
    bool not_ = LUAU_INSN_AUX_NOT(aux) != 0;

    IrOp target = build.blockAtInst(pcpos + 1 + LUAU_INSN_D(*pc));
    IrOp next = build.blockAtInst(pcpos + 2);
    IrOp checkValue = build.block(IrBlockKind::Internal);

    // Destination blocks for the "values equal" / "values not equal" outcomes
    IrOp onEqual = not_ ? next : target;
    IrOp onNotEqual = not_ ? target : next;

    IrOp tag = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra));
    build.inst(IrCmd::JUMP_EQ_TAG, tag, build.constTag(LUA_TSTRING), checkValue, onNotEqual);

    build.beginBlock(checkValue);
    IrOp value = build.inst(IrCmd::LOAD_POINTER, build.vmReg(ra));
    IrOp constant = build.inst(IrCmd::LOAD_POINTER, build.vmConst(LUAU_INSN_AUX_KV(aux)));

    build.inst(IrCmd::JUMP_EQ_POINTER, value, constant, onEqual, onNotEqual);

    // Fallthrough in original bytecode is implicit, so we start next internal block here
    if (build.isInternalBlock(next))
        build.beginBlock(next);
}
516
517
// Fused JUMPXEQKS variant: the comparison result is materialized as a boolean
// into 'rr' instead of branching; control continues at pcpos + 4.
void translateInstJumpxEqSShortcut(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int rr = LUAU_INSN_A(pc[2]);

    int ra = LUAU_INSN_A(*pc);
    uint32_t aux = pc[1];
    bool not_ = LUAU_INSN_AUX_NOT(aux) != 0;

    IrOp next = build.blockAtInst(pcpos + 4);

    IrOp tag = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra));
    IrOp value = build.inst(IrCmd::LOAD_POINTER, build.vmReg(ra));
    IrOp constant = build.inst(IrCmd::LOAD_POINTER, build.vmConst(LUAU_INSN_AUX_KV(aux)));

    // Tag and payload (string pointer) are compared together; the result is already a 0/1 integer
    IrCondition cond = not_ ? IrCondition::NotEqual : IrCondition::Equal;
    IrOp result = build.inst(IrCmd::CMP_SPLIT_TVALUE, tag, build.constTag(LUA_TSTRING), value, constant, build.cond(cond));

    build.inst(IrCmd::STORE_TAG, build.vmReg(rr), build.constTag(LUA_TBOOLEAN));
    build.inst(IrCmd::STORE_INT, build.vmReg(rr), result);
    build.inst(IrCmd::JUMP, next);

    // Fallthrough in original bytecode is implicit, so we start next internal block here
    if (build.isInternalBlock(next))
        build.beginBlock(next);
}
542
543
// Emit the generic DO_ARITH fallback stream if the fast path created a fallback block.
static void translateBinaryNumericFallbackIfRequired(IrBuilder& build, IrOp fallback, int ra, IrOp opb, IrOp opc, TMS tm, int pcpos)
{
    if (fallback.kind == IrOpKind::None)
        return;

    IrOp next = build.blockAtInst(pcpos + 1);
    FallbackStreamScope scope(build, fallback, next);

    build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));
    build.inst(IrCmd::DO_ARITH, build.vmReg(ra), opb, opc, build.constInt(tm));
    build.inst(IrCmd::JUMP, next);
}
555
556
// Numeric/vector binary arithmetic (ADD/SUB/MUL/DIV/IDIV/MOD/POW).
// 'rb'/'rc' are operand register indices, or -1 when the corresponding operand
// ('opb'/'opc') is a VM constant; 'tm' selects the arithmetic operation.
static void translateInstBinaryNumeric(IrBuilder& build, int ra, int rb, int rc, IrOp opb, IrOp opc, int pcpos, TMS tm)
{
    // Created lazily by paths that can recover through the generic DO_ARITH fallback
    IrOp fallback;

    BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);

    // Special fast-paths for vectors, matching the cases we have in VM
    if (bcTypes.a == LBC_TYPE_VECTOR && bcTypes.b == LBC_TYPE_VECTOR &&
        (tm == TM_ADD || tm == TM_SUB || tm == TM_MUL || tm == TM_DIV || tm == TM_IDIV))
    {
        // Type info is only a prediction, so tags still have to be checked; a mismatch exits to the VM
        build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rb)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));
        build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rc)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));

        IrOp vb = build.inst(IrCmd::LOAD_TVALUE, opb);
        IrOp vc = build.inst(IrCmd::LOAD_TVALUE, opc);
        IrOp result;

        switch (tm)
        {
        case TM_ADD:
            result = build.inst(IrCmd::ADD_VEC, vb, vc);
            break;
        case TM_SUB:
            result = build.inst(IrCmd::SUB_VEC, vb, vc);
            break;
        case TM_MUL:
            result = build.inst(IrCmd::MUL_VEC, vb, vc);
            break;
        case TM_DIV:
            result = build.inst(IrCmd::DIV_VEC, vb, vc);
            break;
        case TM_IDIV:
            result = build.inst(IrCmd::IDIV_VEC, vb, vc);
            break;
        default:
            CODEGEN_ASSERT(!"Unknown TM op");
        }

        result = build.inst(IrCmd::TAG_VECTOR, result);

        build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), result);
        return;
    }
    else if (!isUserdataBytecodeType(bcTypes.a) && bcTypes.b == LBC_TYPE_VECTOR && (tm == TM_MUL || tm == TM_DIV || tm == TM_IDIV))
    {
        // number op vector: the number operand is converted and broadcast to a vector
        if (rb != -1)
        {
            build.inst(
                IrCmd::CHECK_TAG,
                build.inst(IrCmd::LOAD_TAG, build.vmReg(rb)),
                build.constTag(LUA_TNUMBER),
                bcTypes.a == LBC_TYPE_NUMBER ? build.vmExit(pcpos) : getInitializedFallback(build, fallback, pcpos)
            );
        }

        build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rc)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));

        IrOp vb = build.inst(IrCmd::FLOAT_TO_VEC, build.inst(IrCmd::NUM_TO_FLOAT, loadDoubleOrConstant(build, opb)));
        IrOp vc = build.inst(IrCmd::LOAD_TVALUE, opc);
        IrOp result;

        switch (tm)
        {
        case TM_MUL:
            result = build.inst(IrCmd::MUL_VEC, vb, vc);
            break;
        case TM_DIV:
            result = build.inst(IrCmd::DIV_VEC, vb, vc);
            break;
        case TM_IDIV:
            result = build.inst(IrCmd::IDIV_VEC, vb, vc);
            break;
        default:
            CODEGEN_ASSERT(!"Unknown TM op");
        }

        result = build.inst(IrCmd::TAG_VECTOR, result);

        build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), result);

        translateBinaryNumericFallbackIfRequired(build, fallback, ra, opb, opc, tm, pcpos);
        return;
    }
    else if (bcTypes.a == LBC_TYPE_VECTOR && !isUserdataBytecodeType(bcTypes.b) && (tm == TM_MUL || tm == TM_DIV || tm == TM_IDIV))
    {
        // vector op number: mirror image of the case above
        build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rb)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));

        if (rc != -1)
        {
            build.inst(
                IrCmd::CHECK_TAG,
                build.inst(IrCmd::LOAD_TAG, build.vmReg(rc)),
                build.constTag(LUA_TNUMBER),
                bcTypes.b == LBC_TYPE_NUMBER ? build.vmExit(pcpos) : getInitializedFallback(build, fallback, pcpos)
            );
        }

        IrOp vb = build.inst(IrCmd::LOAD_TVALUE, opb);
        IrOp vc = build.inst(IrCmd::FLOAT_TO_VEC, build.inst(IrCmd::NUM_TO_FLOAT, loadDoubleOrConstant(build, opc)));
        IrOp result;

        switch (tm)
        {
        case TM_MUL:
            result = build.inst(IrCmd::MUL_VEC, vb, vc);
            break;
        case TM_DIV:
            result = build.inst(IrCmd::DIV_VEC, vb, vc);
            break;
        case TM_IDIV:
            result = build.inst(IrCmd::IDIV_VEC, vb, vc);
            break;
        default:
            CODEGEN_ASSERT(!"Unknown TM op");
        }

        result = build.inst(IrCmd::TAG_VECTOR, result);

        build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), result);

        translateBinaryNumericFallbackIfRequired(build, fallback, ra, opb, opc, tm, pcpos);
        return;
    }

    if (isUserdataBytecodeType(bcTypes.a) || isUserdataBytecodeType(bcTypes.b))
    {
        // Give the host a chance to emit specialized code for its userdata types first
        if (build.hostHooks.userdataMetamethod &&
            build.hostHooks.userdataMetamethod(build, bcTypes.a, bcTypes.b, ra, opb, opc, tmToHostMetamethod(tm), pcpos))
            return;

        build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));
        build.inst(IrCmd::DO_ARITH, build.vmReg(ra), opb, opc, build.constInt(tm));
        return;
    }

    // fast-path: number
    if (rb != -1)
    {
        IrOp tb = build.inst(IrCmd::LOAD_TAG, build.vmReg(rb));
        build.inst(
            IrCmd::CHECK_TAG,
            tb,
            build.constTag(LUA_TNUMBER),
            bcTypes.a == LBC_TYPE_NUMBER ? build.vmExit(pcpos) : getInitializedFallback(build, fallback, pcpos)
        );
    }

    // rc == rb means the same register was already tag-checked above
    if (rc != -1 && rc != rb)
    {
        IrOp tc = build.inst(IrCmd::LOAD_TAG, build.vmReg(rc));
        build.inst(
            IrCmd::CHECK_TAG,
            tc,
            build.constTag(LUA_TNUMBER),
            bcTypes.b == LBC_TYPE_NUMBER ? build.vmExit(pcpos) : getInitializedFallback(build, fallback, pcpos)
        );
    }

    IrOp vb = loadDoubleOrConstant(build, opb);
    IrOp vc;
    IrOp result;

    if (opc.kind == IrOpKind::VmConst)
    {
        CODEGEN_ASSERT(build.function.proto);
        TValue protok = build.function.proto->k[vmConstOp(opc)];

        CODEGEN_ASSERT(protok.tt == LUA_TNUMBER);

        // VM has special cases for exponentiation with constants
        if (tm == TM_POW && protok.value.n == 0.5)
            result = build.inst(IrCmd::SQRT_NUM, vb);
        else if (tm == TM_POW && protok.value.n == 2.0)
            result = build.inst(IrCmd::MUL_NUM, vb, vb);
        else if (tm == TM_POW && protok.value.n == 3.0)
            result = build.inst(IrCmd::MUL_NUM, vb, build.inst(IrCmd::MUL_NUM, vb, vb));
        else
            vc = build.constDouble(protok.value.n);
    }
    else
    {
        vc = build.inst(IrCmd::LOAD_DOUBLE, opc);
    }

    // 'result' is still None unless one of the TM_POW special cases above produced it
    if (result.kind == IrOpKind::None)
    {
        CODEGEN_ASSERT(vc.kind != IrOpKind::None);

        switch (tm)
        {
        case TM_ADD:
            result = build.inst(IrCmd::ADD_NUM, vb, vc);
            break;
        case TM_SUB:
            result = build.inst(IrCmd::SUB_NUM, vb, vc);
            break;
        case TM_MUL:
            result = build.inst(IrCmd::MUL_NUM, vb, vc);
            break;
        case TM_DIV:
            result = build.inst(IrCmd::DIV_NUM, vb, vc);
            break;
        case TM_IDIV:
            result = build.inst(IrCmd::IDIV_NUM, vb, vc);
            break;
        case TM_MOD:
            result = build.inst(IrCmd::MOD_NUM, vb, vc);
            break;
        case TM_POW:
            result = build.inst(IrCmd::INVOKE_LIBM, build.constUint(LBF_MATH_POW), vb, vc);
            break;
        default:
            CODEGEN_ASSERT(!"Unsupported binary op");
        }
    }

    build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), result);

    if (ra != rb && ra != rc) // TODO: optimization should handle second check, but we'll test this later
        build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));

    translateBinaryNumericFallbackIfRequired(build, fallback, ra, opb, opc, tm, pcpos);
}
779
780
// Register op register form of binary arithmetic.
void translateInstBinary(IrBuilder& build, const Instruction* pc, int pcpos, TMS tm)
{
    int ra = LUAU_INSN_A(*pc);
    int rb = LUAU_INSN_B(*pc);
    int rc = LUAU_INSN_C(*pc);

    translateInstBinaryNumeric(build, ra, rb, rc, build.vmReg(rb), build.vmReg(rc), pcpos, tm);
}
786
787
// Register op constant form of binary arithmetic; -1 marks the constant operand's register slot.
void translateInstBinaryK(IrBuilder& build, const Instruction* pc, int pcpos, TMS tm)
{
    int ra = LUAU_INSN_A(*pc);
    int rb = LUAU_INSN_B(*pc);
    int kc = LUAU_INSN_C(*pc);

    translateInstBinaryNumeric(build, ra, rb, /* rc */ -1, build.vmReg(rb), build.vmConst(kc), pcpos, tm);
}
793
794
// Constant op register form of binary arithmetic; -1 marks the constant operand's register slot.
void translateInstBinaryRK(IrBuilder& build, const Instruction* pc, int pcpos, TMS tm)
{
    int ra = LUAU_INSN_A(*pc);
    int kb = LUAU_INSN_B(*pc);
    int rc = LUAU_INSN_C(*pc);

    translateInstBinaryNumeric(build, ra, /* rb */ -1, rc, build.vmConst(kb), build.vmReg(rc), pcpos, tm);
}
800
801
// NOT: boolean negation; NOT_ANY derives the result from the source tag and payload.
void translateInstNot(IrBuilder& build, const Instruction* pc)
{
    int dst = LUAU_INSN_A(*pc);
    int src = LUAU_INSN_B(*pc);

    IrOp srcTag = build.inst(IrCmd::LOAD_TAG, build.vmReg(src));
    IrOp srcValue = build.inst(IrCmd::LOAD_INT, build.vmReg(src));

    IrOp negated = build.inst(IrCmd::NOT_ANY, srcTag, srcValue);

    build.inst(IrCmd::STORE_INT, build.vmReg(dst), negated);
    build.inst(IrCmd::STORE_TAG, build.vmReg(dst), build.constTag(LUA_TBOOLEAN));
}
814
815
// MINUS (unary negation): dispatches on inferred bytecode types -- vector
// fast-path, userdata metamethod hook, or number fast-path with a generic fallback.
void translateInstMinus(IrBuilder& build, const Instruction* pc, int pcpos)
{
    BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);

    int ra = LUAU_INSN_A(*pc);
    int rb = LUAU_INSN_B(*pc);

    if (bcTypes.a == LBC_TYPE_VECTOR)
    {
        // Type info is only a prediction, so the tag still has to be checked; a mismatch exits to the VM
        build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rb)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));

        IrOp vb = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(rb));
        IrOp va = build.inst(IrCmd::UNM_VEC, vb);
        va = build.inst(IrCmd::TAG_VECTOR, va);
        build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), va);
        return;
    }

    if (isUserdataBytecodeType(bcTypes.a))
    {
        // Give the host a chance to emit specialized code for its userdata types first
        if (build.hostHooks.userdataMetamethod &&
            build.hostHooks.userdataMetamethod(build, bcTypes.a, bcTypes.b, ra, build.vmReg(rb), {}, tmToHostMetamethod(TM_UNM), pcpos))
            return;

        build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));
        build.inst(IrCmd::DO_ARITH, build.vmReg(ra), build.vmReg(rb), build.vmReg(rb), build.constInt(TM_UNM));
        return;
    }

    // Created lazily: only used when the operand type is not statically known to be a number
    IrOp fallback;

    IrOp tb = build.inst(IrCmd::LOAD_TAG, build.vmReg(rb));
    build.inst(
        IrCmd::CHECK_TAG,
        tb,
        build.constTag(LUA_TNUMBER),
        bcTypes.a == LBC_TYPE_NUMBER ? build.vmExit(pcpos) : getInitializedFallback(build, fallback, pcpos)
    );

    // fast-path: number
    IrOp vb = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(rb));
    IrOp va = build.inst(IrCmd::UNM_NUM, vb);

    build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), va);

    // When ra == rb the register already holds the LUA_TNUMBER tag (checked above)
    if (ra != rb)
        build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));

    if (fallback.kind != IrOpKind::None)
    {
        // Generic fallback: DO_ARITH with TM_UNM (note: both operand slots pass rb)
        IrOp next = build.blockAtInst(pcpos + 1);
        FallbackStreamScope scope(build, fallback, next);

        build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));
        build.inst(IrCmd::DO_ARITH, build.vmReg(ra), build.vmReg(rb), build.vmReg(rb), build.constInt(TM_UNM));
        build.inst(IrCmd::JUMP, next);
    }
}
873
874
// LENGTH (#): userdata metamethod hook first, then a fast path for tables
// without a metatable; everything else goes through the DO_LEN fallback.
void translateInstLength(IrBuilder& build, const Instruction* pc, int pcpos)
{
    BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);

    int ra = LUAU_INSN_A(*pc);
    int rb = LUAU_INSN_B(*pc);

    if (isUserdataBytecodeType(bcTypes.a))
    {
        // Give the host a chance to emit specialized code for its userdata types first
        if (build.hostHooks.userdataMetamethod &&
            build.hostHooks.userdataMetamethod(build, bcTypes.a, bcTypes.b, ra, build.vmReg(rb), {}, tmToHostMetamethod(TM_LEN), pcpos))
            return;

        build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));
        build.inst(IrCmd::DO_LEN, build.vmReg(ra), build.vmReg(rb));
        return;
    }

    IrOp fallback = build.fallbackBlock(pcpos);

    // When the operand is statically known to be a table, a tag mismatch exits to the VM directly
    IrOp tb = build.inst(IrCmd::LOAD_TAG, build.vmReg(rb));
    build.inst(IrCmd::CHECK_TAG, tb, build.constTag(LUA_TTABLE), bcTypes.a == LBC_TYPE_TABLE ? build.vmExit(pcpos) : fallback);

    // fast-path: table without __len
    IrOp vb = build.inst(IrCmd::LOAD_POINTER, build.vmReg(rb));
    build.inst(IrCmd::CHECK_NO_METATABLE, vb, fallback);

    // TABLE_LEN yields an integer that has to be converted to the number representation
    IrOp va = build.inst(IrCmd::TABLE_LEN, vb);
    IrOp vai = build.inst(IrCmd::INT_TO_NUM, va);

    build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), vai);
    build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));

    // Generic fallback: DO_LEN performs the full length operation
    IrOp next = build.blockAtInst(pcpos + 1);
    FallbackStreamScope scope(build, fallback, next);

    build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));
    build.inst(IrCmd::DO_LEN, build.vmReg(ra), build.vmReg(rb));
    build.inst(IrCmd::JUMP, next);
}
914
915
// NEWTABLE: array size comes from the aux word; the B operand encodes the node
// size as 0 (empty) or 2^(B-1).
void translateInstNewTable(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);
    int b = LUAU_INSN_B(*pc);
    uint32_t aux = pc[1];

    // NEW_TABLE can trigger GC (CHECK_GC below), so savedpc is updated first
    build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));

    uint32_t nodeSize = b == 0 ? 0 : 1u << (b - 1);

    IrOp table = build.inst(IrCmd::NEW_TABLE, build.constUint(aux), build.constUint(nodeSize));
    build.inst(IrCmd::STORE_POINTER, build.vmReg(ra), table);
    build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TTABLE));

    build.inst(IrCmd::CHECK_GC);
}
929
930
void translateInstDupTable(IrBuilder& build, const Instruction* pc, int pcpos)
{
    // DUPTABLE: ra = copy of the table template stored in constant D
    int ra = LUAU_INSN_A(*pc);
    int templateIdx = LUAU_INSN_D(*pc);

    // DUP_TABLE allocates, so the VM needs an accurate saved pc
    build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));

    IrOp source = build.inst(IrCmd::LOAD_POINTER, build.vmConst(templateIdx));
    IrOp clone = build.inst(IrCmd::DUP_TABLE, source);

    build.inst(IrCmd::STORE_POINTER, build.vmReg(ra), clone);
    build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TTABLE));

    // The fresh allocation may push the heap over the GC threshold
    build.inst(IrCmd::CHECK_GC);
}
944
945
void translateInstGetUpval(IrBuilder& build, const Instruction* pc, int pcpos)
{
    // GETUPVAL: ra = upvalue[B]
    int ra = LUAU_INSN_A(*pc);
    int upvalIndex = LUAU_INSN_B(*pc);

    IrOp tv = build.inst(IrCmd::GET_UPVALUE, build.vmUpvalue(upvalIndex));
    build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), tv);
}
953
954
void translateInstSetUpval(IrBuilder& build, const Instruction* pc, int pcpos)
{
    // SETUPVAL: upvalue[B] = ra
    int ra = LUAU_INSN_A(*pc);
    int upvalIndex = LUAU_INSN_B(*pc);

    IrOp tv = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(ra));
    build.inst(IrCmd::SET_UPVALUE, build.vmUpvalue(upvalIndex), tv, build.undef());
}
962
963
void translateInstCloseUpvals(IrBuilder& build, const Instruction* pc)
{
    // CLOSEUPVALS: close upvalues that capture stack slots at or above ra
    int firstReg = LUAU_INSN_A(*pc);

    build.inst(IrCmd::CLOSE_UPVALS, build.vmReg(firstReg));
}
969
970
// Translates the FASTCALL family of opcodes paired with the LOP_CALL that follows them.
// Returns the fallback block when the builtin may still need the regular call sequence,
// or undef() when the builtin was fully inlined and the fallback block was killed.
IrOp translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool customParams, int customParamCount, IrOp customArgs, IrOp customArg3)
{
    LuauOpcode opcode = LuauOpcode(LUAU_INSN_OP(*pc));
    int bfid = LUAU_INSN_A(*pc);
    int skip = LUAU_INSN_C(*pc);

    // FASTCALL is always followed (after 'skip' instructions) by the regular LOP_CALL it accelerates
    Instruction call = pc[skip + 1];
    CODEGEN_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);
    int ra = LUAU_INSN_A(call);

    int nparams = customParams ? customParamCount : LUAU_INSN_B(call) - 1; // can be LUA_MULTRET
    int nresults = LUAU_INSN_C(call) - 1;                                  // can be LUA_MULTRET
    int arg = customParams ? LUAU_INSN_B(*pc) : ra + 1;
    IrOp args = customParams ? customArgs : build.vmReg(ra + 2);

    IrOp builtinArgs = args;

    // If the extra argument is a numeric constant, hand the constant value itself to the builtin translation
    if (customArgs.kind == IrOpKind::VmConst)
    {
        CODEGEN_ASSERT(build.function.proto);
        TValue protok = build.function.proto->k[vmConstOp(customArgs)];

        if (protok.tt == LUA_TNUMBER)
            builtinArgs = build.constDouble(protok.value.n);
    }

    IrOp builtinArg3 = customParams ? customArg3 : build.vmReg(ra + 3);

    IrOp fallback = build.fallbackBlock(pcpos);

    // In unsafe environment, instead of retrying fastcall at 'pcpos' we side-exit directly to fallback sequence
    if (FFlag::LuauCodegenBlockSafeEnv)
        build.checkSafeEnv(pcpos + getOpLength(opcode));
    else
        build.inst(IrCmd::CHECK_SAFE_ENV, build.vmExit(pcpos + getOpLength(opcode)));

    // Try to inline the builtin entirely in IR
    BuiltinImplResult br = translateBuiltin(
        build, LuauBuiltinFunction(bfid), ra, arg, builtinArgs, builtinArg3, nparams, nresults, fallback, pcpos + getOpLength(opcode)
    );

    if (br.type != BuiltinImplType::None)
    {
        CODEGEN_ASSERT(nparams != LUA_MULTRET && "builtins are not allowed to handle variadic arguments");

        if (nresults == LUA_MULTRET)
            build.inst(IrCmd::ADJUST_STACK_TO_REG, build.vmReg(ra), build.constInt(br.actualResultCount));
        else if (FFlag::LuauCodegenMarkDeadRegisters2)
            build.inst(IrCmd::MARK_DEAD, build.vmReg(ra + 1), build.constInt(-1)); // registers past the single result slot are no longer live

        if (br.type != BuiltinImplType::UsesFallback)
        {
            // We ended up not using the fallback block, kill it
            build.function.blockOp(fallback).kind = IrBlockKind::Dead;

            return build.undef();
        }
    }
    else
    {
        // Not inlined: invoke the builtin C function directly
        IrOp arg3 = customParams ? customArg3 : build.undef();

        // TODO: we can skip saving pc for some well-behaved builtins which we didn't inline
        build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + getOpLength(opcode)));

        IrOp res = build.inst(
            IrCmd::INVOKE_FASTCALL,
            build.constUint(bfid),
            build.vmReg(ra),
            build.vmReg(arg),
            args,
            arg3,
            build.constInt(nparams),
            build.constInt(nresults)
        );
        // If the fast call could not produce results, redo the call through the fallback sequence
        build.inst(IrCmd::CHECK_FASTCALL_RES, res, fallback);

        if (nresults == LUA_MULTRET)
            build.inst(IrCmd::ADJUST_STACK_TO_REG, build.vmReg(ra), res);
        else if (nparams == LUA_MULTRET)
            build.inst(IrCmd::ADJUST_STACK_TO_TOP);
    }

    return fallback;
}
1054
1055
// numeric for loop always ends with the computation of step that targets ra+1
// any conditionals would result in a split basic block, so we can recover the step constants by pattern matching the IR we generated for LOADN/K
static IrOp getLoopStepK(IrBuilder& build, int ra)
{
    IrBlock& active = build.function.blocks[build.activeBlockIdx];

    // Need at least two instructions in the active block to match a STORE_DOUBLE/STORE_TAG pair
    if (active.start + 2 <= build.function.instructions.size())
    {
        IrInst& sv = build.function.instructions[build.function.instructions.size() - 2];
        IrInst& st = build.function.instructions[build.function.instructions.size() - 1];

        // We currently expect to match IR generated from LOADN/LOADK so we match a particular sequence of opcodes
        // In the future this can be extended to cover opposite STORE order as well as STORE_SPLIT_TVALUE
        if (sv.cmd == IrCmd::STORE_DOUBLE && OP_A(sv).kind == IrOpKind::VmReg && OP_A(sv).index == ra + 1 && OP_B(sv).kind == IrOpKind::Constant &&
            st.cmd == IrCmd::STORE_TAG && OP_A(st).kind == IrOpKind::VmReg && OP_A(st).index == ra + 1 &&
            build.function.tagOp(OP_B(st)) == LUA_TNUMBER)
            return OP_B(sv); // the constant operand that was stored into the step register
    }

    // Step value is not a recoverable compile-time constant
    return build.undef();
}
1076
1077
void beforeInstForNPrep(IrBuilder& build, const Instruction* pc, int pcpos)
{
    // Before FORNPREP is translated, remember the loop's step constant (if it
    // can be recovered from the IR) together with the pc of the loop body start
    int ra = LUAU_INSN_A(*pc);

    IrOp recoveredStep = getLoopStepK(build, ra);
    build.numericLoopStack.push_back({recoveredStep, pcpos + 1});
}
1084
1085
void afterInstForNLoop(IrBuilder& build, const Instruction* pc)
{
    // FORNLOOP closes the numeric loop; drop the entry pushed by beforeInstForNPrep
    CODEGEN_ASSERT(!build.numericLoopStack.empty());

    build.numericLoopStack.pop_back();
}
1090
1091
// FORNPREP: validate numeric loop parameters and decide whether to enter the loop body
void translateInstForNPrep(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);

    IrOp loopStart = build.blockAtInst(pcpos + getOpLength(LuauOpcode(LUAU_INSN_OP(*pc))));
    IrOp loopExit = build.blockAtInst(getJumpTarget(*pc, pcpos));

    // Step constant recovered by beforeInstForNPrep (undef if not a constant)
    CODEGEN_ASSERT(!build.numericLoopStack.empty());
    IrOp stepK = build.numericLoopStack.back().step;

    // When loop parameters are not numbers, VM tries to perform type coercion from string and raises an exception if that fails
    // Performing that fallback in native code increases code size and complicates CFG, obscuring the values when they are constant
    // To avoid that overhead for an extremely rare case (that doesn't even typecheck), we exit to VM to handle it
    IrOp tagLimit = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra + 0));
    build.inst(IrCmd::CHECK_TAG, tagLimit, build.constTag(LUA_TNUMBER), build.vmExit(pcpos));
    IrOp tagIdx = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra + 2));
    build.inst(IrCmd::CHECK_TAG, tagIdx, build.constTag(LUA_TNUMBER), build.vmExit(pcpos));

    // ra+0 holds the limit, ra+2 holds the loop index
    IrOp limit = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(ra + 0));
    IrOp idx = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(ra + 2));

    if (stepK.kind == IrOpKind::Undef)
    {
        // Dynamic step: also validate ra+1 and use the generic loop-condition jump
        IrOp tagStep = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra + 1));
        build.inst(IrCmd::CHECK_TAG, tagStep, build.constTag(LUA_TNUMBER), build.vmExit(pcpos));

        IrOp step = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(ra + 1));

        build.inst(IrCmd::JUMP_FORN_LOOP_COND, idx, limit, step, loopStart, loopExit);
    }
    else
    {
        double stepN = build.function.doubleOp(stepK);

        // Condition to start the loop: step > 0 ? idx <= limit : limit <= idx
        // We invert the condition so that loopStart is the fallthrough (false) label
        if (stepN > 0)
            build.inst(IrCmd::JUMP_CMP_NUM, idx, limit, build.cond(IrCondition::NotLessEqual), loopExit, loopStart);
        else
            build.inst(IrCmd::JUMP_CMP_NUM, limit, idx, build.cond(IrCondition::NotLessEqual), loopExit, loopStart);
    }

    // Fallthrough in original bytecode is implicit, so we start next internal block here
    if (build.isInternalBlock(loopStart))
        build.beginBlock(loopStart);

    // VM places interrupt in FORNLOOP, but that creates a likely spill point for short loops that use loop index as INTERRUPT always spills
    // We place the interrupt at the beginning of the loop body instead; VM uses FORNLOOP because it doesn't want to waste an extra instruction.
    // Because loop block may not have been started yet (as it's started when lowering the first instruction!), we need to defer INTERRUPT placement.
    build.interruptRequested = true;
}
1142
1143
// FORNLOOP: advance the numeric loop index and jump back to the body if the loop continues
void translateInstForNLoop(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);

    int repeatJumpTarget = getJumpTarget(*pc, pcpos);
    IrOp loopRepeat = build.blockAtInst(repeatJumpTarget);
    IrOp loopExit = build.blockAtInst(pcpos + getOpLength(LuauOpcode(LUAU_INSN_OP(*pc))));

    CODEGEN_ASSERT(!build.numericLoopStack.empty());
    IrBuilder::LoopInfo loopInfo = build.numericLoopStack.back();

    // normally, the interrupt is placed at the beginning of the loop body by FORNPREP translation
    // however, there are rare cases where FORNLOOP might not jump directly to the first loop instruction
    // we detect this by checking the starting instruction of the loop body from loop information stack
    if (repeatJumpTarget != loopInfo.startpc)
        build.inst(IrCmd::INTERRUPT, build.constUint(pcpos));

    IrOp stepK = loopInfo.step;

    // ra+0 is the limit; the step comes from ra+1 unless it was recovered as a constant
    IrOp limit = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(ra + 0));
    IrOp step = stepK.kind == IrOpKind::Undef ? build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(ra + 1)) : stepK;

    // idx (ra+2) advances by the step on every iteration
    IrOp idx = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(ra + 2));
    idx = build.inst(IrCmd::ADD_NUM, idx, step);
    build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra + 2), idx);

    if (stepK.kind == IrOpKind::Undef)
    {
        // Dynamic step: generic loop-condition jump handles both signs
        build.inst(IrCmd::JUMP_FORN_LOOP_COND, idx, limit, step, loopRepeat, loopExit);
    }
    else
    {
        double stepN = build.function.doubleOp(stepK);

        if (FFlag::LuauCodegenDseOnCondJump)
        {
            // Constant step optimization removes all the uses of the step register, but it has potential uses if a VM exit is taken
            build.inst(IrCmd::MARK_USED, build.vmReg(ra + 1), build.constInt(1));
        }

        // Condition to continue the loop: step > 0 ? idx <= limit : limit <= idx
        if (stepN > 0)
            build.inst(IrCmd::JUMP_CMP_NUM, idx, limit, build.cond(IrCondition::LessEqual), loopRepeat, loopExit);
        else
            build.inst(IrCmd::JUMP_CMP_NUM, limit, idx, build.cond(IrCondition::LessEqual), loopRepeat, loopExit);
    }

    // Fallthrough in original bytecode is implicit, so we start next internal block here
    if (build.isInternalBlock(loopExit))
        build.beginBlock(loopExit);
}
1194
1195
// FORGPREP_NEXT: prepare a generic loop specialized for the 'next' iterator
void translateInstForGPrepNext(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);

    IrOp target = build.blockAtInst(pcpos + 1 + LUAU_INSN_D(*pc));
    IrOp fallback = build.fallbackBlock(pcpos);

    // fast-path: pairs/next
    // This specialization is only valid while the environment is safe (builtins not overridden)
    if (FFlag::LuauCodegenBlockSafeEnv)
        build.checkSafeEnv(pcpos);
    else
        build.inst(IrCmd::CHECK_SAFE_ENV, build.vmExit(pcpos));

    // For the builtin iteration protocol, ra+1 (state) must be a table and ra+2 (control) must be nil
    IrOp tagB = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra + 1));
    build.inst(IrCmd::CHECK_TAG, tagB, build.constTag(LUA_TTABLE), fallback);
    IrOp tagC = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra + 2));
    build.inst(IrCmd::CHECK_TAG, tagC, build.constTag(LUA_TNIL), fallback);

    // Clear the iterator function slot to mark builtin table iteration
    build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNIL));

    // setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0)), LU_TAG_ITERATOR);
    build.inst(IrCmd::STORE_POINTER, build.vmReg(ra + 2), build.constInt(0));
    build.inst(IrCmd::STORE_EXTRA, build.vmReg(ra + 2), build.constInt(LU_TAG_ITERATOR));
    build.inst(IrCmd::STORE_TAG, build.vmReg(ra + 2), build.constTag(LUA_TLIGHTUSERDATA));

    build.inst(IrCmd::JUMP, target);

    // Slow path: generic FORGPREP handling
    build.beginBlock(fallback);
    build.inst(IrCmd::FORGPREP_XNEXT_FALLBACK, build.constUint(pcpos), build.vmReg(ra), target);
}
1225
1226
// FORGPREP_INEXT: prepare a generic loop specialized for the 'inext' (ipairs) iterator
void translateInstForGPrepInext(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);

    IrOp target = build.blockAtInst(pcpos + 1 + LUAU_INSN_D(*pc));
    IrOp fallback = build.fallbackBlock(pcpos);
    IrOp finish = build.block(IrBlockKind::Internal);

    // fast-path: ipairs/inext
    // This specialization is only valid while the environment is safe (builtins not overridden)
    if (FFlag::LuauCodegenBlockSafeEnv)
        build.checkSafeEnv(pcpos);
    else
        build.inst(IrCmd::CHECK_SAFE_ENV, build.vmExit(pcpos));

    // For the builtin iteration protocol, ra+1 (state) must be a table and ra+2 (control) a number
    IrOp tagB = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra + 1));
    build.inst(IrCmd::CHECK_TAG, tagB, build.constTag(LUA_TTABLE), fallback);
    IrOp tagC = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra + 2));
    build.inst(IrCmd::CHECK_TAG, tagC, build.constTag(LUA_TNUMBER), fallback);

    // The starting index must be exactly 0 for the specialized iteration
    IrOp numC = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(ra + 2));
    build.inst(IrCmd::JUMP_CMP_NUM, numC, build.constDouble(0.0), build.cond(IrCondition::NotEqual), fallback, finish);

    build.beginBlock(finish);

    // Clear the iterator function slot to mark builtin table iteration
    build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNIL));

    // setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0)), LU_TAG_ITERATOR);
    build.inst(IrCmd::STORE_POINTER, build.vmReg(ra + 2), build.constInt(0));
    build.inst(IrCmd::STORE_EXTRA, build.vmReg(ra + 2), build.constInt(LU_TAG_ITERATOR));
    build.inst(IrCmd::STORE_TAG, build.vmReg(ra + 2), build.constTag(LUA_TLIGHTUSERDATA));

    build.inst(IrCmd::JUMP, target);

    // Slow path: generic FORGPREP handling
    build.beginBlock(fallback);
    build.inst(IrCmd::FORGPREP_XNEXT_FALLBACK, build.constUint(pcpos), build.vmReg(ra), target);
}
1262
1263
// FORGLOOP specialized for ipairs-style iteration: walks the table array part directly
void translateInstForGLoopIpairs(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);
    CODEGEN_ASSERT(int(pc[1]) < 0); // aux encodes a negative value for the ipairs specialization

    IrOp loopRepeat = build.blockAtInst(getJumpTarget(*pc, pcpos));
    IrOp loopExit = build.blockAtInst(pcpos + getOpLength(LuauOpcode(LUAU_INSN_OP(*pc))));
    IrOp fallback = build.fallbackBlock(pcpos);

    IrOp hasElem = build.block(IrBlockKind::Internal);

    build.inst(IrCmd::INTERRUPT, build.constUint(pcpos));

    // fast-path: builtin table iteration
    // ra must still be nil, as set up by FORGPREP_INEXT; otherwise a custom iterator is in use
    IrOp tagA = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra));
    build.inst(IrCmd::CHECK_TAG, tagA, build.constTag(LUA_TNIL), fallback);

    // ra+1 holds the table, ra+2 holds the current 0-based index
    IrOp table = build.inst(IrCmd::LOAD_POINTER, build.vmReg(ra + 1));
    IrOp index = build.inst(IrCmd::LOAD_INT, build.vmReg(ra + 2));

    IrOp elemPtr = build.inst(IrCmd::GET_ARR_ADDR, table, index);

    // Terminate if array has ended
    build.inst(IrCmd::CHECK_ARRAY_SIZE, table, index, loopExit);

    // Terminate if element is nil
    IrOp elemTag = build.inst(IrCmd::LOAD_TAG, elemPtr);
    build.inst(IrCmd::JUMP_EQ_TAG, elemTag, build.constTag(LUA_TNIL), loopExit, hasElem);
    build.beginBlock(hasElem);

    IrOp nextIndex = build.inst(IrCmd::ADD_INT, index, build.constInt(1));

    // We update only a dword part of the userdata pointer that's reused in loop iteration as an index
    // Upper bits start and remain to be 0
    build.inst(IrCmd::STORE_INT, build.vmReg(ra + 2), nextIndex);
    // Tag should already be set to lightuserdata

    // setnvalue(ra + 3, double(index + 1));
    build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra + 3), build.inst(IrCmd::INT_TO_NUM, nextIndex));
    build.inst(IrCmd::STORE_TAG, build.vmReg(ra + 3), build.constTag(LUA_TNUMBER));

    // setobj2s(L, ra + 4, e);
    IrOp elemTV = build.inst(IrCmd::LOAD_TVALUE, elemPtr);
    build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra + 4), elemTV);

    build.inst(IrCmd::JUMP, loopRepeat);

    // Slow path: generic FORGLOOP for custom iterators
    build.beginBlock(fallback);
    build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));
    build.inst(IrCmd::FORGLOOP_FALLBACK, build.vmReg(ra), build.constInt(int(pc[1])), loopRepeat, loopExit);

    // Fallthrough in original bytecode is implicit, so we start next internal block here
    if (build.isInternalBlock(loopExit))
        build.beginBlock(loopExit);
}
1318
1319
// GETTABLEN: ra = rb[C+1] where C is a small constant index encoded in the instruction
void translateInstGetTableN(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);
    int rb = LUAU_INSN_B(*pc);
    int c = LUAU_INSN_C(*pc);

    BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);

    if (isUserdataBytecodeType(bcTypes.a))
    {
        // Userdata goes straight to the generic runtime path (may invoke __index)
        build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));
        build.inst(IrCmd::GET_TABLE, build.vmReg(ra), build.vmReg(rb), build.constUint(c + 1));
        return;
    }

    IrOp fallback = build.fallbackBlock(pcpos);

    // If type info predicted a table, a failed tag check exits to the VM instead of the fallback
    IrOp tb = build.inst(IrCmd::LOAD_TAG, build.vmReg(rb));
    build.inst(IrCmd::CHECK_TAG, tb, build.constTag(LUA_TTABLE), bcTypes.a == LBC_TYPE_TABLE ? build.vmExit(pcpos) : fallback);

    IrOp vb = build.inst(IrCmd::LOAD_POINTER, build.vmReg(rb));

    // fast-path requires the element to be in the array part and no __index handling
    build.inst(IrCmd::CHECK_ARRAY_SIZE, vb, build.constInt(c), fallback);
    build.inst(IrCmd::CHECK_NO_METATABLE, vb, fallback);

    IrOp arrEl = build.inst(IrCmd::GET_ARR_ADDR, vb, build.constInt(0));

    // Element is read at a constant byte offset from the array base
    IrOp arrElTval = build.inst(IrCmd::LOAD_TVALUE, arrEl, build.constInt(c * sizeof(TValue)));
    build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), arrElTval);

    IrOp next = build.blockAtInst(pcpos + 1);
    FallbackStreamScope scope(build, fallback, next);

    // Slow path: generic table access
    build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));
    build.inst(IrCmd::GET_TABLE, build.vmReg(ra), build.vmReg(rb), build.constUint(c + 1));
    build.inst(IrCmd::JUMP, next);
}
1356
1357
// SETTABLEN: rb[C+1] = ra where C is a small constant index encoded in the instruction
void translateInstSetTableN(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);
    int rb = LUAU_INSN_B(*pc);
    int c = LUAU_INSN_C(*pc);

    BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);

    if (isUserdataBytecodeType(bcTypes.a))
    {
        // Userdata goes straight to the generic runtime path (may invoke __newindex)
        build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));
        build.inst(IrCmd::SET_TABLE, build.vmReg(ra), build.vmReg(rb), build.constUint(c + 1));
        return;
    }

    IrOp fallback = build.fallbackBlock(pcpos);

    // If type info predicted a table, a failed tag check exits to the VM instead of the fallback
    IrOp tb = build.inst(IrCmd::LOAD_TAG, build.vmReg(rb));
    build.inst(IrCmd::CHECK_TAG, tb, build.constTag(LUA_TTABLE), bcTypes.a == LBC_TYPE_TABLE ? build.vmExit(pcpos) : fallback);

    IrOp vb = build.inst(IrCmd::LOAD_POINTER, build.vmReg(rb));

    // fast-path requires array-part storage, no __newindex handling and a writable table
    build.inst(IrCmd::CHECK_ARRAY_SIZE, vb, build.constInt(c), fallback);
    build.inst(IrCmd::CHECK_NO_METATABLE, vb, fallback);
    build.inst(IrCmd::CHECK_READONLY, vb, fallback);

    IrOp arrEl = build.inst(IrCmd::GET_ARR_ADDR, vb, build.constInt(0));

    // Element is written at a constant byte offset from the array base
    IrOp tva = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(ra));
    build.inst(IrCmd::STORE_TVALUE, arrEl, tva, build.constInt(c * sizeof(TValue)));

    // GC write barrier: table may now reference a younger object
    build.inst(IrCmd::BARRIER_TABLE_FORWARD, vb, build.vmReg(ra), build.undef());

    IrOp next = build.blockAtInst(pcpos + 1);
    FallbackStreamScope scope(build, fallback, next);

    // Slow path: generic table assignment
    build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));
    build.inst(IrCmd::SET_TABLE, build.vmReg(ra), build.vmReg(rb), build.constUint(c + 1));
    build.inst(IrCmd::JUMP, next);
}
1397
1398
// GETTABLE: ra = rb[rc] with a fast path for numeric indexing into the array part
void translateInstGetTable(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);
    int rb = LUAU_INSN_B(*pc);
    int rc = LUAU_INSN_C(*pc);

    BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);

    if (isUserdataBytecodeType(bcTypes.a) || bcTypes.b == LBC_TYPE_STRING)
    {
        // Userdata objects and string keys are handled by the generic runtime path
        build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));
        build.inst(IrCmd::GET_TABLE, build.vmReg(ra), build.vmReg(rb), build.vmReg(rc));
        return;
    }

    IrOp fallback = build.fallbackBlock(pcpos);

    // If type info predicted a table/number, a failed tag check exits to the VM instead of the fallback
    IrOp tb = build.inst(IrCmd::LOAD_TAG, build.vmReg(rb));
    build.inst(IrCmd::CHECK_TAG, tb, build.constTag(LUA_TTABLE), bcTypes.a == LBC_TYPE_TABLE ? build.vmExit(pcpos) : fallback);
    IrOp tc = build.inst(IrCmd::LOAD_TAG, build.vmReg(rc));
    build.inst(IrCmd::CHECK_TAG, tc, build.constTag(LUA_TNUMBER), bcTypes.b == LBC_TYPE_NUMBER ? build.vmExit(pcpos) : fallback);

    // fast-path: table with a number index
    IrOp vb = build.inst(IrCmd::LOAD_POINTER, build.vmReg(rb));
    IrOp vc = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(rc));

    // Convert the double key to an integer index (falls back if not an exact integer)
    IrOp index = build.inst(IrCmd::TRY_NUM_TO_INDEX, vc, fallback);

    // Lua indices are 1-based; the array part is 0-based
    index = build.inst(IrCmd::SUB_INT, index, build.constInt(1));

    build.inst(IrCmd::CHECK_ARRAY_SIZE, vb, index, fallback);
    build.inst(IrCmd::CHECK_NO_METATABLE, vb, fallback);

    IrOp arrEl = build.inst(IrCmd::GET_ARR_ADDR, vb, index);

    IrOp arrElTval = build.inst(IrCmd::LOAD_TVALUE, arrEl);
    build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), arrElTval);

    IrOp next = build.blockAtInst(pcpos + 1);
    FallbackStreamScope scope(build, fallback, next);

    // Slow path: generic table access
    build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));
    build.inst(IrCmd::GET_TABLE, build.vmReg(ra), build.vmReg(rb), build.vmReg(rc));
    build.inst(IrCmd::JUMP, next);
}
1443
1444
// SETTABLE: rb[rc] = ra with a fast path for numeric indexing into the array part
void translateInstSetTable(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);
    int rb = LUAU_INSN_B(*pc);
    int rc = LUAU_INSN_C(*pc);

    BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);

    if (isUserdataBytecodeType(bcTypes.a) || bcTypes.b == LBC_TYPE_STRING)
    {
        // Userdata objects and string keys are handled by the generic runtime path
        build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));
        build.inst(IrCmd::SET_TABLE, build.vmReg(ra), build.vmReg(rb), build.vmReg(rc));
        return;
    }

    IrOp fallback = build.fallbackBlock(pcpos);

    // If type info predicted a table/number, a failed tag check exits to the VM instead of the fallback
    IrOp tb = build.inst(IrCmd::LOAD_TAG, build.vmReg(rb));
    build.inst(IrCmd::CHECK_TAG, tb, build.constTag(LUA_TTABLE), bcTypes.a == LBC_TYPE_TABLE ? build.vmExit(pcpos) : fallback);
    IrOp tc = build.inst(IrCmd::LOAD_TAG, build.vmReg(rc));
    build.inst(IrCmd::CHECK_TAG, tc, build.constTag(LUA_TNUMBER), bcTypes.b == LBC_TYPE_NUMBER ? build.vmExit(pcpos) : fallback);

    // fast-path: table with a number index
    IrOp vb = build.inst(IrCmd::LOAD_POINTER, build.vmReg(rb));
    IrOp vc = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(rc));

    // Convert the double key to an integer index (falls back if not an exact integer)
    IrOp index = build.inst(IrCmd::TRY_NUM_TO_INDEX, vc, fallback);

    // Lua indices are 1-based; the array part is 0-based
    index = build.inst(IrCmd::SUB_INT, index, build.constInt(1));

    // fast-path requires array-part storage, no __newindex handling and a writable table
    build.inst(IrCmd::CHECK_ARRAY_SIZE, vb, index, fallback);
    build.inst(IrCmd::CHECK_NO_METATABLE, vb, fallback);
    build.inst(IrCmd::CHECK_READONLY, vb, fallback);

    IrOp arrEl = build.inst(IrCmd::GET_ARR_ADDR, vb, index);

    IrOp tva = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(ra));
    build.inst(IrCmd::STORE_TVALUE, arrEl, tva);

    // GC write barrier: table may now reference a younger object
    build.inst(IrCmd::BARRIER_TABLE_FORWARD, vb, build.vmReg(ra), build.undef());

    IrOp next = build.blockAtInst(pcpos + 1);
    FallbackStreamScope scope(build, fallback, next);

    // Slow path: generic table assignment
    build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));
    build.inst(IrCmd::SET_TABLE, build.vmReg(ra), build.vmReg(rb), build.vmReg(rc));
    build.inst(IrCmd::JUMP, next);
}
1492
1493
// GETIMPORT: ra = cached import value (constant k), with import id in aux
void translateInstGetImport(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);
    int k = LUAU_INSN_D(*pc);
    uint32_t aux = pc[1];

    // The cached import is only valid while the environment is safe (globals not mutated)
    if (FFlag::LuauCodegenBlockSafeEnv)
        build.checkSafeEnv(pcpos);
    else
        build.inst(IrCmd::CHECK_SAFE_ENV, build.vmExit(pcpos));

    build.inst(IrCmd::GET_CACHED_IMPORT, build.vmReg(ra), build.vmConst(k), build.constImport(aux), build.constUint(pcpos + 1));
}
1506
1507
// GETTABLEKS: ra = rb[K] where K is a string constant (index in aux).
// Specializes vector component access ('x'/'y'/'z'), userdata host hooks,
// and predicted-slot table lookup; everything else uses FALLBACK_GETTABLEKS.
void translateInstGetTableKS(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);
    int rb = LUAU_INSN_B(*pc);
    uint32_t aux = pc[1];

    BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);

    IrOp tb = build.inst(IrCmd::LOAD_TAG, build.vmReg(rb));

    if (bcTypes.a == LBC_TYPE_VECTOR)
    {
        build.inst(IrCmd::CHECK_TAG, tb, build.constTag(LUA_TVECTOR), build.vmExit(pcpos));

        TString* str = gco2ts(build.function.proto->k[aux].value.gc);
        const char* field = getstr(str);

        // Single-character 'X'/'x', 'Y'/'y', 'Z'/'z' reads one vector component;
        // the three cases differ only in the component's 4-byte float offset.
        if (str->len == 1)
        {
            int component = -1;

            if (*field == 'X' || *field == 'x')
                component = 0;
            else if (*field == 'Y' || *field == 'y')
                component = 1;
            else if (*field == 'Z' || *field == 'z')
                component = 2;

            if (component >= 0)
            {
                IrOp value = build.inst(IrCmd::LOAD_FLOAT, build.vmReg(rb), build.constInt(component * 4));

                value = build.inst(IrCmd::FLOAT_TO_NUM, value);

                build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), value);
                build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
                return;
            }
        }

        // Let the host inline other vector field accesses before falling back
        if (build.hostHooks.vectorAccess && build.hostHooks.vectorAccess(build, field, str->len, ra, rb, pcpos))
            return;

        build.inst(IrCmd::FALLBACK_GETTABLEKS, build.constUint(pcpos), build.vmReg(ra), build.vmReg(rb), build.vmConst(aux));
        return;
    }

    if (isUserdataBytecodeType(bcTypes.a))
    {
        build.inst(IrCmd::CHECK_TAG, tb, build.constTag(LUA_TUSERDATA), build.vmExit(pcpos));

        // Let the host inline the userdata field access before falling back
        if (build.hostHooks.userdataAccess)
        {
            TString* str = gco2ts(build.function.proto->k[aux].value.gc);
            const char* field = getstr(str);

            if (build.hostHooks.userdataAccess(build, bcTypes.a, field, str->len, ra, rb, pcpos))
                return;
        }

        build.inst(IrCmd::FALLBACK_GETTABLEKS, build.constUint(pcpos), build.vmReg(ra), build.vmReg(rb), build.vmConst(aux));
        return;
    }

    IrOp fallback = build.fallbackBlock(pcpos);

    // If type info predicted a table, a failed tag check exits to the VM instead of the fallback
    build.inst(IrCmd::CHECK_TAG, tb, build.constTag(LUA_TTABLE), bcTypes.a == LBC_TYPE_TABLE ? build.vmExit(pcpos) : fallback);

    IrOp vb = build.inst(IrCmd::LOAD_POINTER, build.vmReg(rb));

    // fast-path: the predicted node slot for this instruction still matches the key
    IrOp addrSlotEl = build.inst(IrCmd::GET_SLOT_NODE_ADDR, vb, build.constUint(pcpos), build.vmConst(aux));

    build.inst(IrCmd::CHECK_SLOT_MATCH, addrSlotEl, build.vmConst(aux), fallback);

    IrOp tvn = build.inst(IrCmd::LOAD_TVALUE, addrSlotEl, build.constInt(offsetof(LuaNode, val)));
    build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), tvn);

    // GETTABLEKS is two words long (opcode + aux)
    IrOp next = build.blockAtInst(pcpos + 2);
    FallbackStreamScope scope(build, fallback, next);

    build.inst(IrCmd::FALLBACK_GETTABLEKS, build.constUint(pcpos), build.vmReg(ra), build.vmReg(rb), build.vmConst(aux));
    build.inst(IrCmd::JUMP, next);
}
1598
1599
// SETTABLEKS: rb[K] = ra where K is a string constant (index in aux),
// with a fast path through the predicted node slot for this instruction
void translateInstSetTableKS(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);
    int rb = LUAU_INSN_B(*pc);
    uint32_t aux = pc[1];

    BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);

    IrOp tb = build.inst(IrCmd::LOAD_TAG, build.vmReg(rb));

    if (isUserdataBytecodeType(bcTypes.a))
    {
        // Userdata assignment always goes through the runtime fallback
        build.inst(IrCmd::CHECK_TAG, tb, build.constTag(LUA_TUSERDATA), build.vmExit(pcpos));

        build.inst(IrCmd::FALLBACK_SETTABLEKS, build.constUint(pcpos), build.vmReg(ra), build.vmReg(rb), build.vmConst(aux));
        return;
    }

    IrOp fallback = build.fallbackBlock(pcpos);

    // If type info predicted a table, a failed tag check exits to the VM instead of the fallback
    build.inst(IrCmd::CHECK_TAG, tb, build.constTag(LUA_TTABLE), bcTypes.a == LBC_TYPE_TABLE ? build.vmExit(pcpos) : fallback);

    IrOp vb = build.inst(IrCmd::LOAD_POINTER, build.vmReg(rb));

    // fast-path: predicted slot still holds the key and the table is writable
    IrOp addrSlotEl = build.inst(IrCmd::GET_SLOT_NODE_ADDR, vb, build.constUint(pcpos), build.vmConst(aux));

    build.inst(IrCmd::CHECK_SLOT_MATCH, addrSlotEl, build.vmConst(aux), fallback);
    build.inst(IrCmd::CHECK_READONLY, vb, fallback);

    IrOp tva = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(ra));
    build.inst(IrCmd::STORE_TVALUE, addrSlotEl, tva, build.constInt(offsetof(LuaNode, val)));

    // GC write barrier: table may now reference a younger object
    build.inst(IrCmd::BARRIER_TABLE_FORWARD, vb, build.vmReg(ra), build.undef());

    // SETTABLEKS is two words long (opcode + aux)
    IrOp next = build.blockAtInst(pcpos + 2);
    FallbackStreamScope scope(build, fallback, next);

    build.inst(IrCmd::FALLBACK_SETTABLEKS, build.constUint(pcpos), build.vmReg(ra), build.vmReg(rb), build.vmConst(aux));
    build.inst(IrCmd::JUMP, next);
}
1639
1640
// GETGLOBAL: ra = _G[K] where K is a string constant (index in aux),
// with a fast path through the predicted node slot of the environment table
void translateInstGetGlobal(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);
    uint32_t aux = pc[1];

    IrOp fallback = build.fallbackBlock(pcpos);

    // fast-path: the predicted slot in the environment table still matches the key
    IrOp env = build.inst(IrCmd::LOAD_ENV);
    IrOp addrSlotEl = build.inst(IrCmd::GET_SLOT_NODE_ADDR, env, build.constUint(pcpos), build.vmConst(aux));

    build.inst(IrCmd::CHECK_SLOT_MATCH, addrSlotEl, build.vmConst(aux), fallback);

    IrOp tvn = build.inst(IrCmd::LOAD_TVALUE, addrSlotEl, build.constInt(offsetof(LuaNode, val)));
    build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), tvn);

    // GETGLOBAL is two words long (opcode + aux)
    IrOp next = build.blockAtInst(pcpos + 2);
    FallbackStreamScope scope(build, fallback, next);

    build.inst(IrCmd::FALLBACK_GETGLOBAL, build.constUint(pcpos), build.vmReg(ra), build.vmConst(aux));
    build.inst(IrCmd::JUMP, next);
}
1661
1662
// SETGLOBAL: _G[K] = ra where K is a string constant (index in aux),
// with a fast path through the predicted node slot of the environment table
void translateInstSetGlobal(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);
    uint32_t aux = pc[1];

    IrOp fallback = build.fallbackBlock(pcpos);

    // fast-path: predicted slot still holds the key and the environment is writable
    IrOp env = build.inst(IrCmd::LOAD_ENV);
    IrOp addrSlotEl = build.inst(IrCmd::GET_SLOT_NODE_ADDR, env, build.constUint(pcpos), build.vmConst(aux));

    build.inst(IrCmd::CHECK_SLOT_MATCH, addrSlotEl, build.vmConst(aux), fallback);
    build.inst(IrCmd::CHECK_READONLY, env, fallback);

    IrOp tva = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(ra));
    build.inst(IrCmd::STORE_TVALUE, addrSlotEl, tva, build.constInt(offsetof(LuaNode, val)));

    // GC write barrier: environment table may now reference a younger object
    build.inst(IrCmd::BARRIER_TABLE_FORWARD, env, build.vmReg(ra), build.undef());

    // SETGLOBAL is two words long (opcode + aux)
    IrOp next = build.blockAtInst(pcpos + 2);
    FallbackStreamScope scope(build, fallback, next);

    build.inst(IrCmd::FALLBACK_SETGLOBAL, build.constUint(pcpos), build.vmReg(ra), build.vmConst(aux));
    build.inst(IrCmd::JUMP, next);
}
1686
1687
void translateInstConcat(IrBuilder& build, const Instruction* pc, int pcpos)
{
    // CONCAT: ra = rb .. rb+1 .. ... .. rc
    int ra = LUAU_INSN_A(*pc);
    int rb = LUAU_INSN_B(*pc);
    int rc = LUAU_INSN_C(*pc);

    // Concatenation can run metamethods and allocate, so save the pc first
    build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));

    int valueCount = rc - rb + 1;
    build.inst(IrCmd::CONCAT, build.vmReg(rb), build.constUint(valueCount));

    // The concatenated value is read back from rb and copied into the destination
    IrOp result = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(rb));
    build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), result);

    // String allocation may push the heap over the GC threshold
    build.inst(IrCmd::CHECK_GC);
}
1701
1702
// Translates CAPTURE: records how the next NEWCLOSURE/DUPCLOSURE captures one upvalue.
// CAPTURE emits no executable code by itself; the IR CAPTURE op only informs analysis
// (the second operand marks whether the register is captured by reference).
void translateInstCapture(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int type = LUAU_INSN_A(*pc);
    int index = LUAU_INSN_B(*pc);

    if (type == LCT_VAL)
        build.inst(IrCmd::CAPTURE, build.vmReg(index), build.constUint(0)); // capture register by value
    else if (type == LCT_REF)
        build.inst(IrCmd::CAPTURE, build.vmReg(index), build.constUint(1)); // capture register by reference
    else if (type == LCT_UPVAL)
        build.inst(IrCmd::CAPTURE, build.vmUpvalue(index), build.constUint(0)); // re-capture an enclosing upvalue
    else
        CODEGEN_ASSERT(!"Unknown upvalue capture type");
}
1722
1723
// Translates NAMECALL: ra+1 = rb; ra = rb[K(aux)] — sets up a method call for the LOP_CALL that follows.
// Returns true when a host hook translated the whole NAMECALL+CALL pair itself (the caller must then
// skip the CALL instruction), false when regular CALL translation should proceed.
bool translateInstNamecall(IrBuilder& build, const Instruction* pc, int pcpos)
{
    int ra = LUAU_INSN_A(*pc);
    int rb = LUAU_INSN_B(*pc);
    uint32_t aux = pc[1]; // constant index of the method name string

    BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);

    // Specialized path when type analysis proved the receiver is a vector
    if (bcTypes.a == LBC_TYPE_VECTOR)
    {
        build.loadAndCheckTag(build.vmReg(rb), LUA_TVECTOR, build.vmExit(pcpos));

        if (build.hostHooks.vectorNamecall)
        {
            // NAMECALL is always followed by CALL; decode it so the hook can translate both at once
            Instruction call = pc[2];
            CODEGEN_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);

            int callra = LUAU_INSN_A(call);
            int nparams = LUAU_INSN_B(call) - 1; // -1 encodes a variadic argument count
            int nresults = LUAU_INSN_C(call) - 1; // -1 encodes multiple-return

            TString* str = gco2ts(build.function.proto->k[aux].value.gc);
            const char* field = getstr(str);

            if (build.hostHooks.vectorNamecall(build, field, str->len, callra, rb, nparams, nresults, pcpos))
                return true;
        }

        // Hook absent or declined: defer the method lookup to the VM
        build.inst(IrCmd::FALLBACK_NAMECALL, build.constUint(pcpos), build.vmReg(ra), build.vmReg(rb), build.vmConst(aux));
        return false;
    }

    // Specialized path when the receiver is a known userdata type
    if (isUserdataBytecodeType(bcTypes.a))
    {
        build.loadAndCheckTag(build.vmReg(rb), LUA_TUSERDATA, build.vmExit(pcpos));

        if (build.hostHooks.userdataNamecall)
        {
            // Same NAMECALL+CALL pairing as the vector path above
            Instruction call = pc[2];
            CODEGEN_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);

            int callra = LUAU_INSN_A(call);
            int nparams = LUAU_INSN_B(call) - 1;
            int nresults = LUAU_INSN_C(call) - 1;

            TString* str = gco2ts(build.function.proto->k[aux].value.gc);
            const char* field = getstr(str);

            if (build.hostHooks.userdataNamecall(build, bcTypes.a, field, str->len, callra, rb, nparams, nresults, pcpos))
                return true;
        }

        build.inst(IrCmd::FALLBACK_NAMECALL, build.constUint(pcpos), build.vmReg(ra), build.vmReg(rb), build.vmConst(aux));
        return false;
    }

    // Generic table path: two fast paths (direct hash hit, then __index table lookup) plus a VM fallback
    IrOp next = build.blockAtInst(pcpos + getOpLength(LuauOpcode(LOP_NAMECALL)));
    IrOp fallback = build.fallbackBlock(pcpos);
    IrOp firstFastPathSuccess = build.block(IrBlockKind::Internal);
    IrOp secondFastPath = build.block(IrBlockKind::Internal);

    // If the receiver was proven to be a table, a tag mismatch is impossible without outside interference — exit to VM
    build.loadAndCheckTag(build.vmReg(rb), LUA_TTABLE, bcTypes.a == LBC_TYPE_TABLE ? build.vmExit(pcpos) : fallback);
    IrOp table = build.inst(IrCmd::LOAD_POINTER, build.vmReg(rb));

    CODEGEN_ASSERT(build.function.proto);
    // Hash the method name at compile time and probe the table's main-position node directly
    IrOp addrNodeEl = build.inst(IrCmd::GET_HASH_NODE_ADDR, table, build.constUint(tsvalue(&build.function.proto->k[aux])->hash));

    // We use 'jump' version instead of 'check' guard because we are jumping away into a non-fallback block
    // This is required by CFG live range analysis because both non-fallback blocks define the same registers
    build.inst(IrCmd::JUMP_SLOT_MATCH, addrNodeEl, build.vmConst(aux), firstFastPathSuccess, secondFastPath);

    // Fast path 1: method found in the receiver table itself; ra+1 = receiver, ra = method
    build.beginBlock(firstFastPathSuccess);
    build.inst(IrCmd::STORE_POINTER, build.vmReg(ra + 1), table);
    build.inst(IrCmd::STORE_TAG, build.vmReg(ra + 1), build.constTag(LUA_TTABLE));

    IrOp nodeEl = build.inst(IrCmd::LOAD_TVALUE, addrNodeEl, build.constInt(offsetof(LuaNode, val)));
    build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), nodeEl);
    build.inst(IrCmd::JUMP, next);

    // Fast path 2: method missing from the receiver; try a plain-table __index metatable lookup
    build.beginBlock(secondFastPath);

    // A chained node means the key might exist elsewhere in the bucket — too complex, fall back
    build.inst(IrCmd::CHECK_NODE_NO_NEXT, addrNodeEl, fallback);

    IrOp indexPtr = build.inst(IrCmd::TRY_CALL_FASTGETTM, table, build.constInt(TM_INDEX), fallback);

    // Only handle the case where __index is itself a table; anything else goes to the VM
    build.loadAndCheckTag(indexPtr, LUA_TTABLE, fallback);
    IrOp index = build.inst(IrCmd::LOAD_POINTER, indexPtr);

    IrOp addrIndexNodeEl = build.inst(IrCmd::GET_SLOT_NODE_ADDR, index, build.constUint(pcpos), build.vmConst(aux));
    build.inst(IrCmd::CHECK_SLOT_MATCH, addrIndexNodeEl, build.vmConst(aux), fallback);

    // TODO: original 'table' was clobbered by a call inside 'FASTGETTM'
    // Ideally, such calls should have no effect on SSA IR values, but simple register allocator doesn't support it
    IrOp table2 = build.inst(IrCmd::LOAD_POINTER, build.vmReg(rb));
    build.inst(IrCmd::STORE_POINTER, build.vmReg(ra + 1), table2);
    build.inst(IrCmd::STORE_TAG, build.vmReg(ra + 1), build.constTag(LUA_TTABLE));

    IrOp indexNodeEl = build.inst(IrCmd::LOAD_TVALUE, addrIndexNodeEl, build.constInt(offsetof(LuaNode, val)));
    build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), indexNodeEl);
    build.inst(IrCmd::JUMP, next);

    // Slow path: full VM method resolution (metatables, __namecall, errors)
    build.beginBlock(fallback);
    build.inst(IrCmd::FALLBACK_NAMECALL, build.constUint(pcpos), build.vmReg(ra), build.vmReg(rb), build.vmConst(aux));
    build.inst(IrCmd::JUMP, next);

    build.beginBlock(next);

    return false;
}
1832
1833
// Translates AND/ANDK: ra = rb and c, where 'c' is a register or constant operand.
// No branching is needed: SELECT_IF_TRUTHY picks the second operand when the first is truthy.
void translateInstAndX(IrBuilder& build, const Instruction* pc, int pcpos, IrOp c)
{
    int ra = LUAU_INSN_A(*pc);
    int rb = LUAU_INSN_B(*pc);

    // "b and c" evaluates to c when b is truthy, otherwise to b itself
    IrOp left = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(rb));
    IrOp right = build.inst(IrCmd::LOAD_TVALUE, c);

    build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), build.inst(IrCmd::SELECT_IF_TRUTHY, left, right, left));
}
1845
1846
// Translates OR/ORK: ra = rb or c, where 'c' is a register or constant operand.
// Mirrors translateInstAndX with the select operands swapped.
void translateInstOrX(IrBuilder& build, const Instruction* pc, int pcpos, IrOp c)
{
    int ra = LUAU_INSN_A(*pc);
    int rb = LUAU_INSN_B(*pc);

    // "b or c" evaluates to b when b is truthy, otherwise to c
    IrOp left = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(rb));
    IrOp right = build.inst(IrCmd::LOAD_TVALUE, c);

    build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), build.inst(IrCmd::SELECT_IF_TRUTHY, left, left, right));
}
1858
1859
// Translates NEWCLOSURE: ra = closure over child proto D, consuming the LOP_CAPTURE
// pseudo-instructions that follow to initialize each upvalue slot in-line.
void translateInstNewClosure(IrBuilder& build, const Instruction* pc, int pcpos)
{
    CODEGEN_ASSERT(unsigned(LUAU_INSN_D(*pc)) < unsigned(build.function.proto->sizep));

    int ra = LUAU_INSN_A(*pc);
    Proto* pv = build.function.proto->p[LUAU_INSN_D(*pc)]; // child function prototype

    // Closure allocation can trigger GC/errors, so savedpc must point past this instruction
    build.inst(IrCmd::SET_SAVEDPC, build.constUint(pcpos + 1));

    IrOp env = build.inst(IrCmd::LOAD_ENV);
    IrOp ncl = build.inst(IrCmd::NEWCLOSURE, build.constUint(pv->nups), env, build.constUint(LUAU_INSN_D(*pc)));

    build.inst(IrCmd::STORE_POINTER, build.vmReg(ra), ncl);
    build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TFUNCTION));

    // Each upvalue is described by one LOP_CAPTURE instruction immediately after NEWCLOSURE
    for (int ui = 0; ui < pv->nups; ++ui)
    {
        Instruction uinsn = pc[ui + 1];
        CODEGEN_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE);

        switch (LUAU_INSN_A(uinsn))
        {
        case LCT_VAL:
        {
            // By-value capture: copy the register's current TValue into the upvalue slot
            IrOp src = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(LUAU_INSN_B(uinsn)));
            IrOp dst = build.inst(IrCmd::GET_CLOSURE_UPVAL_ADDR, ncl, build.vmUpvalue(ui));
            build.inst(IrCmd::STORE_TVALUE, dst, src);
            break;
        }

        case LCT_REF:
        {
            // By-reference capture: find (or create) the open upvalue for the register and store it
            IrOp src = build.inst(IrCmd::FINDUPVAL, build.vmReg(LUAU_INSN_B(uinsn)));
            IrOp dst = build.inst(IrCmd::GET_CLOSURE_UPVAL_ADDR, ncl, build.vmUpvalue(ui));
            build.inst(IrCmd::STORE_POINTER, dst, src);
            build.inst(IrCmd::STORE_TAG, dst, build.constTag(LUA_TUPVAL));
            break;
        }

        case LCT_UPVAL:
        {
            // Re-capture from the enclosing closure: copy its upvalue slot into the new closure
            IrOp src = build.inst(IrCmd::GET_CLOSURE_UPVAL_ADDR, build.undef(), build.vmUpvalue(LUAU_INSN_B(uinsn)));
            IrOp dst = build.inst(IrCmd::GET_CLOSURE_UPVAL_ADDR, ncl, build.vmUpvalue(ui));
            IrOp load = build.inst(IrCmd::LOAD_TVALUE, src);
            build.inst(IrCmd::STORE_TVALUE, dst, load);
            break;
        }

        default:
            CODEGEN_ASSERT(!"Unknown upvalue capture type");
            LUAU_UNREACHABLE(); // improves switch() codegen by eliding opcode bounds checks
        }
    }

    // A new GC object was allocated; allow a collection step
    build.inst(IrCmd::CHECK_GC);
}
1915
1916
} // namespace CodeGen
1917
} // namespace Luau
1918
1919