Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Roblox
GitHub Repository: Roblox/luau
Path: blob/master/CodeGen/src/CodeGenUtils.cpp
2725 views
1
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
2
#include "CodeGenUtils.h"
3
4
#include "lvm.h"
5
6
#include "lbuiltins.h"
7
#include "lbytecode.h"
8
#include "ldebug.h"
9
#include "ldo.h"
10
#include "lfunc.h"
11
#include "lgc.h"
12
#include "lmem.h"
13
#include "lnumutils.h"
14
#include "lstate.h"
15
#include "lstring.h"
16
#include "ltable.h"
17
#include "ludata.h"
18
19
#include <string.h>
20
21
LUAU_FASTFLAGVARIABLE(LuauNativeCodeTargetCheck)
22
23
// All external function calls that can cause stack realloc or Lua calls have to be wrapped in VM_PROTECT
// This makes sure that we save the pc (in case the Lua call needs to generate a backtrace) before the call,
// and restores the stack pointer after in case stack gets reallocated
// Should only be used on the slow paths.
#define VM_PROTECT(x) \
    { \
        L->ci->savedpc = pc; \
        { \
            x; \
        }; \
        base = L->base; \
    }

// Some external functions can cause an error, but never reallocate the stack; for these, VM_PROTECT_PC() is
// a cheaper version of VM_PROTECT that can be called before the external call.
#define VM_PROTECT_PC() L->ci->savedpc = pc

// Bounds-checked accessors for a VM register, a constant from the prototype's constant table,
// and an upvalue of the currently executing closure; 'base', 'k' and 'cl' must be in scope.
#define VM_REG(i) (LUAU_ASSERT(unsigned(i) < unsigned(L->top - base)), &base[i])
#define VM_KV(i) (LUAU_ASSERT(unsigned(i) < unsigned(cl->l.p->sizek)), &k[i])
#define VM_UV(i) (LUAU_ASSERT(unsigned(i) < unsigned(cl->nupvalues)), &cl->l.uprefs[i])

// Rewrite the C field (top 8 bits) or E field (top 24 bits) of an instruction in place;
// used to update inline cache slots stored inside the bytecode stream.
#define VM_PATCH_C(pc, slot) *const_cast<Instruction*>(pc) = ((uint8_t(slot) << 24) | (0x00ffffffu & *(pc)))
#define VM_PATCH_E(pc, slot) *const_cast<Instruction*>(pc) = ((uint32_t(slot) << 8) | (0x000000ffu & *(pc)))

// Invokes the user-provided interrupt callback, if one is installed; if the callback set
// L->status, rolls savedpc back and returns NULL from the enclosing function.
#define VM_INTERRUPT() \
    { \
        void (*interrupt)(lua_State*, int) = L->global->cb.interrupt; \
        if (LUAU_UNLIKELY(!!interrupt)) \
        { /* the interrupt hook is called right before we advance pc */ \
            VM_PROTECT(L->ci->savedpc++; interrupt(L, -1)); \
            if (L->status != 0) \
            { \
                L->ci->savedpc--; \
                return NULL; \
            } \
        } \
    }
60
61
namespace Luau
62
{
63
namespace CodeGen
64
{
65
66
// Advances generalized-for iteration over table 'h' starting at 'index'.
// On success writes the next opaque iteration index to ra+2, the key to ra+3 and the value
// to ra+4 and returns true; returns false when there are no more non-nil entries.
bool forgLoopTableIter(lua_State* L, LuaTable* h, int index, TValue* ra)
{
    int sizearray = h->sizearray;

    // first we advance index through the array portion
    while (unsigned(index) < unsigned(sizearray))
    {
        TValue* e = &h->array[index];

        if (!ttisnil(e))
        {
            // the stored index is 1-based and packed into a tagged light userdata
            setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(index + 1)), LU_TAG_ITERATOR);
            setnvalue(ra + 3, double(index + 1));
            setobj2s(L, ra + 4, e);

            return true;
        }

        index++;
    }

    int sizenode = 1 << h->lsizenode;

    // then we advance index through the hash portion
    while (unsigned(index - h->sizearray) < unsigned(sizenode))
    {
        LuaNode* n = &h->node[index - sizearray];

        if (!ttisnil(gval(n)))
        {
            setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(index + 1)), LU_TAG_ITERATOR);
            getnodekey(L, ra + 3, n);
            setobj(L, ra + 4, gval(n));

            return true;
        }

        index++;
    }

    return false;
}
108
109
// Same as forgLoopTableIter but assumes 'index' is already past the array portion,
// so only the hash portion of the table is scanned.
bool forgLoopNodeIter(lua_State* L, LuaTable* h, int index, TValue* ra)
{
    int sizearray = h->sizearray;
    int sizenode = 1 << h->lsizenode;

    // then we advance index through the hash portion
    while (unsigned(index - sizearray) < unsigned(sizenode))
    {
        LuaNode* n = &h->node[index - sizearray];

        if (!ttisnil(gval(n)))
        {
            // the stored index is 1-based and packed into a tagged light userdata
            setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(index + 1)), LU_TAG_ITERATOR);
            getnodekey(L, ra + 3, n);
            setobj(L, ra + 4, gval(n));

            return true;
        }

        index++;
    }

    return false;
}
133
134
// FORGLOOP fallback for the generic (non-builtin-table) case: calls the iterator function
// at ra with the state (ra+1) and control variable (ra+2) as arguments, copies the first
// result back into ra+2, and returns whether iteration should continue (first result non-nil).
bool forgLoopNonTableFallback(lua_State* L, int insnA, int aux)
{
    TValue* base = L->base;
    TValue* ra = VM_REG(insnA);

    // note: it's safe to push arguments past top for complicated reasons (see lvmexecute.cpp)
    setobj2s(L, ra + 3 + 2, ra + 2);
    setobj2s(L, ra + 3 + 1, ra + 1);
    setobj2s(L, ra + 3, ra);

    L->top = ra + 3 + 3; // func + 2 args (state and index)
    LUAU_ASSERT(L->top <= L->stack_last);

    luaD_call(L, ra + 3, uint8_t(aux));
    L->top = L->ci->top;

    // recompute ra since stack might have been reallocated
    base = L->base;
    ra = VM_REG(insnA);

    // copy first variable back into the iteration index
    setobj2s(L, ra + 2, ra + 3);

    return !ttisnil(ra + 3);
}
159
160
// Validates that the value prepared for FORGPREP_XNEXT iteration is a function;
// raises a type error (with savedpc restored for an accurate location) otherwise.
void forgPrepXnextFallback(lua_State* L, TValue* ra, int pc)
{
    // a function iterator is valid; nothing else to check
    if (ttisfunction(ra))
        return;

    // point savedpc at the faulting instruction so the error/backtrace is accurate
    Closure* callee = clvalue(L->ci->func);
    L->ci->savedpc = callee->l.p->code + pc;

    luaG_typeerror(L, ra, "iterate over");
}
170
171
// Sets up a new CallInfo frame for calling the value at 'ra' with arguments ending at 'argtop'.
// Resolves __call for non-function values (which shifts arguments by one for self) and grows
// the stack to fit the callee. Returns the closure being called.
// Note: 'ra' and 'argtop' are invalidated by the stack growth at the end.
Closure* callProlog(lua_State* L, TValue* ra, StkId argtop, int nresults)
{
    // slow-path: not a function call
    if (LUAU_UNLIKELY(!ttisfunction(ra)))
    {
        luaV_tryfuncTM(L, ra);
        argtop++; // __call adds an extra self
    }

    Closure* ccl = clvalue(ra);

    CallInfo* ci = incr_ci(L);
    ci->func = ra;
    ci->base = ra + 1;
    ci->top = argtop + ccl->stacksize; // note: technically UB since we haven't reallocated the stack yet
    ci->savedpc = NULL;
    ci->flags = 0;
    ci->nresults = nresults;

    L->base = ci->base;
    L->top = argtop;

    // note: this reallocs stack, but we don't need to VM_PROTECT this
    // this is because we're going to modify base/savedpc manually anyhow
    // crucially, we can't use ra/argtop after this line
    luaD_checkstackfornewci(L, ccl->stacksize);

    return ccl;
}
200
201
// Finishes a completed C function call: copies 'n' results from the top of the stack down
// into the caller's frame (clamped/padded to 'nresults'), then pops the CallInfo frame and
// restores the parent's base/top.
void callEpilogC(lua_State* L, int nresults, int n)
{
    // ci is our callinfo, cip is our parent
    CallInfo* ci = L->ci;
    CallInfo* cip = ci - 1;

    // copy return values into parent stack (but only up to nresults!), fill the rest with nil
    // note: in MULTRET context nresults starts as -1 so i != 0 condition never activates intentionally
    StkId res = ci->func;
    StkId vali = L->top - n;
    StkId valend = L->top;

    int i;
    for (i = nresults; i != 0 && vali < valend; i--)
        setobj2s(L, res++, vali++);
    while (i-- > 0)
        setnilvalue(res++);

    // pop the stack frame
    L->ci = cip;
    L->base = cip->base;
    L->top = (nresults == LUA_MULTRET) ? res : cip->top;
}
224
225
// Allocates a tagged userdata of size 's' and attaches the metatable registered for that tag, if any.
Udata* newUserdata(lua_State* L, size_t s, int tag)
{
    Udata* result = luaU_newudata(L, s, tag);

    LuaTable* mt = L->global->udatamt[tag];
    if (mt)
    {
        // currently, we always allocate unmarked objects, so forward barrier can be skipped
        LUAU_ASSERT(!isblack(obj2gco(result)));

        result->metatable = mt;
    }

    return result;
}
239
240
// Resolves the import identified by 'id' (against the closure's environment and constant
// table) and stores the result into 'res'. Saves pc first so that any error raised during
// resolution reports an accurate location.
void getImport(lua_State* L, StkId res, unsigned id, unsigned pc)
{
    Closure* cl = clvalue(L->ci->func);
    L->ci->savedpc = cl->l.p->code + pc;

    luaV_getimport(L, cl->env, cl->l.p->k, res, id, /*propagatenil*/ false);
}
247
248
// Extracted as-is from lvmexecute.cpp with the exception of control flow (reentry) and removed interrupts/savedpc
//
// Performs a full call of the value at 'ra'. Return value protocol:
// - a Closure* when the callee is a Lua closure (caller continues executing inside it)
// - CALL_FALLBACK_YIELD when a C callee requested a yield
// - NULL when a C callee completed (caller keeps executing the current function)
Closure* callFallback(lua_State* L, StkId ra, StkId argtop, int nresults)
{
    // slow-path: not a function call
    if (LUAU_UNLIKELY(!ttisfunction(ra)))
    {
        luaV_tryfuncTM(L, ra);
        argtop++; // __call adds an extra self
    }

    Closure* ccl = clvalue(ra);

    CallInfo* ci = incr_ci(L);
    ci->func = ra;
    ci->base = ra + 1;
    ci->top = argtop + ccl->stacksize; // note: technically UB since we haven't reallocated the stack yet
    ci->savedpc = NULL;
    ci->flags = 0;
    ci->nresults = nresults;

    L->base = ci->base;
    L->top = argtop;

    // note: this reallocs stack, but we don't need to VM_PROTECT this
    // this is because we're going to modify base/savedpc manually anyhow
    // crucially, we can't use ra/argtop after this line
    luaD_checkstackfornewci(L, ccl->stacksize);

    LUAU_ASSERT(ci->top <= L->stack_last);

    if (!ccl->isC)
    {
        Proto* p = ccl->l.p;

        // fill unused parameters with nil
        StkId argi = L->top;
        StkId argend = L->base + p->numparams;
        while (argi < argend)
            setnilvalue(argi++); // complete missing arguments
        L->top = p->is_vararg ? argi : ci->top;

        // keep executing new function
        ci->savedpc = p->code;

        // mark the frame as native if the callee has compiled code available
        if (LUAU_LIKELY(FFlag::LuauNativeCodeTargetCheck ? p->exectarget != 0 : p->execdata != NULL))
            ci->flags = LUA_CALLINFO_NATIVE;

        return ccl;
    }
    else
    {
        lua_CFunction func = ccl->c.f;
        int n = func(L);

        // yield
        if (n < 0)
            return (Closure*)CALL_FALLBACK_YIELD;

        // ci is our callinfo, cip is our parent
        CallInfo* ci = L->ci;
        CallInfo* cip = ci - 1;

        // copy return values into parent stack (but only up to nresults!), fill the rest with nil
        // note: in MULTRET context nresults starts as -1 so i != 0 condition never activates intentionally
        StkId res = ci->func;
        StkId vali = L->top - n;
        StkId valend = L->top;

        int i;
        for (i = nresults; i != 0 && vali < valend; i--)
            setobj2s(L, res++, vali++);
        while (i-- > 0)
            setnilvalue(res++);

        // pop the stack frame
        L->ci = cip;
        L->base = cip->base;
        L->top = (nresults == LUA_MULTRET) ? res : cip->top;

        // keep executing current function
        return NULL;
    }
}
331
332
// Slow path for GETGLOBAL: looks up the global named by string constant 'aux' in the
// closure's environment and stores it into register A. May invoke Lua code through the
// environment's __index metamethod; refreshes the instruction's inline cache slot when done.
const Instruction* executeGETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    // fast-path should already have been checked, so we skip checking for it here
    LuaTable* h = cl->env;
    int slot = LUAU_INSN_C(insn) & h->nodemask8;

    // slow-path, may invoke Lua calls via __index metamethod
    TValue g;
    sethvalue(L, &g, h);
    L->cachedslot = slot;
    VM_PROTECT(luaV_gettable(L, &g, kv, ra));
    // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
    VM_PATCH_C(pc - 2, L->cachedslot);
    return pc;
}
354
355
// Slow path for SETGLOBAL: stores register A into the global named by string constant 'aux'
// in the closure's environment. May invoke Lua code through the environment's __newindex
// metamethod; refreshes the instruction's inline cache slot when done.
const Instruction* executeSETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    // fast-path should already have been checked, so we skip checking for it here
    LuaTable* h = cl->env;
    int slot = LUAU_INSN_C(insn) & h->nodemask8;

    // slow-path, may invoke Lua calls via __newindex metamethod
    TValue g;
    sethvalue(L, &g, h);
    L->cachedslot = slot;
    VM_PROTECT(luaV_settable(L, &g, kv, ra));
    // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
    VM_PATCH_C(pc - 2, L->cachedslot);
    return pc;
}
377
378
// Fallback for GETTABLEKS (ra = rb[constant key]): handles plain tables (with and without
// metatables), userdata with a C __index, vector component access (x/y/z and, when
// LUA_VECTOR_SIZE == 4, w), and finally the fully generic lookup which may run Lua code.
// Successful table/userdata paths refresh the instruction's inline cache slot.
const Instruction* executeGETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    // fast-path: built-in table
    if (ttistable(rb))
    {
        LuaTable* h = hvalue(rb);

        // we ignore the fast path that checks for the cached slot since IrTranslation already checks for it.

        if (!h->metatable)
        {
            // fast-path: value is not in expected slot, but the table lookup doesn't involve metatable
            const TValue* res = luaH_getstr(h, tsvalue(kv));

            if (res != luaO_nilobject)
            {
                int cachedslot = gval2slot(h, res);
                // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
                VM_PATCH_C(pc - 2, cachedslot);
            }

            setobj2s(L, ra, res);
            return pc;
        }
        else
        {
            // slow-path, may invoke Lua calls via __index metamethod
            int slot = LUAU_INSN_C(insn) & h->nodemask8;
            L->cachedslot = slot;
            VM_PROTECT(luaV_gettable(L, rb, kv, ra));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            return pc;
        }
    }
    else
    {
        // fast-path: user data with C __index TM
        const TValue* fn = 0;
        if (ttisuserdata(rb) && (fn = fasttm(L, uvalue(rb)->metatable, TM_INDEX)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            setobj2s(L, top + 2, kv);
            L->top = top + 3;

            L->cachedslot = LUAU_INSN_C(insn);
            VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            return pc;
        }
        else if (ttisvector(rb))
        {
            // fast-path: quick case-insensitive comparison with "X"/"Y"/"Z"
            const char* name = getstr(tsvalue(kv));
            int ic = (name[0] | ' ') - 'x';

#if LUA_VECTOR_SIZE == 4
            // 'w' is before 'x' in ascii, so ic is -1 when indexing with 'w'
            if (ic == -1)
                ic = 3;
#endif

            // single-character component name within range -> direct component read
            if (unsigned(ic) < LUA_VECTOR_SIZE && name[1] == '\0')
            {
                const float* v = vvalue(rb); // silences ubsan when indexing v[]
                setnvalue(ra, v[ic]);
                return pc;
            }

            fn = fasttm(L, L->global->mt[LUA_TVECTOR], TM_INDEX);

            if (fn && ttisfunction(fn) && clvalue(fn)->isC)
            {
                // note: it's safe to push arguments past top for complicated reasons (see top of the file)
                LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
                StkId top = L->top;
                setobj2s(L, top + 0, fn);
                setobj2s(L, top + 1, rb);
                setobj2s(L, top + 2, kv);
                L->top = top + 3;

                L->cachedslot = LUAU_INSN_C(insn);
                VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
                // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
                VM_PATCH_C(pc - 2, L->cachedslot);
                return pc;
            }

            // fall through to slow path
        }

        // fall through to slow path
    }

    // slow-path, may invoke Lua calls via __index metamethod
    VM_PROTECT(luaV_gettable(L, rb, kv, ra));
    return pc;
}
489
490
// Fallback for SETTABLEKS (rb[constant key] = ra): handles writable tables without
// __newindex, userdata with a C __newindex, and the fully generic store which may run
// Lua code. Successful table/userdata paths refresh the instruction's inline cache slot.
const Instruction* executeSETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    // fast-path: built-in table
    if (ttistable(rb))
    {
        LuaTable* h = hvalue(rb);

        // we ignore the fast path that checks for the cached slot since IrTranslation already checks for it.

        if (fastnotm(h->metatable, TM_NEWINDEX) && !h->readonly)
        {
            VM_PROTECT_PC(); // set may fail

            TValue* res = luaH_setstr(L, h, tsvalue(kv));
            int cachedslot = gval2slot(h, res);
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, cachedslot);
            setobj2t(L, res, ra);
            // GC write barrier: the table now references the stored value
            luaC_barriert(L, h, ra);
            return pc;
        }
        else
        {
            // slow-path, may invoke Lua calls via __newindex metamethod
            int slot = LUAU_INSN_C(insn) & h->nodemask8;
            L->cachedslot = slot;
            VM_PROTECT(luaV_settable(L, rb, kv, ra));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            return pc;
        }
    }
    else
    {
        // fast-path: user data with C __newindex TM
        const TValue* fn = 0;
        if (ttisuserdata(rb) && (fn = fasttm(L, uvalue(rb)->metatable, TM_NEWINDEX)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 4 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            setobj2s(L, top + 2, kv);
            setobj2s(L, top + 3, ra);
            L->top = top + 4;

            L->cachedslot = LUAU_INSN_C(insn);
            VM_PROTECT(luaV_callTM(L, 3, -1));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            return pc;
        }
        else
        {
            // slow-path, may invoke Lua calls via __newindex metamethod
            VM_PROTECT(luaV_settable(L, rb, kv, ra));
            return pc;
        }
    }
}
559
560
// Fallback for NAMECALL (method call setup rb:key(...)): places self into ra+1 and the
// method into ra, resolving via table lookup, __namecall, or __index as appropriate.
// Raises a method error if the resolved method is nil. The following instruction is
// required to be CALL.
const Instruction* executeNAMECALL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    if (ttistable(rb))
    {
        // note: lvmexecute.cpp version of NAMECALL has two fast paths, but both fast paths are inlined into IR
        // as such, if we get here we can just use the generic path which makes the fallback path a little faster

        // slow-path: handles full table lookup
        setobj2s(L, ra + 1, rb);
        L->cachedslot = LUAU_INSN_C(insn);
        VM_PROTECT(luaV_gettable(L, rb, kv, ra));
        // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
        VM_PATCH_C(pc - 2, L->cachedslot);
        // recompute ra since stack might have been reallocated
        ra = VM_REG(LUAU_INSN_A(insn));
        if (ttisnil(ra))
            luaG_methoderror(L, ra + 1, tsvalue(kv));
    }
    else
    {
        LuaTable* mt = ttisuserdata(rb) ? uvalue(rb)->metatable : L->global->mt[ttype(rb)];
        const TValue* tmi = 0;

        // fast-path: metatable with __namecall
        if (const TValue* fn = fasttm(L, mt, TM_NAMECALL))
        {
            // note: order of copies allows rb to alias ra+1 or ra
            setobj2s(L, ra + 1, rb);
            setobj2s(L, ra, fn);

            // stash the method name so the __namecall handler can dispatch on it
            L->namecall = tsvalue(kv);
        }
        else if ((tmi = fasttm(L, mt, TM_INDEX)) && ttistable(tmi))
        {
            LuaTable* h = hvalue(tmi);
            int slot = LUAU_INSN_C(insn) & h->nodemask8;
            LuaNode* n = &h->node[slot];

            // fast-path: metatable with __index that has method in expected slot
            if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n))))
            {
                // note: order of copies allows rb to alias ra+1 or ra
                setobj2s(L, ra + 1, rb);
                setobj2s(L, ra, gval(n));
            }
            else
            {
                // slow-path: handles slot mismatch
                setobj2s(L, ra + 1, rb);
                L->cachedslot = slot;
                VM_PROTECT(luaV_gettable(L, rb, kv, ra));
                // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
                VM_PATCH_C(pc - 2, L->cachedslot);
                // recompute ra since stack might have been reallocated
                ra = VM_REG(LUAU_INSN_A(insn));
                if (ttisnil(ra))
                    luaG_methoderror(L, ra + 1, tsvalue(kv));
            }
        }
        else
        {
            // slow-path: handles non-table __index
            setobj2s(L, ra + 1, rb);
            VM_PROTECT(luaV_gettable(L, rb, kv, ra));
            // recompute ra since stack might have been reallocated
            ra = VM_REG(LUAU_INSN_A(insn));
            if (ttisnil(ra))
                luaG_methoderror(L, ra + 1, tsvalue(kv));
        }
    }

    // intentional fallthrough to CALL
    LUAU_ASSERT(LUAU_INSN_OP(*pc) == LOP_CALL);
    return pc;
}
643
644
// Fallback for SETLIST: copies 'c' values starting at register B into the array portion of
// the table at register A, starting at 1-based position 'index'. c == LUA_MULTRET consumes
// everything up to L->top. Grows the array portion if needed and applies a GC barrier.
const Instruction* executeSETLIST(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = &base[LUAU_INSN_B(insn)]; // note: this can point to L->top if c == LUA_MULTRET making VM_REG unsafe to use
    int c = LUAU_INSN_C(insn) - 1;
    uint32_t index = *pc++;

    if (c == LUA_MULTRET)
    {
        c = int(L->top - rb);
        L->top = L->ci->top;
    }

    LuaTable* h = hvalue(ra);

    // TODO: we really don't need this anymore
    if (!ttistable(ra))
        return NULL; // temporary workaround to weaken a rather powerful exploitation primitive in case of a MITM attack on bytecode

    int last = index + c - 1;
    if (last > h->sizearray)
    {
        VM_PROTECT_PC(); // luaH_resizearray may fail due to OOM

        luaH_resizearray(L, h, last);
    }

    TValue* array = h->array;

    for (int i = 0; i < c; ++i)
        setobj2t(L, &array[index + i - 1], rb + i);

    // single barrier for the whole batch of stores
    luaC_barrierfast(L, h);
    return pc;
}
681
682
// Fallback for FORGPREP: prepares registers ra..ra+2 for generalized-for iteration.
// Functions are left as-is (called later by FORGLOOP); objects with __iter have it invoked
// to produce the iterator triple; objects with __call are deferred to FORGLOOP; plain
// tables get the builtin-iteration register setup; everything else raises a type error.
// Finishes by taking the instruction's relative jump.
const Instruction* executeFORGPREP(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));

    if (ttisfunction(ra))
    {
        // will be called during FORGLOOP
    }
    else
    {
        LuaTable* mt = ttistable(ra) ? hvalue(ra)->metatable : ttisuserdata(ra) ? uvalue(ra)->metatable : cast_to(LuaTable*, NULL);

        if (const TValue* fn = fasttm(L, mt, TM_ITER))
        {
            // call __iter with the iterated object as self; it returns the iterator triple
            setobj2s(L, ra + 1, ra);
            setobj2s(L, ra, fn);

            L->top = ra + 2; // func + self arg
            LUAU_ASSERT(L->top <= L->stack_last);

            VM_PROTECT(luaD_call(L, ra, 3));
            L->top = L->ci->top;

            // recompute ra since stack might have been reallocated
            ra = VM_REG(LUAU_INSN_A(insn));

            // protect against __iter returning nil, since nil is used as a marker for builtin iteration in FORGLOOP
            if (ttisnil(ra))
            {
                VM_PROTECT_PC(); // next call always errors
                luaG_typeerror(L, ra, "call");
            }
        }
        else if (fasttm(L, mt, TM_CALL))
        {
            // table or userdata with __call, will be called during FORGLOOP
            // TODO: we might be able to stop supporting this depending on whether it's used in practice
        }
        else if (ttistable(ra))
        {
            // set up registers for builtin iteration
            setobj2s(L, ra + 1, ra);
            setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0)), LU_TAG_ITERATOR);
            setnilvalue(ra);
        }
        else
        {
            VM_PROTECT_PC(); // next call always errors
            luaG_typeerror(L, ra, "iterate over");
        }
    }

    // take the FORGPREP jump to the loop's FORGLOOP instruction
    pc += LUAU_INSN_D(insn);
    LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
    return pc;
}
740
741
// Implements GETVARARGS in MULTRET mode: copies all available varargs into registers
// starting at 'rai' and sets L->top just past the last one.
void executeGETVARARGSMultRet(lua_State* L, const Instruction* pc, StkId base, int rai)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    // number of varargs the caller provided beyond the fixed parameters
    int n = cast_int(base - L->ci->func) - cl->l.p->numparams - 1;

    VM_PROTECT(luaD_checkstack(L, n));
    StkId ra = VM_REG(rai); // previous call may change the stack

    for (int j = 0; j < n; j++)
        setobj2s(L, ra + j, base - n + j);

    L->top = ra + n;
}
754
755
// Implements GETVARARGS with a constant result count 'b': copies up to 'b' varargs into
// registers starting at 'rai' and pads any remaining requested registers with nil.
void executeGETVARARGSConst(lua_State* L, StkId base, int rai, int b)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);

    // number of varargs the caller provided beyond the fixed parameters
    int available = cast_int(base - L->ci->func) - cl->l.p->numparams - 1;

    StkId dst = VM_REG(rai);

    // copy the varargs that actually exist
    for (int i = 0; i < b && i < available; i++)
        setobj2s(L, dst + i, base - available + i);

    // fill the remainder of the requested window with nil
    for (int i = available; i < b; i++)
        setnilvalue(dst + i);
}
767
768
// Fallback for DUPCLOSURE: materializes the closure stored in constant D into register A,
// sharing the constant-table closure when its environment and captured upvalues allow it,
// and cloning it otherwise. Consumes the trailing CAPTURE pseudo-instructions.
const Instruction* executeDUPCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    TValue* kv = VM_KV(LUAU_INSN_D(insn));

    Closure* kcl = clvalue(kv);

    VM_PROTECT_PC(); // luaF_newLclosure may fail due to OOM

    // clone closure if the environment is not shared
    // note: we save closure to stack early in case the code below wants to capture it by value
    Closure* ncl = (kcl->env == cl->env) ? kcl : luaF_newLclosure(L, kcl->nupvalues, cl->env, kcl->l.p);
    setclvalue(L, ra, ncl);

    // this loop does three things:
    // - if the closure was created anew, it just fills it with upvalues
    // - if the closure from the constant table is used, it fills it with upvalues so that it can be shared in the future
    // - if the closure is reused, it checks if the reuse is safe via rawequal, and falls back to duplicating the closure
    // normally this would use two separate loops, for reuse check and upvalue setup, but MSVC codegen goes crazy if you do that
    for (int ui = 0; ui < kcl->nupvalues; ++ui)
    {
        Instruction uinsn = pc[ui];
        LUAU_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE);
        LUAU_ASSERT(LUAU_INSN_A(uinsn) == LCT_VAL || LUAU_INSN_A(uinsn) == LCT_UPVAL);

        TValue* uv = (LUAU_INSN_A(uinsn) == LCT_VAL) ? VM_REG(LUAU_INSN_B(uinsn)) : VM_UV(LUAU_INSN_B(uinsn));

        // check if the existing closure is safe to reuse
        if (ncl == kcl && luaO_rawequalObj(&ncl->l.uprefs[ui], uv))
            continue;

        // lazily clone the closure and update the upvalues
        if (ncl == kcl && kcl->preload == 0)
        {
            ncl = luaF_newLclosure(L, kcl->nupvalues, cl->env, kcl->l.p);
            setclvalue(L, ra, ncl);

            ui = -1; // restart the loop to fill all upvalues
            continue;
        }

        // this updates a newly created closure, or an existing closure created during preload, in which case we need a barrier
        setobj(L, &ncl->l.uprefs[ui], uv);
        luaC_barrier(L, ncl, uv);
    }

    // this is a noop if ncl is newly created or shared successfully, but it has to run after the closure is preloaded for the first time
    ncl->preload = 0;

    if (kcl != ncl)
        VM_PROTECT(luaC_checkGC(L));

    // skip over the CAPTURE pseudo-instructions consumed above
    pc += kcl->nupvalues;
    return pc;
}
825
826
// Implements PREPVARARGS: relocates the fixed parameters of a vararg function above the
// passed arguments (leaving the varargs in place below the new base) and rewires the
// current stack frame's base/top accordingly.
const Instruction* executePREPVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    int numparams = LUAU_INSN_A(insn);

    // all fixed parameters are copied after the top so we need more stack space
    VM_PROTECT(luaD_checkstack(L, cl->stacksize + numparams));

    // the caller must have filled extra fixed arguments with nil
    LUAU_ASSERT(cast_int(L->top - base) >= numparams);

    // move fixed parameters to final position
    StkId fixed = base; // first fixed argument
    base = L->top;      // final position of first argument

    for (int i = 0; i < numparams; ++i)
    {
        setobj2s(L, base + i, fixed + i);
        setnilvalue(fixed + i);
    }

    // rewire our stack frame to point to the new base
    L->ci->base = base;
    L->ci->top = base + cl->stacksize;

    L->base = base;
    L->top = L->ci->top;
    return pc;
}
856
857
} // namespace CodeGen
858
} // namespace Luau
859
860