Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Roblox
GitHub Repository: Roblox/luau
Path: blob/master/VM/src/lvmexecute.cpp
2725 views
1
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
2
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
3
#include "lvm.h"
4
5
#include "lstate.h"
6
#include "ltable.h"
7
#include "lfunc.h"
8
#include "lstring.h"
9
#include "lgc.h"
10
#include "lmem.h"
11
#include "ldebug.h"
12
#include "ldo.h"
13
#include "lbuiltins.h"
14
#include "lnumutils.h"
15
#include "lbytecode.h"
16
17
#include <string.h>
18
19
LUAU_FASTFLAG(LuauIntegerType)
20
21
// Disable c99-designator to avoid the warning in computed goto dispatch table
22
#ifdef __clang__
23
#if __has_warning("-Wc99-designator")
24
#pragma clang diagnostic ignored "-Wc99-designator"
25
#endif
26
#endif
27
28
// When working with VM code, pay attention to these rules for correctness:
29
// 1. Many external Lua functions can fail; for them to fail and be able to generate a proper stack, we need to copy pc to L->ci->savedpc before the
30
// call
31
// 2. Many external Lua functions can reallocate the stack. This invalidates stack pointers in VM C stack frame, most importantly base, but also
32
// ra/rb/rc!
33
// 3. VM_PROTECT macro saves savedpc and restores base for you; most external calls need to be wrapped into that. However, it does NOT restore
34
// ra/rb/rc!
35
// 4. When copying an object to any existing object as a field, generally speaking you need to call luaC_barrier! Be careful with all setobj calls
36
// 5. To make 4 easier to follow, please use setobj2s for copies to stack, setobj2t for writes to tables, and setobj for other copies.
37
// 6. You can define HARDSTACKTESTS in luaconf.h which will aggressively realloc stack; with address sanitizer this should be effective at finding
38
// stack corruption bugs
39
// 7. Many external Lua functions can call GC! GC will *not* traverse pointers to new objects that aren't reachable from Lua root. Be careful when
40
// creating new Lua objects, store them to stack soon.
41
42
// When calling luau_callTM, we usually push the arguments to the top of the stack.
43
// This is safe to do for complicated reasons:
44
// - stack guarantees EXTRA_STACK room beyond stack_last (see luaD_reallocstack)
45
// - stack reallocation copies values past stack_last
46
47
// All external function calls that can cause stack realloc or Lua calls have to be wrapped in VM_PROTECT
48
// This makes sure that we save the pc (in case the Lua call needs to generate a backtrace) before the call,
49
// and restores the stack pointer after in case stack gets reallocated
50
// Should only be used on the slow paths.
51
// Wraps an external call 'x' that may error or reallocate the Lua stack:
// saves pc into savedpc before the call (so an error can build a backtrace) and
// refreshes 'base' afterwards. Note: it does NOT refresh ra/rb/rc - recompute those
// from base after the macro if they are used again (see rules at the top of the file).
#define VM_PROTECT(x) \
52
{ \
53
L->ci->savedpc = pc; \
54
{ \
55
x; \
56
}; \
57
base = L->base; \
58
}
59
60
// Some external functions can cause an error, but never reallocate the stack; for these, VM_PROTECT_PC() is
61
// a cheaper version of VM_PROTECT that can be called before the external call.
62
#define VM_PROTECT_PC() L->ci->savedpc = pc
63
64
// Accessors for a register, constant-table entry, or upvalue; each index is guarded by LUAU_ASSERT.
#define VM_REG(i) (LUAU_ASSERT(unsigned(i) < unsigned(L->top - base)), &base[i])
65
#define VM_KV(i) (LUAU_ASSERT(unsigned(i) < unsigned(cl->l.p->sizek)), &k[i])
66
#define VM_UV(i) (LUAU_ASSERT(unsigned(i) < unsigned(cl->nupvalues)), &cl->l.uprefs[i])
67
68
// Patch the C field (top 8 bits) or E field (top 24 bits) of an instruction in place;
// used to store predicted slot indices back into the executing bytecode (inline caching).
#define VM_PATCH_C(pc, slot) *const_cast<Instruction*>(pc) = ((uint8_t(slot) << 24) | (0x00ffffffu & *(pc)))
69
#define VM_PATCH_E(pc, slot) *const_cast<Instruction*>(pc) = ((uint32_t(slot) << 8) | (0x000000ffu & *(pc)))
70
71
// Invokes the registered interrupt callback, if any. If the callback leaves the thread
// in a non-zero status (error/yield), the temporary savedpc advance is rolled back and
// the interpreter exits via the 'exit' label.
#define VM_INTERRUPT() \
72
{ \
73
void (*interrupt)(lua_State*, int) = L->global->cb.interrupt; \
74
if (LUAU_UNLIKELY(!!interrupt)) \
75
{ /* the interrupt hook is called right before we advance pc */ \
76
VM_PROTECT(L->ci->savedpc++; interrupt(L, -1)); \
77
if (L->status != 0) \
78
{ \
79
L->ci->savedpc--; \
80
goto exit; \
81
} \
82
} \
83
}
84
85
// Computed-goto dispatch helper: expands to the address of the CASE_<op> label (GNU "labels as values").
#define VM_DISPATCH_OP(op) &&CASE_##op
86
87
// Label-address table for the computed-goto dispatcher.
// Entry order must mirror the opcode numbering: VM_CONTINUE indexes this table by raw opcode value.
#define VM_DISPATCH_TABLE() \
88
VM_DISPATCH_OP(LOP_NOP), VM_DISPATCH_OP(LOP_BREAK), VM_DISPATCH_OP(LOP_LOADNIL), VM_DISPATCH_OP(LOP_LOADB), VM_DISPATCH_OP(LOP_LOADN), \
89
VM_DISPATCH_OP(LOP_LOADK), VM_DISPATCH_OP(LOP_MOVE), VM_DISPATCH_OP(LOP_GETGLOBAL), VM_DISPATCH_OP(LOP_SETGLOBAL), \
90
VM_DISPATCH_OP(LOP_GETUPVAL), VM_DISPATCH_OP(LOP_SETUPVAL), VM_DISPATCH_OP(LOP_CLOSEUPVALS), VM_DISPATCH_OP(LOP_GETIMPORT), \
91
VM_DISPATCH_OP(LOP_GETTABLE), VM_DISPATCH_OP(LOP_SETTABLE), VM_DISPATCH_OP(LOP_GETTABLEKS), VM_DISPATCH_OP(LOP_SETTABLEKS), \
92
VM_DISPATCH_OP(LOP_GETTABLEN), VM_DISPATCH_OP(LOP_SETTABLEN), VM_DISPATCH_OP(LOP_NEWCLOSURE), VM_DISPATCH_OP(LOP_NAMECALL), \
93
VM_DISPATCH_OP(LOP_CALL), VM_DISPATCH_OP(LOP_RETURN), VM_DISPATCH_OP(LOP_JUMP), VM_DISPATCH_OP(LOP_JUMPBACK), VM_DISPATCH_OP(LOP_JUMPIF), \
94
VM_DISPATCH_OP(LOP_JUMPIFNOT), VM_DISPATCH_OP(LOP_JUMPIFEQ), VM_DISPATCH_OP(LOP_JUMPIFLE), VM_DISPATCH_OP(LOP_JUMPIFLT), \
95
VM_DISPATCH_OP(LOP_JUMPIFNOTEQ), VM_DISPATCH_OP(LOP_JUMPIFNOTLE), VM_DISPATCH_OP(LOP_JUMPIFNOTLT), VM_DISPATCH_OP(LOP_ADD), \
96
VM_DISPATCH_OP(LOP_SUB), VM_DISPATCH_OP(LOP_MUL), VM_DISPATCH_OP(LOP_DIV), VM_DISPATCH_OP(LOP_MOD), VM_DISPATCH_OP(LOP_POW), \
97
VM_DISPATCH_OP(LOP_ADDK), VM_DISPATCH_OP(LOP_SUBK), VM_DISPATCH_OP(LOP_MULK), VM_DISPATCH_OP(LOP_DIVK), VM_DISPATCH_OP(LOP_MODK), \
98
VM_DISPATCH_OP(LOP_POWK), VM_DISPATCH_OP(LOP_AND), VM_DISPATCH_OP(LOP_OR), VM_DISPATCH_OP(LOP_ANDK), VM_DISPATCH_OP(LOP_ORK), \
99
VM_DISPATCH_OP(LOP_CONCAT), VM_DISPATCH_OP(LOP_NOT), VM_DISPATCH_OP(LOP_MINUS), VM_DISPATCH_OP(LOP_LENGTH), VM_DISPATCH_OP(LOP_NEWTABLE), \
100
VM_DISPATCH_OP(LOP_DUPTABLE), VM_DISPATCH_OP(LOP_SETLIST), VM_DISPATCH_OP(LOP_FORNPREP), VM_DISPATCH_OP(LOP_FORNLOOP), \
101
VM_DISPATCH_OP(LOP_FORGLOOP), VM_DISPATCH_OP(LOP_FORGPREP_INEXT), VM_DISPATCH_OP(LOP_FASTCALL3), VM_DISPATCH_OP(LOP_FORGPREP_NEXT), \
102
VM_DISPATCH_OP(LOP_NATIVECALL), VM_DISPATCH_OP(LOP_GETVARARGS), VM_DISPATCH_OP(LOP_DUPCLOSURE), VM_DISPATCH_OP(LOP_PREPVARARGS), \
103
VM_DISPATCH_OP(LOP_LOADKX), VM_DISPATCH_OP(LOP_JUMPX), VM_DISPATCH_OP(LOP_FASTCALL), VM_DISPATCH_OP(LOP_COVERAGE), \
104
VM_DISPATCH_OP(LOP_CAPTURE), VM_DISPATCH_OP(LOP_SUBRK), VM_DISPATCH_OP(LOP_DIVRK), VM_DISPATCH_OP(LOP_FASTCALL1), \
105
VM_DISPATCH_OP(LOP_FASTCALL2), VM_DISPATCH_OP(LOP_FASTCALL2K), VM_DISPATCH_OP(LOP_FORGPREP), VM_DISPATCH_OP(LOP_JUMPXEQKNIL), \
106
VM_DISPATCH_OP(LOP_JUMPXEQKB), VM_DISPATCH_OP(LOP_JUMPXEQKN), VM_DISPATCH_OP(LOP_JUMPXEQKS), VM_DISPATCH_OP(LOP_IDIV), \
107
VM_DISPATCH_OP(LOP_IDIVK),
108
109
// Computed goto is a GNU extension, available on GCC and Clang; MSVC and others fall back to switch dispatch.
#if defined(__GNUC__) || defined(__clang__)
110
#define VM_USE_CGOTO 1
111
#else
112
#define VM_USE_CGOTO 0
113
#endif
114
115
/**
116
* These macros help dispatching Luau opcodes using either case
117
* statements or computed goto.
118
* VM_CASE(op) Generates either a case statement or a label
119
* VM_NEXT() fetch a byte and dispatch or jump to the beginning of the switch statement
120
* VM_CONTINUE() Use an opcode override to dispatch with computed goto or
121
* switch statement to skip a LOP_BREAK instruction.
122
*/
123
#if VM_USE_CGOTO
124
#define VM_CASE(op) CASE_##op:
125
// In single-step mode every instruction goes through the 'dispatch' label so the debugstep hook can run.
#define VM_NEXT() goto*(SingleStep ? &&dispatch : kDispatchTable[LUAU_INSN_OP(*pc)])
126
#define VM_CONTINUE(op) goto* kDispatchTable[uint8_t(op)]
127
#else
128
#define VM_CASE(op) case op:
129
#define VM_NEXT() goto dispatch
130
#define VM_CONTINUE(op) \
131
dispatchOp = uint8_t(op); \
132
goto dispatchContinue
133
#endif
134
135
// Does VM support native execution via ExecutionCallbacks? We mostly assume it does but keep the define to make it easy to quantify the cost.
136
#define VM_HAS_NATIVE 1
137
138
// Invokes 'hook' with a lua_Debug record describing the current call frame,
// preserving interpreter state (base/top/ci->top pointers and thread status)
// across the call. 'userdata' is passed through to the hook untouched.
LUAU_NOINLINE void luau_callhook(lua_State* L, lua_Hook hook, void* userdata)
{
    ptrdiff_t savedBase = savestack(L, L->base);
    ptrdiff_t savedTop = savestack(L, L->top);
    ptrdiff_t savedCiTop = savestack(L, L->ci->top);
    int oldStatus = L->status;

    // if the hook is called externally on a paused thread, we need to make sure the paused thread can emit Luau calls
    if (oldStatus == LUA_YIELD || oldStatus == LUA_BREAK)
    {
        L->status = 0;
        L->base = L->ci->base;
    }

    Closure* cl = clvalue(L->ci->func);

    // note: the pc expectations of the hook are matching the general "pc points to next instruction"
    // however, for the hook to be able to continue execution from the same point, this is called with savedpc at the *current* instruction
    // this advance must happen before luaD_checkstack in case that call fails to reallocate the stack
    const Instruction* oldsavedpc = L->ci->savedpc;

    if (L->ci->savedpc && L->ci->savedpc != cl->l.p->code + cl->l.p->sizecode)
        L->ci->savedpc++;

    luaD_checkstack(L, LUA_MINSTACK); // ensure minimum stack size
    L->ci->top = L->top + LUA_MINSTACK;
    LUAU_ASSERT(L->ci->top <= L->stack_last);

    lua_Debug ar;
    ar.currentline = cl->isC ? -1 : luaG_getline(cl->l.p, pcRel(L->ci->savedpc, cl->l.p));
    ar.userdata = userdata;

    hook(L, &ar);

    // undo the temporary savedpc advance and restore the stack extents
    L->ci->savedpc = oldsavedpc;
    L->ci->top = restorestack(L, savedCiTop);
    L->top = restorestack(L, savedTop);

    // note that we only restore the paused state if the hook hasn't yielded by itself
    if (oldStatus == LUA_YIELD && L->status != LUA_YIELD)
    {
        L->status = LUA_YIELD;
        L->base = restorestack(L, savedBase);
    }
    else if (oldStatus == LUA_BREAK)
    {
        LUAU_ASSERT(L->status != LUA_BREAK); // hook shouldn't break again

        L->status = LUA_BREAK;
        L->base = restorestack(L, savedBase);
    }
}
191
192
// True for opcodes the single-step debugger should not stop on:
// PREPVARARGS is call-setup plumbing and BREAK is a debugger patch point.
inline bool luau_skipstep(uint8_t op)
{
    switch (op)
    {
    case LOP_PREPVARARGS:
    case LOP_BREAK:
        return true;
    default:
        return false;
    }
}
196
197
template<bool SingleStep>
198
static void luau_execute(lua_State* L)
199
{
200
#if VM_USE_CGOTO
201
static const void* kDispatchTable[256] = {VM_DISPATCH_TABLE()};
202
#endif
203
204
// the critical interpreter state, stored in locals for performance
205
// the hope is that these map to registers without spilling (which is not true for x86 :/)
206
Closure* cl;
207
StkId base;
208
TValue* k;
209
const Instruction* pc;
210
211
LUAU_ASSERT(isLua(L->ci));
212
LUAU_ASSERT(L->isactive);
213
LUAU_ASSERT(!isblack(obj2gco(L))); // we don't use luaC_threadbarrier because active threads never turn black
214
215
#if VM_HAS_NATIVE
216
if ((L->ci->flags & LUA_CALLINFO_NATIVE) && !SingleStep)
217
{
218
Proto* p = clvalue(L->ci->func)->l.p;
219
LUAU_ASSERT(p->execdata);
220
221
if (L->global->ecb.enter(L, p) == 0)
222
return;
223
}
224
225
reentry:
226
#endif
227
228
LUAU_ASSERT(isLua(L->ci));
229
230
pc = L->ci->savedpc;
231
cl = clvalue(L->ci->func);
232
base = L->base;
233
k = cl->l.p->k;
234
235
VM_NEXT(); // starts the interpreter "loop"
236
237
{
238
dispatch:
239
// Note: this code doesn't always execute! on some platforms we use computed goto which bypasses all of this unless we run in single-step mode
240
// Therefore only ever put assertions here.
241
LUAU_ASSERT(base == L->base && L->base == L->ci->base);
242
LUAU_ASSERT(base <= L->top && L->top <= L->stack + L->stacksize);
243
244
// ... and singlestep logic :)
245
if (SingleStep)
246
{
247
if (L->global->cb.debugstep && !luau_skipstep(LUAU_INSN_OP(*pc)))
248
{
249
VM_PROTECT(luau_callhook(L, L->global->cb.debugstep, NULL));
250
251
// allow debugstep hook to put thread into error/yield state
252
if (L->status != 0)
253
goto exit;
254
}
255
256
#if VM_USE_CGOTO
257
VM_CONTINUE(LUAU_INSN_OP(*pc));
258
#endif
259
}
260
261
#if !VM_USE_CGOTO
262
size_t dispatchOp = LUAU_INSN_OP(*pc);
263
264
dispatchContinue:
265
switch (dispatchOp)
266
#endif
267
{
268
VM_CASE(LOP_NOP)
269
{
270
Instruction insn = *pc++;
271
LUAU_ASSERT(insn == 0);
272
VM_NEXT();
273
}
274
275
VM_CASE(LOP_LOADNIL)
276
{
277
Instruction insn = *pc++;
278
StkId ra = VM_REG(LUAU_INSN_A(insn));
279
280
setnilvalue(ra);
281
VM_NEXT();
282
}
283
284
VM_CASE(LOP_LOADB)
285
{
286
Instruction insn = *pc++;
287
StkId ra = VM_REG(LUAU_INSN_A(insn));
288
289
setbvalue(ra, LUAU_INSN_B(insn));
290
291
pc += LUAU_INSN_C(insn);
292
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
293
VM_NEXT();
294
}
295
296
VM_CASE(LOP_LOADN)
297
{
298
Instruction insn = *pc++;
299
StkId ra = VM_REG(LUAU_INSN_A(insn));
300
301
setnvalue(ra, LUAU_INSN_D(insn));
302
VM_NEXT();
303
}
304
305
VM_CASE(LOP_LOADK)
306
{
307
Instruction insn = *pc++;
308
StkId ra = VM_REG(LUAU_INSN_A(insn));
309
TValue* kv = VM_KV(LUAU_INSN_D(insn));
310
311
setobj2s(L, ra, kv);
312
VM_NEXT();
313
}
314
315
VM_CASE(LOP_MOVE)
316
{
317
Instruction insn = *pc++;
318
StkId ra = VM_REG(LUAU_INSN_A(insn));
319
StkId rb = VM_REG(LUAU_INSN_B(insn));
320
321
setobj2s(L, ra, rb);
322
VM_NEXT();
323
}
324
325
VM_CASE(LOP_GETGLOBAL)
326
{
327
Instruction insn = *pc++;
328
StkId ra = VM_REG(LUAU_INSN_A(insn));
329
uint32_t aux = *pc++;
330
TValue* kv = VM_KV(aux);
331
LUAU_ASSERT(ttisstring(kv));
332
333
// fast-path: value is in expected slot
334
LuaTable* h = cl->env;
335
int slot = LUAU_INSN_C(insn) & h->nodemask8;
336
LuaNode* n = &h->node[slot];
337
338
if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv)) && !ttisnil(gval(n)))
339
{
340
setobj2s(L, ra, gval(n));
341
VM_NEXT();
342
}
343
else
344
{
345
// slow-path, may invoke Lua calls via __index metamethod
346
TValue g;
347
sethvalue(L, &g, h);
348
L->cachedslot = slot;
349
VM_PROTECT(luaV_gettable(L, &g, kv, ra));
350
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
351
VM_PATCH_C(pc - 2, L->cachedslot);
352
VM_NEXT();
353
}
354
}
355
356
VM_CASE(LOP_SETGLOBAL)
357
{
358
Instruction insn = *pc++;
359
StkId ra = VM_REG(LUAU_INSN_A(insn));
360
uint32_t aux = *pc++;
361
TValue* kv = VM_KV(aux);
362
LUAU_ASSERT(ttisstring(kv));
363
364
// fast-path: value is in expected slot
365
LuaTable* h = cl->env;
366
int slot = LUAU_INSN_C(insn) & h->nodemask8;
367
LuaNode* n = &h->node[slot];
368
369
if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)) && !h->readonly))
370
{
371
setobj2t(L, gval(n), ra);
372
luaC_barriert(L, h, ra);
373
VM_NEXT();
374
}
375
else
376
{
377
// slow-path, may invoke Lua calls via __newindex metamethod
378
TValue g;
379
sethvalue(L, &g, h);
380
L->cachedslot = slot;
381
VM_PROTECT(luaV_settable(L, &g, kv, ra));
382
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
383
VM_PATCH_C(pc - 2, L->cachedslot);
384
VM_NEXT();
385
}
386
}
387
388
VM_CASE(LOP_GETUPVAL)
389
{
390
Instruction insn = *pc++;
391
StkId ra = VM_REG(LUAU_INSN_A(insn));
392
TValue* ur = VM_UV(LUAU_INSN_B(insn));
393
TValue* v = ttisupval(ur) ? upvalue(ur)->v : ur;
394
395
setobj2s(L, ra, v);
396
VM_NEXT();
397
}
398
399
VM_CASE(LOP_SETUPVAL)
400
{
401
Instruction insn = *pc++;
402
StkId ra = VM_REG(LUAU_INSN_A(insn));
403
TValue* ur = VM_UV(LUAU_INSN_B(insn));
404
UpVal* uv = upvalue(ur);
405
406
setobj(L, uv->v, ra);
407
luaC_barrier(L, uv, ra);
408
VM_NEXT();
409
}
410
411
VM_CASE(LOP_CLOSEUPVALS)
412
{
413
Instruction insn = *pc++;
414
StkId ra = VM_REG(LUAU_INSN_A(insn));
415
416
if (L->openupval && L->openupval->v >= ra)
417
luaF_close(L, ra);
418
VM_NEXT();
419
}
420
421
VM_CASE(LOP_GETIMPORT)
422
{
423
Instruction insn = *pc++;
424
StkId ra = VM_REG(LUAU_INSN_A(insn));
425
TValue* kv = VM_KV(LUAU_INSN_D(insn));
426
427
// fast-path: import resolution was successful and closure environment is "safe" for import
428
if (!ttisnil(kv) && cl->env->safeenv)
429
{
430
setobj2s(L, ra, kv);
431
pc++; // skip over AUX
432
VM_NEXT();
433
}
434
else
435
{
436
uint32_t aux = *pc++;
437
438
VM_PROTECT(luaV_getimport(L, cl->env, k, ra, aux, /* propagatenil= */ false));
439
VM_NEXT();
440
}
441
}
442
443
VM_CASE(LOP_GETTABLEKS)
444
{
445
Instruction insn = *pc++;
446
StkId ra = VM_REG(LUAU_INSN_A(insn));
447
StkId rb = VM_REG(LUAU_INSN_B(insn));
448
uint32_t aux = *pc++;
449
TValue* kv = VM_KV(aux);
450
LUAU_ASSERT(ttisstring(kv));
451
452
// fast-path: built-in table
453
if (LUAU_LIKELY(ttistable(rb)))
454
{
455
LuaTable* h = hvalue(rb);
456
457
int slot = LUAU_INSN_C(insn) & h->nodemask8;
458
LuaNode* n = &h->node[slot];
459
460
// fast-path: value is in expected slot
461
if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n))))
462
{
463
setobj2s(L, ra, gval(n));
464
VM_NEXT();
465
}
466
else if (!h->metatable)
467
{
468
// fast-path: value is not in expected slot, but the table lookup doesn't involve metatable
469
const TValue* res = luaH_getstr(h, tsvalue(kv));
470
471
if (res != luaO_nilobject)
472
{
473
int cachedslot = gval2slot(h, res);
474
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
475
VM_PATCH_C(pc - 2, cachedslot);
476
}
477
478
setobj2s(L, ra, res);
479
VM_NEXT();
480
}
481
else
482
{
483
// slow-path, may invoke Lua calls via __index metamethod
484
L->cachedslot = slot;
485
VM_PROTECT(luaV_gettable(L, rb, kv, ra));
486
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
487
VM_PATCH_C(pc - 2, L->cachedslot);
488
VM_NEXT();
489
}
490
}
491
else
492
{
493
// fast-path: user data with C __index TM
494
const TValue* fn = 0;
495
if (ttisuserdata(rb) && (fn = fasttm(L, uvalue(rb)->metatable, TM_INDEX)) && ttisfunction(fn) && clvalue(fn)->isC)
496
{
497
// note: it's safe to push arguments past top for complicated reasons (see top of the file)
498
LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
499
StkId top = L->top;
500
setobj2s(L, top + 0, fn);
501
setobj2s(L, top + 1, rb);
502
setobj2s(L, top + 2, kv);
503
L->top = top + 3;
504
505
L->cachedslot = LUAU_INSN_C(insn);
506
VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
507
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
508
VM_PATCH_C(pc - 2, L->cachedslot);
509
VM_NEXT();
510
}
511
else if (ttisvector(rb))
512
{
513
// fast-path: quick case-insensitive comparison with "X"/"Y"/"Z"
514
const char* name = getstr(tsvalue(kv));
515
int ic = (name[0] | ' ') - 'x';
516
517
#if LUA_VECTOR_SIZE == 4
518
// 'w' is before 'x' in ascii, so ic is -1 when indexing with 'w'
519
if (ic == -1)
520
ic = 3;
521
#endif
522
523
if (unsigned(ic) < LUA_VECTOR_SIZE && name[1] == '\0')
524
{
525
const float* v = vvalue(rb); // silences ubsan when indexing v[]
526
setnvalue(ra, v[ic]);
527
VM_NEXT();
528
}
529
530
fn = fasttm(L, L->global->mt[LUA_TVECTOR], TM_INDEX);
531
532
if (fn && ttisfunction(fn) && clvalue(fn)->isC)
533
{
534
// note: it's safe to push arguments past top for complicated reasons (see top of the file)
535
LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
536
StkId top = L->top;
537
setobj2s(L, top + 0, fn);
538
setobj2s(L, top + 1, rb);
539
setobj2s(L, top + 2, kv);
540
L->top = top + 3;
541
542
L->cachedslot = LUAU_INSN_C(insn);
543
VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
544
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
545
VM_PATCH_C(pc - 2, L->cachedslot);
546
VM_NEXT();
547
}
548
549
// fall through to slow path
550
}
551
552
// fall through to slow path
553
}
554
555
// slow-path, may invoke Lua calls via __index metamethod
556
VM_PROTECT(luaV_gettable(L, rb, kv, ra));
557
VM_NEXT();
558
}
559
560
VM_CASE(LOP_SETTABLEKS)
561
{
562
Instruction insn = *pc++;
563
StkId ra = VM_REG(LUAU_INSN_A(insn));
564
StkId rb = VM_REG(LUAU_INSN_B(insn));
565
uint32_t aux = *pc++;
566
TValue* kv = VM_KV(aux);
567
LUAU_ASSERT(ttisstring(kv));
568
569
// fast-path: built-in table
570
if (LUAU_LIKELY(ttistable(rb)))
571
{
572
LuaTable* h = hvalue(rb);
573
574
int slot = LUAU_INSN_C(insn) & h->nodemask8;
575
LuaNode* n = &h->node[slot];
576
577
// fast-path: value is in expected slot
578
if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)) && !h->readonly))
579
{
580
setobj2t(L, gval(n), ra);
581
luaC_barriert(L, h, ra);
582
VM_NEXT();
583
}
584
else if (fastnotm(h->metatable, TM_NEWINDEX) && !h->readonly)
585
{
586
VM_PROTECT_PC(); // set may fail
587
588
TValue* res = luaH_setstr(L, h, tsvalue(kv));
589
int cachedslot = gval2slot(h, res);
590
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
591
VM_PATCH_C(pc - 2, cachedslot);
592
setobj2t(L, res, ra);
593
luaC_barriert(L, h, ra);
594
VM_NEXT();
595
}
596
else
597
{
598
// slow-path, may invoke Lua calls via __newindex metamethod
599
L->cachedslot = slot;
600
VM_PROTECT(luaV_settable(L, rb, kv, ra));
601
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
602
VM_PATCH_C(pc - 2, L->cachedslot);
603
VM_NEXT();
604
}
605
}
606
else
607
{
608
// fast-path: user data with C __newindex TM
609
const TValue* fn = 0;
610
if (ttisuserdata(rb) && (fn = fasttm(L, uvalue(rb)->metatable, TM_NEWINDEX)) && ttisfunction(fn) && clvalue(fn)->isC)
611
{
612
// note: it's safe to push arguments past top for complicated reasons (see top of the file)
613
LUAU_ASSERT(L->top + 4 < L->stack + L->stacksize);
614
StkId top = L->top;
615
setobj2s(L, top + 0, fn);
616
setobj2s(L, top + 1, rb);
617
setobj2s(L, top + 2, kv);
618
setobj2s(L, top + 3, ra);
619
L->top = top + 4;
620
621
L->cachedslot = LUAU_INSN_C(insn);
622
VM_PROTECT(luaV_callTM(L, 3, -1));
623
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
624
VM_PATCH_C(pc - 2, L->cachedslot);
625
VM_NEXT();
626
}
627
else
628
{
629
// slow-path, may invoke Lua calls via __newindex metamethod
630
VM_PROTECT(luaV_settable(L, rb, kv, ra));
631
VM_NEXT();
632
}
633
}
634
}
635
636
VM_CASE(LOP_GETTABLE)
637
{
638
Instruction insn = *pc++;
639
StkId ra = VM_REG(LUAU_INSN_A(insn));
640
StkId rb = VM_REG(LUAU_INSN_B(insn));
641
StkId rc = VM_REG(LUAU_INSN_C(insn));
642
643
// fast-path: array lookup
644
if (ttistable(rb) && ttisnumber(rc))
645
{
646
LuaTable* h = hvalue(rb);
647
648
double indexd = nvalue(rc);
649
int index = int(indexd);
650
651
// index has to be an exact integer and in-bounds for the array portion
652
if (LUAU_LIKELY(unsigned(index) - 1 < unsigned(h->sizearray) && !h->metatable && double(index) == indexd))
653
{
654
setobj2s(L, ra, &h->array[unsigned(index - 1)]);
655
VM_NEXT();
656
}
657
658
// fall through to slow path
659
}
660
661
// slow-path: handles out of bounds array lookups, non-integer numeric keys, non-array table lookup, __index MT calls
662
VM_PROTECT(luaV_gettable(L, rb, rc, ra));
663
VM_NEXT();
664
}
665
666
VM_CASE(LOP_SETTABLE)
667
{
668
Instruction insn = *pc++;
669
StkId ra = VM_REG(LUAU_INSN_A(insn));
670
StkId rb = VM_REG(LUAU_INSN_B(insn));
671
StkId rc = VM_REG(LUAU_INSN_C(insn));
672
673
// fast-path: array assign
674
if (ttistable(rb) && ttisnumber(rc))
675
{
676
LuaTable* h = hvalue(rb);
677
678
double indexd = nvalue(rc);
679
int index = int(indexd);
680
681
// index has to be an exact integer and in-bounds for the array portion
682
if (LUAU_LIKELY(unsigned(index) - 1 < unsigned(h->sizearray) && !h->metatable && !h->readonly && double(index) == indexd))
683
{
684
setobj2t(L, &h->array[unsigned(index - 1)], ra);
685
luaC_barriert(L, h, ra);
686
VM_NEXT();
687
}
688
689
// fall through to slow path
690
}
691
692
// slow-path: handles out of bounds array assignments, non-integer numeric keys, non-array table access, __newindex MT calls
693
VM_PROTECT(luaV_settable(L, rb, rc, ra));
694
VM_NEXT();
695
}
696
697
VM_CASE(LOP_GETTABLEN)
698
{
699
Instruction insn = *pc++;
700
StkId ra = VM_REG(LUAU_INSN_A(insn));
701
StkId rb = VM_REG(LUAU_INSN_B(insn));
702
int c = LUAU_INSN_C(insn);
703
704
// fast-path: array lookup
705
if (ttistable(rb))
706
{
707
LuaTable* h = hvalue(rb);
708
709
if (LUAU_LIKELY(unsigned(c) < unsigned(h->sizearray) && !h->metatable))
710
{
711
setobj2s(L, ra, &h->array[c]);
712
VM_NEXT();
713
}
714
715
// fall through to slow path
716
}
717
718
// slow-path: handles out of bounds array lookups
719
TValue n;
720
setnvalue(&n, c + 1);
721
VM_PROTECT(luaV_gettable(L, rb, &n, ra));
722
VM_NEXT();
723
}
724
725
VM_CASE(LOP_SETTABLEN)
726
{
727
Instruction insn = *pc++;
728
StkId ra = VM_REG(LUAU_INSN_A(insn));
729
StkId rb = VM_REG(LUAU_INSN_B(insn));
730
int c = LUAU_INSN_C(insn);
731
732
// fast-path: array assign
733
if (ttistable(rb))
734
{
735
LuaTable* h = hvalue(rb);
736
737
if (LUAU_LIKELY(unsigned(c) < unsigned(h->sizearray) && !h->metatable && !h->readonly))
738
{
739
setobj2t(L, &h->array[c], ra);
740
luaC_barriert(L, h, ra);
741
VM_NEXT();
742
}
743
744
// fall through to slow path
745
}
746
747
// slow-path: handles out of bounds array lookups
748
TValue n;
749
setnvalue(&n, c + 1);
750
VM_PROTECT(luaV_settable(L, rb, &n, ra));
751
VM_NEXT();
752
}
753
754
VM_CASE(LOP_NEWCLOSURE)
755
{
756
Instruction insn = *pc++;
757
StkId ra = VM_REG(LUAU_INSN_A(insn));
758
759
Proto* pv = cl->l.p->p[LUAU_INSN_D(insn)];
760
LUAU_ASSERT(unsigned(LUAU_INSN_D(insn)) < unsigned(cl->l.p->sizep));
761
762
VM_PROTECT_PC(); // luaF_newLclosure may fail due to OOM
763
764
// note: we save closure to stack early in case the code below wants to capture it by value
765
Closure* ncl = luaF_newLclosure(L, pv->nups, cl->env, pv);
766
setclvalue(L, ra, ncl);
767
768
for (int ui = 0; ui < pv->nups; ++ui)
769
{
770
Instruction uinsn = *pc++;
771
LUAU_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE);
772
773
switch (LUAU_INSN_A(uinsn))
774
{
775
case LCT_VAL:
776
setobj(L, &ncl->l.uprefs[ui], VM_REG(LUAU_INSN_B(uinsn)));
777
break;
778
779
case LCT_REF:
780
setupvalue(L, &ncl->l.uprefs[ui], luaF_findupval(L, VM_REG(LUAU_INSN_B(uinsn))));
781
break;
782
783
case LCT_UPVAL:
784
setobj(L, &ncl->l.uprefs[ui], VM_UV(LUAU_INSN_B(uinsn)));
785
break;
786
787
default:
788
LUAU_ASSERT(!"Unknown upvalue capture type");
789
LUAU_UNREACHABLE(); // improves switch() codegen by eliding opcode bounds checks
790
}
791
}
792
793
VM_PROTECT(luaC_checkGC(L));
794
VM_NEXT();
795
}
796
797
VM_CASE(LOP_NAMECALL)
798
{
799
Instruction insn = *pc++;
800
StkId ra = VM_REG(LUAU_INSN_A(insn));
801
StkId rb = VM_REG(LUAU_INSN_B(insn));
802
uint32_t aux = *pc++;
803
TValue* kv = VM_KV(aux);
804
LUAU_ASSERT(ttisstring(kv));
805
806
if (LUAU_LIKELY(ttistable(rb)))
807
{
808
LuaTable* h = hvalue(rb);
809
// note: we can't use nodemask8 here because we need to query the main position of the table, and 8-bit nodemask8 only works
810
// for predictive lookups
811
LuaNode* n = &h->node[tsvalue(kv)->hash & (sizenode(h) - 1)];
812
813
const TValue* mt = 0;
814
const LuaNode* mtn = 0;
815
816
// fast-path: key is in the table in expected slot
817
if (ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)))
818
{
819
// note: order of copies allows rb to alias ra+1 or ra
820
setobj2s(L, ra + 1, rb);
821
setobj2s(L, ra, gval(n));
822
}
823
// fast-path: key is absent from the base, table has an __index table, and it has the result in the expected slot
824
else if (gnext(n) == 0 && (mt = fasttm(L, hvalue(rb)->metatable, TM_INDEX)) && ttistable(mt) &&
825
(mtn = &hvalue(mt)->node[LUAU_INSN_C(insn) & hvalue(mt)->nodemask8]) && ttisstring(gkey(mtn)) &&
826
tsvalue(gkey(mtn)) == tsvalue(kv) && !ttisnil(gval(mtn)))
827
{
828
// note: order of copies allows rb to alias ra+1 or ra
829
setobj2s(L, ra + 1, rb);
830
setobj2s(L, ra, gval(mtn));
831
}
832
else
833
{
834
// slow-path: handles full table lookup
835
setobj2s(L, ra + 1, rb);
836
L->cachedslot = LUAU_INSN_C(insn);
837
VM_PROTECT(luaV_gettable(L, rb, kv, ra));
838
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
839
VM_PATCH_C(pc - 2, L->cachedslot);
840
// recompute ra since stack might have been reallocated
841
ra = VM_REG(LUAU_INSN_A(insn));
842
if (ttisnil(ra))
843
luaG_methoderror(L, ra + 1, tsvalue(kv));
844
}
845
}
846
else
847
{
848
LuaTable* mt = ttisuserdata(rb) ? uvalue(rb)->metatable : L->global->mt[ttype(rb)];
849
const TValue* tmi = 0;
850
851
// fast-path: metatable with __namecall
852
if (const TValue* fn = fasttm(L, mt, TM_NAMECALL))
853
{
854
// note: order of copies allows rb to alias ra+1 or ra
855
setobj2s(L, ra + 1, rb);
856
setobj2s(L, ra, fn);
857
858
L->namecall = tsvalue(kv);
859
}
860
else if ((tmi = fasttm(L, mt, TM_INDEX)) && ttistable(tmi))
861
{
862
LuaTable* h = hvalue(tmi);
863
int slot = LUAU_INSN_C(insn) & h->nodemask8;
864
LuaNode* n = &h->node[slot];
865
866
// fast-path: metatable with __index that has method in expected slot
867
if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n))))
868
{
869
// note: order of copies allows rb to alias ra+1 or ra
870
setobj2s(L, ra + 1, rb);
871
setobj2s(L, ra, gval(n));
872
}
873
else
874
{
875
// slow-path: handles slot mismatch
876
setobj2s(L, ra + 1, rb);
877
L->cachedslot = slot;
878
VM_PROTECT(luaV_gettable(L, rb, kv, ra));
879
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
880
VM_PATCH_C(pc - 2, L->cachedslot);
881
// recompute ra since stack might have been reallocated
882
ra = VM_REG(LUAU_INSN_A(insn));
883
if (ttisnil(ra))
884
luaG_methoderror(L, ra + 1, tsvalue(kv));
885
}
886
}
887
else
888
{
889
// slow-path: handles non-table __index
890
setobj2s(L, ra + 1, rb);
891
VM_PROTECT(luaV_gettable(L, rb, kv, ra));
892
// recompute ra since stack might have been reallocated
893
ra = VM_REG(LUAU_INSN_A(insn));
894
if (ttisnil(ra))
895
luaG_methoderror(L, ra + 1, tsvalue(kv));
896
}
897
}
898
899
// intentional fallthrough to CALL
900
LUAU_ASSERT(LUAU_INSN_OP(*pc) == LOP_CALL);
901
}
902
903
VM_CASE(LOP_CALL)
904
{
905
VM_INTERRUPT();
906
Instruction insn = *pc++;
907
StkId ra = VM_REG(LUAU_INSN_A(insn));
908
909
int nparams = LUAU_INSN_B(insn) - 1;
910
int nresults = LUAU_INSN_C(insn) - 1;
911
912
StkId argtop = L->top;
913
argtop = (nparams == LUA_MULTRET) ? argtop : ra + 1 + nparams;
914
915
// slow-path: not a function call
916
if (LUAU_UNLIKELY(!ttisfunction(ra)))
917
{
918
VM_PROTECT_PC(); // luaV_tryfuncTM may fail
919
920
luaV_tryfuncTM(L, ra);
921
argtop++; // __call adds an extra self
922
}
923
924
Closure* ccl = clvalue(ra);
925
L->ci->savedpc = pc;
926
927
CallInfo* ci = incr_ci(L);
928
ci->func = ra;
929
ci->base = ra + 1;
930
ci->top = argtop + ccl->stacksize; // note: technically UB since we haven't reallocated the stack yet
931
ci->savedpc = NULL;
932
ci->flags = 0;
933
ci->nresults = nresults;
934
935
L->base = ci->base;
936
L->top = argtop;
937
938
// note: this reallocs stack, but we don't need to VM_PROTECT this
939
// this is because we're going to modify base/savedpc manually anyhow
940
// crucially, we can't use ra/argtop after this line
941
luaD_checkstackfornewci(L, ccl->stacksize);
942
943
LUAU_ASSERT(ci->top <= L->stack_last);
944
945
if (!ccl->isC)
946
{
947
Proto* p = ccl->l.p;
948
949
// fill unused parameters with nil
950
StkId argi = L->top;
951
StkId argend = L->base + p->numparams;
952
while (argi < argend)
953
setnilvalue(argi++); // complete missing arguments
954
L->top = p->is_vararg ? argi : ci->top;
955
956
// reentry
957
// codeentry may point to NATIVECALL instruction when proto is compiled to native code
958
// this will result in execution continuing in native code, and is equivalent to if (p->execdata) but has no additional overhead
959
// note that p->codeentry may point *outside* of p->code..p->code+p->sizecode, but that pointer never gets saved to savedpc.
960
pc = SingleStep ? p->code : p->codeentry;
961
cl = ccl;
962
base = L->base;
963
k = p->k;
964
VM_NEXT();
965
}
966
else
967
{
968
lua_CFunction func = ccl->c.f;
969
int n = func(L);
970
971
// yield
972
if (n < 0)
973
goto exit;
974
975
// ci is our callinfo, cip is our parent
976
CallInfo* ci = L->ci;
977
CallInfo* cip = ci - 1;
978
979
// copy return values into parent stack (but only up to nresults!), fill the rest with nil
980
// note: in MULTRET context nresults starts as -1 so i != 0 condition never activates intentionally
981
StkId res = ci->func;
982
StkId vali = L->top - n;
983
StkId valend = L->top;
984
985
int i;
986
for (i = nresults; i != 0 && vali < valend; i--)
987
setobj2s(L, res++, vali++);
988
while (i-- > 0)
989
setnilvalue(res++);
990
991
// pop the stack frame
992
L->ci = cip;
993
L->base = cip->base;
994
L->top = (nresults == LUA_MULTRET) ? res : cip->top;
995
996
base = L->base; // stack may have been reallocated, so we need to refresh base ptr
997
VM_NEXT();
998
}
999
}
1000
1001
VM_CASE(LOP_RETURN)
{
    // RETURN: copy B-1 return values starting at R(A) (or everything up to L->top when B-1 == LUA_MULTRET)
    // into the caller's frame starting at the callee's function slot, pop the CallInfo, and resume the caller.
    VM_INTERRUPT();
    Instruction insn = *pc++;
    StkId ra = &base[LUAU_INSN_A(insn)]; // note: this can point to L->top if b == LUA_MULTRET making VM_REG unsafe to use
    int b = LUAU_INSN_B(insn) - 1;

    // ci is our callinfo, cip is our parent
    CallInfo* ci = L->ci;
    CallInfo* cip = ci - 1;

    StkId res = ci->func; // note: we assume CALL always puts func+args and expects results to start at func

    StkId vali = ra;
    StkId valend =
        (b == LUA_MULTRET) ? L->top : ra + b; // copy as much as possible for MULTRET calls, and only as much as needed otherwise

    int nresults = ci->nresults;

    // copy return values into parent stack (but only up to nresults!), fill the rest with nil
    // note: in MULTRET context nresults starts as -1 so i != 0 condition never activates intentionally
    int i;
    for (i = nresults; i != 0 && vali < valend; i--)
        setobj2s(L, res++, vali++);
    while (i-- > 0)
        setnilvalue(res++);

    // pop the stack frame
    L->ci = cip;
    L->base = cip->base;
    L->top = (nresults == LUA_MULTRET) ? res : cip->top;

    // we're done!
    // LUA_CALLINFO_RETURN marks the frame the interpreter was entered from, so returning past it leaves the VM loop
    if (LUAU_UNLIKELY(ci->flags & LUA_CALLINFO_RETURN))
    {
        goto exit;
    }

    LUAU_ASSERT(isLua(L->ci));

    Closure* nextcl = clvalue(cip->func);
    Proto* nextproto = nextcl->l.p;

#if VM_HAS_NATIVE
    // if the caller frame is compiled to native code (and we're not single-stepping), hand control back to it
    if (LUAU_UNLIKELY((cip->flags & LUA_CALLINFO_NATIVE) && !SingleStep))
    {
        if (L->global->ecb.enter(L, nextproto) == 1)
            goto reentry;
        else
            goto exit;
    }
#endif

    // reentry: restore the interpreter's cached state (pc/cl/base/k) for the caller frame
    pc = cip->savedpc;
    cl = nextcl;
    base = L->base;
    k = nextproto->k;
    VM_NEXT();
}
1061
1062
VM_CASE(LOP_JUMP)
{
    // JUMP: unconditional relative jump by the sign-extended D offset
    Instruction insn = *pc++;

    pc += LUAU_INSN_D(insn);
    LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
    VM_NEXT();
}
1070
1071
VM_CASE(LOP_JUMPIF)
{
    // JUMPIF: take the relative D jump when R(A) is truthy; fall through otherwise
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));

    if (!l_isfalse(ra))
        pc += LUAU_INSN_D(insn);
    LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
    VM_NEXT();
}
1080
1081
VM_CASE(LOP_JUMPIFNOT)
{
    // JUMPIFNOT: take the relative D jump when R(A) is falsy; fall through otherwise
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));

    if (l_isfalse(ra))
        pc += LUAU_INSN_D(insn);
    LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
    VM_NEXT();
}
1090
1091
VM_CASE(LOP_JUMPIFEQ)
{
    // JUMPIFEQ: compare R(A) with R(aux); jump by D when equal, otherwise skip the aux word (pc += 1).
    // Values of different types are never equal; same-type values get a per-type fast path below,
    // and tables/userdata with an __eq metamethod fall through to the slow path.
    Instruction insn = *pc++;
    uint32_t aux = *pc;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(aux);

    // Note that all jumps below jump by 1 in the "false" case to skip over aux
    if (ttype(ra) == ttype(rb))
    {
        switch (ttype(ra))
        {
        case LUA_TNIL:
            // nil == nil is always true
            pc += LUAU_INSN_D(insn);
            LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
            VM_NEXT();

        case LUA_TBOOLEAN:
            pc += bvalue(ra) == bvalue(rb) ? LUAU_INSN_D(insn) : 1;
            LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
            VM_NEXT();

        case LUA_TLIGHTUSERDATA:
            // light userdata compares both the pointer and the tag
            pc += (pvalue(ra) == pvalue(rb) && lightuserdatatag(ra) == lightuserdatatag(rb)) ? LUAU_INSN_D(insn) : 1;
            LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
            VM_NEXT();

        case LUA_TNUMBER:
            pc += nvalue(ra) == nvalue(rb) ? LUAU_INSN_D(insn) : 1;
            LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
            VM_NEXT();

        case LUA_TVECTOR:
            pc += luai_veceq(vvalue(ra), vvalue(rb)) ? LUAU_INSN_D(insn) : 1;
            LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
            VM_NEXT();

        case LUA_TSTRING:
        case LUA_TFUNCTION:
        case LUA_TTHREAD:
        case LUA_TBUFFER:
            // these types compare by object identity (strings are interned)
            pc += gcvalue(ra) == gcvalue(rb) ? LUAU_INSN_D(insn) : 1;
            LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
            VM_NEXT();

        case LUA_TTABLE:
            // fast-path: same metatable, no EQ metamethod
            if (hvalue(ra)->metatable == hvalue(rb)->metatable)
            {
                const TValue* fn = fasttm(L, hvalue(ra)->metatable, TM_EQ);

                if (!fn)
                {
                    pc += hvalue(ra) == hvalue(rb) ? LUAU_INSN_D(insn) : 1;
                    LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
                    VM_NEXT();
                }
            }
            // slow path after switch()
            break;

        case LUA_TUSERDATA:
            // fast-path: same metatable, no EQ metamethod or C metamethod
            if (uvalue(ra)->metatable == uvalue(rb)->metatable)
            {
                const TValue* fn = fasttm(L, uvalue(ra)->metatable, TM_EQ);

                if (!fn)
                {
                    pc += uvalue(ra) == uvalue(rb) ? LUAU_INSN_D(insn) : 1;
                    LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
                    VM_NEXT();
                }
                else if (ttisfunction(fn) && clvalue(fn)->isC)
                {
                    // call the C __eq metamethod directly, without going through luaV_equalval
                    // note: it's safe to push arguments past top for complicated reasons (see top of the file)
                    LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
                    StkId top = L->top;
                    setobj2s(L, top + 0, fn);
                    setobj2s(L, top + 1, ra);
                    setobj2s(L, top + 2, rb);
                    int res = int(top - base);
                    L->top = top + 3;

                    VM_PROTECT(luaV_callTM(L, 2, res));
                    pc += !l_isfalse(&base[res]) ? LUAU_INSN_D(insn) : 1;
                    LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
                    VM_NEXT();
                }
            }
            // slow path after switch()
            break;

        case LUA_TINTEGER:
            if (FFlag::LuauIntegerType)
            {
                pc += lvalue(ra) == lvalue(rb) ? LUAU_INSN_D(insn) : 1;
                LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
                VM_NEXT();
            }
            [[fallthrough]];

        default:
            LUAU_ASSERT(!"Unknown value type");
            LUAU_UNREACHABLE(); // improves switch() codegen by eliding opcode bounds checks
        }

        // slow-path: tables with metatables and userdata values
        // note that we don't have a fast path for userdata values without metatables, since that's very rare
        int res;
        VM_PROTECT(res = luaV_equalval(L, ra, rb));

        pc += (res == 1) ? LUAU_INSN_D(insn) : 1;
        LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
        VM_NEXT();
    }
    else
    {
        // different types are never equal: skip aux and fall through
        pc += 1;
        LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
        VM_NEXT();
    }
}
1214
1215
VM_CASE(LOP_JUMPIFNOTEQ)
{
    // JUMPIFNOTEQ: compare R(A) with R(aux); jump by D when NOT equal, otherwise skip the aux word (pc += 1).
    // Mirror image of JUMPIFEQ: per-type fast paths, slow path for tables/userdata with an __eq metamethod.
    Instruction insn = *pc++;
    uint32_t aux = *pc;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(aux);

    // Note that all jumps below jump by 1 in the "true" case to skip over aux
    if (ttype(ra) == ttype(rb))
    {
        switch (ttype(ra))
        {
        case LUA_TNIL:
            // nil == nil is always true, so "not equal" never jumps
            pc += 1;
            LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
            VM_NEXT();

        case LUA_TBOOLEAN:
            pc += bvalue(ra) != bvalue(rb) ? LUAU_INSN_D(insn) : 1;
            LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
            VM_NEXT();

        case LUA_TLIGHTUSERDATA:
            // light userdata compares both the pointer and the tag
            pc += (pvalue(ra) != pvalue(rb) || lightuserdatatag(ra) != lightuserdatatag(rb)) ? LUAU_INSN_D(insn) : 1;
            LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
            VM_NEXT();

        case LUA_TNUMBER:
            pc += nvalue(ra) != nvalue(rb) ? LUAU_INSN_D(insn) : 1;
            LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
            VM_NEXT();

        case LUA_TVECTOR:
            pc += !luai_veceq(vvalue(ra), vvalue(rb)) ? LUAU_INSN_D(insn) : 1;
            LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
            VM_NEXT();

        case LUA_TSTRING:
        case LUA_TFUNCTION:
        case LUA_TTHREAD:
        case LUA_TBUFFER:
            // these types compare by object identity (strings are interned)
            pc += gcvalue(ra) != gcvalue(rb) ? LUAU_INSN_D(insn) : 1;
            LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
            VM_NEXT();

        case LUA_TTABLE:
            // fast-path: same metatable, no EQ metamethod
            if (hvalue(ra)->metatable == hvalue(rb)->metatable)
            {
                const TValue* fn = fasttm(L, hvalue(ra)->metatable, TM_EQ);

                if (!fn)
                {
                    pc += hvalue(ra) != hvalue(rb) ? LUAU_INSN_D(insn) : 1;
                    LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
                    VM_NEXT();
                }
            }
            // slow path after switch()
            break;

        case LUA_TUSERDATA:
            // fast-path: same metatable, no EQ metamethod or C metamethod
            if (uvalue(ra)->metatable == uvalue(rb)->metatable)
            {
                const TValue* fn = fasttm(L, uvalue(ra)->metatable, TM_EQ);

                if (!fn)
                {
                    pc += uvalue(ra) != uvalue(rb) ? LUAU_INSN_D(insn) : 1;
                    LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
                    VM_NEXT();
                }
                else if (ttisfunction(fn) && clvalue(fn)->isC)
                {
                    // call the C __eq metamethod directly, without going through luaV_equalval
                    // note: it's safe to push arguments past top for complicated reasons (see top of the file)
                    LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
                    StkId top = L->top;
                    setobj2s(L, top + 0, fn);
                    setobj2s(L, top + 1, ra);
                    setobj2s(L, top + 2, rb);
                    int res = int(top - base);
                    L->top = top + 3;

                    VM_PROTECT(luaV_callTM(L, 2, res));
                    // jump when the metamethod result is falsy (i.e. the values are NOT equal)
                    pc += l_isfalse(&base[res]) ? LUAU_INSN_D(insn) : 1;
                    LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
                    VM_NEXT();
                }
            }
            // slow path after switch()
            break;

        case LUA_TINTEGER:
            if (FFlag::LuauIntegerType)
            {
                pc += lvalue(ra) != lvalue(rb) ? LUAU_INSN_D(insn) : 1;
                LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
                VM_NEXT();
            }
            [[fallthrough]];

        default:
            LUAU_ASSERT(!"Unknown value type");
            LUAU_UNREACHABLE(); // improves switch() codegen by eliding opcode bounds checks
        }

        // slow-path: tables with metatables and userdata values
        // note that we don't have a fast path for userdata values without metatables, since that's very rare
        int res;
        VM_PROTECT(res = luaV_equalval(L, ra, rb));

        pc += (res == 0) ? LUAU_INSN_D(insn) : 1;
        LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
        VM_NEXT();
    }
    else
    {
        // different types are never equal, so "not equal" always jumps
        pc += LUAU_INSN_D(insn);
        LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
        VM_NEXT();
    }
}
1338
1339
VM_CASE(LOP_JUMPIFLE)
{
    // JUMPIFLE: jump by D when R(A) <= R(aux), otherwise skip the aux word.
    // Fast paths for number/number and string/string; everything else (including metamethods) goes through luaV_lessequal.
    Instruction insn = *pc++;
    uint32_t aux = *pc;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(aux);

    // fast-path: number
    // Note that all jumps below jump by 1 in the "false" case to skip over aux
    if (LUAU_LIKELY(ttisnumber(ra) && ttisnumber(rb)))
    {
        pc += nvalue(ra) <= nvalue(rb) ? LUAU_INSN_D(insn) : 1;
        LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
        VM_NEXT();
    }
    // fast-path: string
    else if (ttisstring(ra) && ttisstring(rb))
    {
        pc += luaV_strcmp(tsvalue(ra), tsvalue(rb)) <= 0 ? LUAU_INSN_D(insn) : 1;
        LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
        VM_NEXT();
    }
    else
    {
        // slow-path: may invoke metamethods, hence VM_PROTECT
        int res;
        VM_PROTECT(res = luaV_lessequal(L, ra, rb));

        pc += (res == 1) ? LUAU_INSN_D(insn) : 1;
        LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
        VM_NEXT();
    }
}
1371
1372
VM_CASE(LOP_JUMPIFNOTLE)
{
    // JUMPIFNOTLE: jump by D when NOT (R(A) <= R(aux)), otherwise skip the aux word.
    // Note: uses !(a <= b) rather than (a > b) so that NaN comparisons jump, matching IEEE semantics.
    Instruction insn = *pc++;
    uint32_t aux = *pc;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(aux);

    // fast-path: number
    // Note that all jumps below jump by 1 in the "true" case to skip over aux
    if (LUAU_LIKELY(ttisnumber(ra) && ttisnumber(rb)))
    {
        pc += !(nvalue(ra) <= nvalue(rb)) ? LUAU_INSN_D(insn) : 1;
        LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
        VM_NEXT();
    }
    // fast-path: string
    else if (ttisstring(ra) && ttisstring(rb))
    {
        pc += !(luaV_strcmp(tsvalue(ra), tsvalue(rb)) <= 0) ? LUAU_INSN_D(insn) : 1;
        LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
        VM_NEXT();
    }
    else
    {
        // slow-path: may invoke metamethods, hence VM_PROTECT
        int res;
        VM_PROTECT(res = luaV_lessequal(L, ra, rb));

        pc += (res == 0) ? LUAU_INSN_D(insn) : 1;
        LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
        VM_NEXT();
    }
}
1404
1405
VM_CASE(LOP_JUMPIFLT)
{
    // JUMPIFLT: jump by D when R(A) < R(aux), otherwise skip the aux word.
    // Fast paths for number/number and string/string; everything else goes through luaV_lessthan.
    Instruction insn = *pc++;
    uint32_t aux = *pc;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(aux);

    // fast-path: number
    // Note that all jumps below jump by 1 in the "false" case to skip over aux
    if (LUAU_LIKELY(ttisnumber(ra) && ttisnumber(rb)))
    {
        pc += nvalue(ra) < nvalue(rb) ? LUAU_INSN_D(insn) : 1;
        LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
        VM_NEXT();
    }
    // fast-path: string
    else if (ttisstring(ra) && ttisstring(rb))
    {
        pc += luaV_strcmp(tsvalue(ra), tsvalue(rb)) < 0 ? LUAU_INSN_D(insn) : 1;
        LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
        VM_NEXT();
    }
    else
    {
        // slow-path: may invoke metamethods, hence VM_PROTECT
        int res;
        VM_PROTECT(res = luaV_lessthan(L, ra, rb));

        pc += (res == 1) ? LUAU_INSN_D(insn) : 1;
        LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
        VM_NEXT();
    }
}
1437
1438
VM_CASE(LOP_JUMPIFNOTLT)
{
    // JUMPIFNOTLT: jump by D when NOT (R(A) < R(aux)), otherwise skip the aux word.
    // Note: uses !(a < b) rather than (a >= b) so that NaN comparisons jump, matching IEEE semantics.
    Instruction insn = *pc++;
    uint32_t aux = *pc;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(aux);

    // fast-path: number
    // Note that all jumps below jump by 1 in the "true" case to skip over aux
    if (LUAU_LIKELY(ttisnumber(ra) && ttisnumber(rb)))
    {
        pc += !(nvalue(ra) < nvalue(rb)) ? LUAU_INSN_D(insn) : 1;
        LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
        VM_NEXT();
    }
    // fast-path: string
    else if (ttisstring(ra) && ttisstring(rb))
    {
        pc += !(luaV_strcmp(tsvalue(ra), tsvalue(rb)) < 0) ? LUAU_INSN_D(insn) : 1;
        LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
        VM_NEXT();
    }
    else
    {
        // slow-path: may invoke metamethods, hence VM_PROTECT
        int res;
        VM_PROTECT(res = luaV_lessthan(L, ra, rb));

        pc += (res == 0) ? LUAU_INSN_D(insn) : 1;
        LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
        VM_NEXT();
    }
}
1470
1471
VM_CASE(LOP_ADD)
{
    // ADD: R(A) := R(B) + R(C)
    // Fast paths: number+number and vector+vector; userdata with a C __add metamethod is called
    // directly; everything else goes through the generic arithmetic slow path.
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    StkId rc = VM_REG(LUAU_INSN_C(insn));

    // fast-path
    if (LUAU_LIKELY(ttisnumber(rb) && ttisnumber(rc)))
    {
        setnvalue(ra, nvalue(rb) + nvalue(rc));
        VM_NEXT();
    }
    else if (ttisvector(rb) && ttisvector(rc))
    {
        // component-wise vector addition
        const float* vb = vvalue(rb);
        const float* vc = vvalue(rc);
        setvvalue(ra, vb[0] + vc[0], vb[1] + vc[1], vb[2] + vc[2], vb[3] + vc[3]);
        VM_NEXT();
    }
    else
    {
        // fast-path for userdata with C functions
        const TValue* fn = 0;
        if (ttisuserdata(rb) && (fn = luaT_gettmbyobj(L, rb, TM_ADD)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            setobj2s(L, top + 2, rc);
            L->top = top + 3;

            VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
            VM_NEXT();
        }
        else
        {
            // slow-path, may invoke C/Lua via metamethods
            VM_PROTECT(luaV_doarithimpl<TM_ADD>(L, ra, rb, rc));
            VM_NEXT();
        }
    }
}
1516
1517
VM_CASE(LOP_SUB)
{
    // SUB: R(A) := R(B) - R(C)
    // Same structure as ADD: number/vector fast paths, direct C __sub call, then the generic slow path.
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    StkId rc = VM_REG(LUAU_INSN_C(insn));

    // fast-path
    if (LUAU_LIKELY(ttisnumber(rb) && ttisnumber(rc)))
    {
        setnvalue(ra, nvalue(rb) - nvalue(rc));
        VM_NEXT();
    }
    else if (ttisvector(rb) && ttisvector(rc))
    {
        // component-wise vector subtraction
        const float* vb = vvalue(rb);
        const float* vc = vvalue(rc);
        setvvalue(ra, vb[0] - vc[0], vb[1] - vc[1], vb[2] - vc[2], vb[3] - vc[3]);
        VM_NEXT();
    }
    else
    {
        // fast-path for userdata with C functions
        const TValue* fn = 0;
        if (ttisuserdata(rb) && (fn = luaT_gettmbyobj(L, rb, TM_SUB)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            setobj2s(L, top + 2, rc);
            L->top = top + 3;

            VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
            VM_NEXT();
        }
        else
        {
            // slow-path, may invoke C/Lua via metamethods
            VM_PROTECT(luaV_doarithimpl<TM_SUB>(L, ra, rb, rc));
            VM_NEXT();
        }
    }
}
1562
1563
VM_CASE(LOP_MUL)
{
    // MUL: R(A) := R(B) * R(C)
    // Fast paths: number*number, vector*number, vector*vector (component-wise), number*vector;
    // userdata with a C __mul metamethod is called directly; otherwise the generic slow path runs.
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    StkId rc = VM_REG(LUAU_INSN_C(insn));

    // fast-path
    if (LUAU_LIKELY(ttisnumber(rb) && ttisnumber(rc)))
    {
        setnvalue(ra, nvalue(rb) * nvalue(rc));
        VM_NEXT();
    }
    else if (ttisvector(rb) && ttisnumber(rc))
    {
        const float* vb = vvalue(rb);
        float vc = cast_to(float, nvalue(rc));
        setvvalue(ra, vb[0] * vc, vb[1] * vc, vb[2] * vc, vb[3] * vc);
        VM_NEXT();
    }
    else if (ttisvector(rb) && ttisvector(rc))
    {
        const float* vb = vvalue(rb);
        const float* vc = vvalue(rc);
        setvvalue(ra, vb[0] * vc[0], vb[1] * vc[1], vb[2] * vc[2], vb[3] * vc[3]);
        VM_NEXT();
    }
    else if (ttisnumber(rb) && ttisvector(rc))
    {
        float vb = cast_to(float, nvalue(rb));
        const float* vc = vvalue(rc);
        setvvalue(ra, vb * vc[0], vb * vc[1], vb * vc[2], vb * vc[3]);
        VM_NEXT();
    }
    else
    {
        // fast-path for userdata with C functions
        // for number*userdata the metamethod comes from the userdata operand, hence rbc selection
        StkId rbc = ttisnumber(rb) ? rc : rb;
        const TValue* fn = 0;
        if (ttisuserdata(rbc) && (fn = luaT_gettmbyobj(L, rbc, TM_MUL)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            setobj2s(L, top + 2, rc);
            L->top = top + 3;

            VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
            VM_NEXT();
        }
        else
        {
            // slow-path, may invoke C/Lua via metamethods
            VM_PROTECT(luaV_doarithimpl<TM_MUL>(L, ra, rb, rc));
            VM_NEXT();
        }
    }
}
1623
1624
VM_CASE(LOP_DIV)
{
    // DIV: R(A) := R(B) / R(C)
    // Same fast-path structure as MUL: number/number, vector/number, vector/vector, number/vector,
    // then direct C __div call, then the generic slow path.
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    StkId rc = VM_REG(LUAU_INSN_C(insn));

    // fast-path
    if (LUAU_LIKELY(ttisnumber(rb) && ttisnumber(rc)))
    {
        setnvalue(ra, nvalue(rb) / nvalue(rc));
        VM_NEXT();
    }
    else if (ttisvector(rb) && ttisnumber(rc))
    {
        const float* vb = vvalue(rb);
        float vc = cast_to(float, nvalue(rc));
        setvvalue(ra, vb[0] / vc, vb[1] / vc, vb[2] / vc, vb[3] / vc);
        VM_NEXT();
    }
    else if (ttisvector(rb) && ttisvector(rc))
    {
        const float* vb = vvalue(rb);
        const float* vc = vvalue(rc);
        setvvalue(ra, vb[0] / vc[0], vb[1] / vc[1], vb[2] / vc[2], vb[3] / vc[3]);
        VM_NEXT();
    }
    else if (ttisnumber(rb) && ttisvector(rc))
    {
        float vb = cast_to(float, nvalue(rb));
        const float* vc = vvalue(rc);
        setvvalue(ra, vb / vc[0], vb / vc[1], vb / vc[2], vb / vc[3]);
        VM_NEXT();
    }
    else
    {
        // fast-path for userdata with C functions
        // for number/userdata the metamethod comes from the userdata operand, hence rbc selection
        StkId rbc = ttisnumber(rb) ? rc : rb;
        const TValue* fn = 0;
        if (ttisuserdata(rbc) && (fn = luaT_gettmbyobj(L, rbc, TM_DIV)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            setobj2s(L, top + 2, rc);
            L->top = top + 3;

            VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
            VM_NEXT();
        }
        else
        {
            // slow-path, may invoke C/Lua via metamethods
            VM_PROTECT(luaV_doarithimpl<TM_DIV>(L, ra, rb, rc));
            VM_NEXT();
        }
    }
}
1684
1685
VM_CASE(LOP_IDIV)
{
    // IDIV: R(A) := R(B) // R(C) (floor division)
    // Fast paths exist only for number//number and vector//number; other operand combinations
    // go through the C __idiv metamethod path or the generic slow path.
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    StkId rc = VM_REG(LUAU_INSN_C(insn));

    // fast-path
    if (LUAU_LIKELY(ttisnumber(rb) && ttisnumber(rc)))
    {
        setnvalue(ra, luai_numidiv(nvalue(rb), nvalue(rc)));
        VM_NEXT();
    }
    else if (ttisvector(rb) && ttisnumber(rc))
    {
        // component-wise floor division of a vector by a scalar
        const float* vb = vvalue(rb);
        float vc = cast_to(float, nvalue(rc));
        setvvalue(
            ra,
            float(luai_numidiv(vb[0], vc)),
            float(luai_numidiv(vb[1], vc)),
            float(luai_numidiv(vb[2], vc)),
            float(luai_numidiv(vb[3], vc))
        );
        VM_NEXT();
    }
    else
    {
        // fast-path for userdata with C functions
        // for number//userdata the metamethod comes from the userdata operand, hence rbc selection
        StkId rbc = ttisnumber(rb) ? rc : rb;
        const TValue* fn = 0;
        if (ttisuserdata(rbc) && (fn = luaT_gettmbyobj(L, rbc, TM_IDIV)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            setobj2s(L, top + 2, rc);
            L->top = top + 3;

            VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
            VM_NEXT();
        }
        else
        {
            // slow-path, may invoke C/Lua via metamethods
            VM_PROTECT(luaV_doarithimpl<TM_IDIV>(L, ra, rb, rc));
            VM_NEXT();
        }
    }
}
1737
1738
VM_CASE(LOP_MOD)
{
    // MOD: R(A) := R(B) % R(C)
    // Fast path only for number%number; operands are copied into locals because luai_nummod
    // is a macro that may evaluate its arguments more than once.
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    StkId rc = VM_REG(LUAU_INSN_C(insn));

    // fast-path
    if (ttisnumber(rb) && ttisnumber(rc))
    {
        double nb = nvalue(rb);
        double nc = nvalue(rc);
        setnvalue(ra, luai_nummod(nb, nc));
        VM_NEXT();
    }
    else
    {
        // slow-path, may invoke C/Lua via metamethods
        VM_PROTECT(luaV_doarithimpl<TM_MOD>(L, ra, rb, rc));
        VM_NEXT();
    }
}
1760
1761
VM_CASE(LOP_POW)
{
    // POW: R(A) := R(B) ^ R(C); number fast path uses libm pow, anything else takes the
    // metamethod-capable slow path.
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    StkId rc = VM_REG(LUAU_INSN_C(insn));

    // fast-path
    if (ttisnumber(rb) && ttisnumber(rc))
    {
        double nb = nvalue(rb);
        double nc = nvalue(rc);
        setnvalue(ra, pow(nb, nc));
        VM_NEXT();
    }
    else
    {
        // slow-path, may invoke C/Lua via metamethods
        VM_PROTECT(luaV_doarithimpl<TM_POW>(L, ra, rb, rc));
        VM_NEXT();
    }
}
1781
1782
VM_CASE(LOP_ADDK)
{
    // ADDK: R(A) := R(B) + K(C), where K(C) is a numeric constant from the proto's constant table.
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    TValue* kv = VM_KV(LUAU_INSN_C(insn));

    // fast-path: numeric register operand
    if (ttisnumber(rb))
    {
        double nb = nvalue(rb);
        double nk = nvalue(kv);
        setnvalue(ra, nb + nk);
        VM_NEXT();
    }
    else
    {
        // slow-path, may invoke C/Lua via metamethods
        VM_PROTECT(luaV_doarithimpl<TM_ADD>(L, ra, rb, kv));
        VM_NEXT();
    }
}
1802
1803
VM_CASE(LOP_SUBK)
{
    // SUBK: R(A) := R(B) - K(C), where K(C) is a numeric constant from the proto's constant table.
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    TValue* kv = VM_KV(LUAU_INSN_C(insn));

    // fast-path: numeric register operand
    if (ttisnumber(rb))
    {
        double nb = nvalue(rb);
        double nk = nvalue(kv);
        setnvalue(ra, nb - nk);
        VM_NEXT();
    }
    else
    {
        // slow-path, may invoke C/Lua via metamethods
        VM_PROTECT(luaV_doarithimpl<TM_SUB>(L, ra, rb, kv));
        VM_NEXT();
    }
}
1823
1824
VM_CASE(LOP_MULK)
{
    // MULK: R(A) := R(B) * K(C)
    // Fast paths: number*constant and vector*constant; userdata with a C __mul metamethod is
    // called directly with the constant as the second argument.
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    TValue* kv = VM_KV(LUAU_INSN_C(insn));

    // fast-path
    if (LUAU_LIKELY(ttisnumber(rb)))
    {
        setnvalue(ra, nvalue(rb) * nvalue(kv));
        VM_NEXT();
    }
    else if (ttisvector(rb))
    {
        const float* vb = vvalue(rb);
        float vc = cast_to(float, nvalue(kv));
        setvvalue(ra, vb[0] * vc, vb[1] * vc, vb[2] * vc, vb[3] * vc);
        VM_NEXT();
    }
    else
    {
        // fast-path for userdata with C functions
        const TValue* fn = 0;
        if (ttisuserdata(rb) && (fn = luaT_gettmbyobj(L, rb, TM_MUL)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            setobj2s(L, top + 2, kv);
            L->top = top + 3;

            VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
            VM_NEXT();
        }
        else
        {
            // slow-path, may invoke C/Lua via metamethods
            VM_PROTECT(luaV_doarithimpl<TM_MUL>(L, ra, rb, kv));
            VM_NEXT();
        }
    }
}
1869
1870
VM_CASE(LOP_DIVK)
{
    // DIVK: R(A) := R(B) / K(C)
    // Fast paths: number/constant and vector/constant; userdata with a C __div metamethod is
    // called directly with the constant as the second argument.
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    TValue* kv = VM_KV(LUAU_INSN_C(insn));

    // fast-path
    if (LUAU_LIKELY(ttisnumber(rb)))
    {
        setnvalue(ra, nvalue(rb) / nvalue(kv));
        VM_NEXT();
    }
    else if (ttisvector(rb))
    {
        const float* vb = vvalue(rb);
        float nc = cast_to(float, nvalue(kv));
        setvvalue(ra, vb[0] / nc, vb[1] / nc, vb[2] / nc, vb[3] / nc);
        VM_NEXT();
    }
    else
    {
        // fast-path for userdata with C functions
        const TValue* fn = 0;
        if (ttisuserdata(rb) && (fn = luaT_gettmbyobj(L, rb, TM_DIV)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            setobj2s(L, top + 2, kv);
            L->top = top + 3;

            VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
            VM_NEXT();
        }
        else
        {
            // slow-path, may invoke C/Lua via metamethods
            VM_PROTECT(luaV_doarithimpl<TM_DIV>(L, ra, rb, kv));
            VM_NEXT();
        }
    }
}
1915
1916
VM_CASE(LOP_IDIVK)
{
    // IDIVK: R(A) := R(B) // K(C) (floor division by a constant)
    // Fast paths: number//constant and vector//constant; userdata with a C __idiv metamethod is
    // called directly with the constant as the second argument.
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    TValue* kv = VM_KV(LUAU_INSN_C(insn));

    // fast-path
    if (LUAU_LIKELY(ttisnumber(rb)))
    {
        setnvalue(ra, luai_numidiv(nvalue(rb), nvalue(kv)));
        VM_NEXT();
    }
    else if (ttisvector(rb))
    {
        // component-wise floor division of a vector by a scalar constant
        const float* vb = vvalue(rb);
        float vc = cast_to(float, nvalue(kv));
        setvvalue(
            ra,
            float(luai_numidiv(vb[0], vc)),
            float(luai_numidiv(vb[1], vc)),
            float(luai_numidiv(vb[2], vc)),
            float(luai_numidiv(vb[3], vc))
        );
        VM_NEXT();
    }
    else
    {
        // fast-path for userdata with C functions
        const TValue* fn = 0;
        if (ttisuserdata(rb) && (fn = luaT_gettmbyobj(L, rb, TM_IDIV)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            setobj2s(L, top + 2, kv);
            L->top = top + 3;

            VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
            VM_NEXT();
        }
        else
        {
            // slow-path, may invoke C/Lua via metamethods
            VM_PROTECT(luaV_doarithimpl<TM_IDIV>(L, ra, rb, kv));
            VM_NEXT();
        }
    }
}
1967
1968
VM_CASE(LOP_MODK)
{
    // MODK: R(A) := R(B) % K(C)
    // Operands are copied into locals because luai_nummod is a macro that may evaluate its
    // arguments more than once.
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    TValue* kv = VM_KV(LUAU_INSN_C(insn));

    // fast-path
    if (ttisnumber(rb))
    {
        double nb = nvalue(rb);
        double nk = nvalue(kv);
        setnvalue(ra, luai_nummod(nb, nk));
        VM_NEXT();
    }
    else
    {
        // slow-path, may invoke C/Lua via metamethods
        VM_PROTECT(luaV_doarithimpl<TM_MOD>(L, ra, rb, kv));
        VM_NEXT();
    }
}
1990
1991
VM_CASE(LOP_POWK)
{
    // POWK: R(A) := R(B) ^ K(C), with constant-exponent specializations.
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    TValue* kv = VM_KV(LUAU_INSN_C(insn));

    // fast-path
    if (ttisnumber(rb))
    {
        double nb = nvalue(rb);
        double nk = nvalue(kv);

        // pow is very slow so we specialize this for ^2, ^0.5 and ^3
        double r = (nk == 2.0) ? nb * nb : (nk == 0.5) ? sqrt(nb) : (nk == 3.0) ? nb * nb * nb : pow(nb, nk);

        setnvalue(ra, r);
        VM_NEXT();
    }
    else
    {
        // slow-path, may invoke C/Lua via metamethods
        VM_PROTECT(luaV_doarithimpl<TM_POW>(L, ra, rb, kv));
        VM_NEXT();
    }
}
2017
2018
VM_CASE(LOP_AND)
{
    // AND: R(A) := R(B) if R(B) is falsy, else R(C) — Lua's short-circuit 'and' value semantics
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    StkId rc = VM_REG(LUAU_INSN_C(insn));

    if (l_isfalse(rb))
        setobj2s(L, ra, rb);
    else
        setobj2s(L, ra, rc);
    VM_NEXT();
}
2028
2029
VM_CASE(LOP_OR)
{
    // OR: R(A) := R(C) if R(B) is falsy, else R(B) — Lua's short-circuit 'or' value semantics
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    StkId rc = VM_REG(LUAU_INSN_C(insn));

    if (l_isfalse(rb))
        setobj2s(L, ra, rc);
    else
        setobj2s(L, ra, rb);
    VM_NEXT();
}
2039
2040
VM_CASE(LOP_ANDK)
{
    // ANDK: like AND, but the right-hand operand is constant K(C)
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    TValue* kv = VM_KV(LUAU_INSN_C(insn));

    if (l_isfalse(rb))
        setobj2s(L, ra, rb);
    else
        setobj2s(L, ra, kv);
    VM_NEXT();
}
2050
2051
VM_CASE(LOP_ORK)
{
    // ORK: like OR, but the right-hand operand is constant K(C)
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    TValue* kv = VM_KV(LUAU_INSN_C(insn));

    if (l_isfalse(rb))
        setobj2s(L, ra, kv);
    else
        setobj2s(L, ra, rb);
    VM_NEXT();
}
2061
2062
VM_CASE(LOP_CONCAT)
{
    // CONCAT: R(A) := R(B) .. ... .. R(C) — concatenates the register range [B, C]
    Instruction insn = *pc++;
    int b = LUAU_INSN_B(insn);
    int c = LUAU_INSN_C(insn);

    // This call may realloc the stack! So we need to query args further down
    VM_PROTECT(luaV_concat(L, c - b + 1, c));

    // ra is computed only after the concat so it points into the (possibly reallocated) stack
    StkId ra = VM_REG(LUAU_INSN_A(insn));

    // luaV_concat leaves the result in register b; copy it to the destination
    setobj2s(L, ra, base + b);
    VM_PROTECT(luaC_checkGC(L));
    VM_NEXT();
}
2077
2078
VM_CASE(LOP_NOT)
{
    // NOT: R(A) := not R(B); result is always a boolean (true exactly when R(B) is falsy)
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));

    setbvalue(ra, l_isfalse(rb));
    VM_NEXT();
}
2088
2089
VM_CASE(LOP_MINUS)
{
    // MINUS: R(A) := -R(B) (unary minus)
    // Fast paths for numbers and vectors; userdata with a C __unm metamethod is called directly,
    // everything else goes through the generic arithmetic slow path.
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));

    // fast-path
    if (LUAU_LIKELY(ttisnumber(rb)))
    {
        setnvalue(ra, -nvalue(rb));
        VM_NEXT();
    }
    else if (ttisvector(rb))
    {
        // component-wise negation
        const float* vb = vvalue(rb);
        setvvalue(ra, -vb[0], -vb[1], -vb[2], -vb[3]);
        VM_NEXT();
    }
    else
    {
        // fast-path for userdata with C functions
        const TValue* fn = 0;
        if (ttisuserdata(rb) && (fn = luaT_gettmbyobj(L, rb, TM_UNM)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // unary metamethod: only one argument is pushed, hence top + 2
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 2 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            L->top = top + 2;

            VM_PROTECT(luaV_callTM(L, 1, LUAU_INSN_A(insn)));
            VM_NEXT();
        }
        else
        {
            // slow-path, may invoke C/Lua via metamethods
            // note: rb is passed for both operands, matching unary metamethod convention
            VM_PROTECT(luaV_doarithimpl<TM_UNM>(L, ra, rb, rb));
            VM_NEXT();
        }
    }
}
2131
2132
VM_CASE(LOP_LENGTH)
2133
{
2134
Instruction insn = *pc++;
2135
StkId ra = VM_REG(LUAU_INSN_A(insn));
2136
StkId rb = VM_REG(LUAU_INSN_B(insn));
2137
2138
// fast-path #1: tables
2139
if (LUAU_LIKELY(ttistable(rb)))
2140
{
2141
LuaTable* h = hvalue(rb);
2142
2143
if (fastnotm(h->metatable, TM_LEN))
2144
{
2145
setnvalue(ra, cast_num(luaH_getn(h)));
2146
VM_NEXT();
2147
}
2148
else
2149
{
2150
// slow-path, may invoke C/Lua via metamethods
2151
VM_PROTECT(luaV_dolen(L, ra, rb));
2152
VM_NEXT();
2153
}
2154
}
2155
// fast-path #2: strings (not very important but easy to do)
2156
else if (ttisstring(rb))
2157
{
2158
TString* ts = tsvalue(rb);
2159
setnvalue(ra, cast_num(ts->len));
2160
VM_NEXT();
2161
}
2162
else
2163
{
2164
// slow-path, may invoke C/Lua via metamethods
2165
VM_PROTECT(luaV_dolen(L, ra, rb));
2166
VM_NEXT();
2167
}
2168
}
2169
2170
VM_CASE(LOP_NEWTABLE)
2171
{
2172
Instruction insn = *pc++;
2173
StkId ra = VM_REG(LUAU_INSN_A(insn));
2174
int b = LUAU_INSN_B(insn);
2175
uint32_t aux = *pc++;
2176
2177
VM_PROTECT_PC(); // luaH_new may fail due to OOM
2178
2179
sethvalue(L, ra, luaH_new(L, aux, b == 0 ? 0 : (1 << (b - 1))));
2180
VM_PROTECT(luaC_checkGC(L));
2181
VM_NEXT();
2182
}
2183
2184
VM_CASE(LOP_DUPTABLE)
2185
{
2186
Instruction insn = *pc++;
2187
StkId ra = VM_REG(LUAU_INSN_A(insn));
2188
TValue* kv = VM_KV(LUAU_INSN_D(insn));
2189
2190
VM_PROTECT_PC(); // luaH_clone may fail due to OOM
2191
2192
sethvalue(L, ra, luaH_clone(L, hvalue(kv)));
2193
VM_PROTECT(luaC_checkGC(L));
2194
VM_NEXT();
2195
}
2196
2197
VM_CASE(LOP_SETLIST)
2198
{
2199
Instruction insn = *pc++;
2200
StkId ra = VM_REG(LUAU_INSN_A(insn));
2201
StkId rb = &base[LUAU_INSN_B(insn)]; // note: this can point to L->top if c == LUA_MULTRET making VM_REG unsafe to use
2202
int c = LUAU_INSN_C(insn) - 1;
2203
uint32_t index = *pc++;
2204
2205
if (c == LUA_MULTRET)
2206
{
2207
c = int(L->top - rb);
2208
L->top = L->ci->top;
2209
}
2210
2211
LuaTable* h = hvalue(ra);
2212
2213
// TODO: we really don't need this anymore
2214
if (!ttistable(ra))
2215
return; // temporary workaround to weaken a rather powerful exploitation primitive in case of a MITM attack on bytecode
2216
2217
int last = index + c - 1;
2218
if (last > h->sizearray)
2219
{
2220
VM_PROTECT_PC(); // luaH_resizearray may fail due to OOM
2221
2222
luaH_resizearray(L, h, last);
2223
}
2224
2225
TValue* array = h->array;
2226
2227
for (int i = 0; i < c; ++i)
2228
setobj2t(L, &array[index + i - 1], rb + i);
2229
2230
luaC_barrierfast(L, h);
2231
VM_NEXT();
2232
}
2233
2234
VM_CASE(LOP_FORNPREP)
2235
{
2236
Instruction insn = *pc++;
2237
StkId ra = VM_REG(LUAU_INSN_A(insn));
2238
2239
if (!ttisnumber(ra + 0) || !ttisnumber(ra + 1) || !ttisnumber(ra + 2))
2240
{
2241
// slow-path: can convert arguments to numbers and trigger Lua errors
2242
// Note: this doesn't reallocate stack so we don't need to recompute ra/base
2243
VM_PROTECT_PC();
2244
2245
luaV_prepareFORN(L, ra + 0, ra + 1, ra + 2);
2246
}
2247
2248
double limit = nvalue(ra + 0);
2249
double step = nvalue(ra + 1);
2250
double idx = nvalue(ra + 2);
2251
2252
// Note: make sure the loop condition is exactly the same between this and LOP_FORNLOOP so that we handle NaN/etc. consistently
2253
pc += (step > 0 ? idx <= limit : limit <= idx) ? 0 : LUAU_INSN_D(insn);
2254
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
2255
VM_NEXT();
2256
}
2257
2258
VM_CASE(LOP_FORNLOOP)
2259
{
2260
VM_INTERRUPT();
2261
Instruction insn = *pc++;
2262
StkId ra = VM_REG(LUAU_INSN_A(insn));
2263
LUAU_ASSERT(ttisnumber(ra + 0) && ttisnumber(ra + 1) && ttisnumber(ra + 2));
2264
2265
double limit = nvalue(ra + 0);
2266
double step = nvalue(ra + 1);
2267
double idx = nvalue(ra + 2) + step;
2268
2269
setnvalue(ra + 2, idx);
2270
2271
// Note: make sure the loop condition is exactly the same between this and LOP_FORNPREP so that we handle NaN/etc. consistently
2272
if (step > 0 ? idx <= limit : limit <= idx)
2273
{
2274
pc += LUAU_INSN_D(insn);
2275
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
2276
VM_NEXT();
2277
}
2278
else
2279
{
2280
// fallthrough to exit
2281
VM_NEXT();
2282
}
2283
}
2284
2285
VM_CASE(LOP_FORGPREP)
2286
{
2287
Instruction insn = *pc++;
2288
StkId ra = VM_REG(LUAU_INSN_A(insn));
2289
2290
if (ttisfunction(ra))
2291
{
2292
// will be called during FORGLOOP
2293
}
2294
else
2295
{
2296
LuaTable* mt = ttistable(ra) ? hvalue(ra)->metatable : ttisuserdata(ra) ? uvalue(ra)->metatable : cast_to(LuaTable*, NULL);
2297
2298
if (const TValue* fn = fasttm(L, mt, TM_ITER))
2299
{
2300
setobj2s(L, ra + 1, ra);
2301
setobj2s(L, ra, fn);
2302
2303
L->top = ra + 2; // func + self arg
2304
LUAU_ASSERT(L->top <= L->stack_last);
2305
2306
VM_PROTECT(luaD_call(L, ra, 3));
2307
L->top = L->ci->top;
2308
2309
// recompute ra since stack might have been reallocated
2310
ra = VM_REG(LUAU_INSN_A(insn));
2311
2312
// protect against __iter returning nil, since nil is used as a marker for builtin iteration in FORGLOOP
2313
if (ttisnil(ra))
2314
{
2315
VM_PROTECT_PC(); // next call always errors
2316
luaG_typeerror(L, ra, "call");
2317
}
2318
}
2319
else if (fasttm(L, mt, TM_CALL))
2320
{
2321
// table or userdata with __call, will be called during FORGLOOP
2322
// TODO: we might be able to stop supporting this depending on whether it's used in practice
2323
}
2324
else if (ttistable(ra))
2325
{
2326
// set up registers for builtin iteration
2327
setobj2s(L, ra + 1, ra);
2328
setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0)), LU_TAG_ITERATOR);
2329
setnilvalue(ra);
2330
}
2331
else
2332
{
2333
VM_PROTECT_PC(); // next call always errors
2334
luaG_typeerror(L, ra, "iterate over");
2335
}
2336
}
2337
2338
pc += LUAU_INSN_D(insn);
2339
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
2340
VM_NEXT();
2341
}
2342
2343
VM_CASE(LOP_FORGLOOP)
2344
{
2345
VM_INTERRUPT();
2346
Instruction insn = *pc++;
2347
StkId ra = VM_REG(LUAU_INSN_A(insn));
2348
uint32_t aux = *pc;
2349
2350
// fast-path: builtin table iteration
2351
// note: ra=nil guarantees ra+1=table and ra+2=userdata because of the setup by FORGPREP* opcodes
2352
// TODO: remove the table check per guarantee above
2353
if (ttisnil(ra) && ttistable(ra + 1))
2354
{
2355
LuaTable* h = hvalue(ra + 1);
2356
int index = int(reinterpret_cast<uintptr_t>(pvalue(ra + 2)));
2357
2358
int sizearray = h->sizearray;
2359
2360
// clear extra variables since we might have more than two
2361
// note: while aux encodes ipairs bit, when set we always use 2 variables, so it's safe to check this via a signed comparison
2362
if (LUAU_UNLIKELY(int(aux) > 2))
2363
for (int i = 2; i < int(aux); ++i)
2364
setnilvalue(ra + 3 + i);
2365
2366
// terminate ipairs-style traversal early when encountering nil
2367
if (int(aux) < 0 && (unsigned(index) >= unsigned(sizearray) || ttisnil(&h->array[index])))
2368
{
2369
pc++;
2370
VM_NEXT();
2371
}
2372
2373
// first we advance index through the array portion
2374
while (unsigned(index) < unsigned(sizearray))
2375
{
2376
TValue* e = &h->array[index];
2377
2378
if (!ttisnil(e))
2379
{
2380
setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(index + 1)), LU_TAG_ITERATOR);
2381
setnvalue(ra + 3, double(index + 1));
2382
setobj2s(L, ra + 4, e);
2383
2384
pc += LUAU_INSN_D(insn);
2385
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
2386
VM_NEXT();
2387
}
2388
2389
index++;
2390
}
2391
2392
int sizenode = 1 << h->lsizenode;
2393
2394
// then we advance index through the hash portion
2395
while (unsigned(index - sizearray) < unsigned(sizenode))
2396
{
2397
LuaNode* n = &h->node[index - sizearray];
2398
2399
if (!ttisnil(gval(n)))
2400
{
2401
setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(index + 1)), LU_TAG_ITERATOR);
2402
getnodekey(L, ra + 3, n);
2403
setobj2s(L, ra + 4, gval(n));
2404
2405
pc += LUAU_INSN_D(insn);
2406
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
2407
VM_NEXT();
2408
}
2409
2410
index++;
2411
}
2412
2413
// fallthrough to exit
2414
pc++;
2415
VM_NEXT();
2416
}
2417
else
2418
{
2419
// note: it's safe to push arguments past top for complicated reasons (see top of the file)
2420
setobj2s(L, ra + 3 + 2, ra + 2);
2421
setobj2s(L, ra + 3 + 1, ra + 1);
2422
setobj2s(L, ra + 3, ra);
2423
2424
L->top = ra + 3 + 3; // func + 2 args (state and index)
2425
LUAU_ASSERT(L->top <= L->stack_last);
2426
2427
VM_PROTECT(luaD_call(L, ra + 3, uint8_t(aux)));
2428
L->top = L->ci->top;
2429
2430
// recompute ra since stack might have been reallocated
2431
ra = VM_REG(LUAU_INSN_A(insn));
2432
2433
// copy first variable back into the iteration index
2434
setobj2s(L, ra + 2, ra + 3);
2435
2436
// note that we need to increment pc by 1 to exit the loop since we need to skip over aux
2437
pc += ttisnil(ra + 3) ? 1 : LUAU_INSN_D(insn);
2438
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
2439
VM_NEXT();
2440
}
2441
}
2442
2443
VM_CASE(LOP_FORGPREP_INEXT)
2444
{
2445
Instruction insn = *pc++;
2446
StkId ra = VM_REG(LUAU_INSN_A(insn));
2447
2448
// fast-path: ipairs/inext
2449
if (cl->env->safeenv && ttistable(ra + 1) && ttisnumber(ra + 2) && nvalue(ra + 2) == 0.0)
2450
{
2451
setnilvalue(ra);
2452
// ra+1 is already the table
2453
setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0)), LU_TAG_ITERATOR);
2454
}
2455
else if (!ttisfunction(ra))
2456
{
2457
VM_PROTECT_PC(); // next call always errors
2458
luaG_typeerror(L, ra, "iterate over");
2459
}
2460
2461
pc += LUAU_INSN_D(insn);
2462
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
2463
VM_NEXT();
2464
}
2465
2466
VM_CASE(LOP_FORGPREP_NEXT)
2467
{
2468
Instruction insn = *pc++;
2469
StkId ra = VM_REG(LUAU_INSN_A(insn));
2470
2471
// fast-path: pairs/next
2472
if (cl->env->safeenv && ttistable(ra + 1) && ttisnil(ra + 2))
2473
{
2474
setnilvalue(ra);
2475
// ra+1 is already the table
2476
setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0)), LU_TAG_ITERATOR);
2477
}
2478
else if (!ttisfunction(ra))
2479
{
2480
VM_PROTECT_PC(); // next call always errors
2481
luaG_typeerror(L, ra, "iterate over");
2482
}
2483
2484
pc += LUAU_INSN_D(insn);
2485
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
2486
VM_NEXT();
2487
}
2488
2489
VM_CASE(LOP_NATIVECALL)
2490
{
2491
Proto* p = cl->l.p;
2492
LUAU_ASSERT(p->execdata);
2493
2494
CallInfo* ci = L->ci;
2495
ci->flags = LUA_CALLINFO_NATIVE;
2496
ci->savedpc = p->code;
2497
2498
#if VM_HAS_NATIVE
2499
if (L->global->ecb.enter(L, p) == 1)
2500
goto reentry;
2501
else
2502
goto exit;
2503
#else
2504
LUAU_ASSERT(!"Opcode is only valid when VM_HAS_NATIVE is defined");
2505
LUAU_UNREACHABLE();
2506
#endif
2507
}
2508
2509
VM_CASE(LOP_GETVARARGS)
2510
{
2511
Instruction insn = *pc++;
2512
int b = LUAU_INSN_B(insn) - 1;
2513
int n = cast_int(base - L->ci->func) - cl->l.p->numparams - 1;
2514
2515
if (b == LUA_MULTRET)
2516
{
2517
VM_PROTECT(luaD_checkstack(L, n));
2518
StkId ra = VM_REG(LUAU_INSN_A(insn)); // previous call may change the stack
2519
2520
for (int j = 0; j < n; j++)
2521
setobj2s(L, ra + j, base - n + j);
2522
2523
L->top = ra + n;
2524
VM_NEXT();
2525
}
2526
else
2527
{
2528
StkId ra = VM_REG(LUAU_INSN_A(insn));
2529
2530
for (int j = 0; j < b && j < n; j++)
2531
setobj2s(L, ra + j, base - n + j);
2532
for (int j = n; j < b; j++)
2533
setnilvalue(ra + j);
2534
VM_NEXT();
2535
}
2536
}
2537
2538
VM_CASE(LOP_DUPCLOSURE)
2539
{
2540
Instruction insn = *pc++;
2541
StkId ra = VM_REG(LUAU_INSN_A(insn));
2542
TValue* kv = VM_KV(LUAU_INSN_D(insn));
2543
2544
Closure* kcl = clvalue(kv);
2545
2546
VM_PROTECT_PC(); // luaF_newLclosure may fail due to OOM
2547
2548
// clone closure if the environment is not shared
2549
// note: we save closure to stack early in case the code below wants to capture it by value
2550
Closure* ncl = (kcl->env == cl->env) ? kcl : luaF_newLclosure(L, kcl->nupvalues, cl->env, kcl->l.p);
2551
setclvalue(L, ra, ncl);
2552
2553
// this loop does three things:
2554
// - if the closure was created anew, it just fills it with upvalues
2555
// - if the closure from the constant table is used, it fills it with upvalues so that it can be shared in the future
2556
// - if the closure is reused, it checks if the reuse is safe via rawequal, and falls back to duplicating the closure
2557
// normally this would use two separate loops, for reuse check and upvalue setup, but MSVC codegen goes crazy if you do that
2558
for (int ui = 0; ui < kcl->nupvalues; ++ui)
2559
{
2560
Instruction uinsn = pc[ui];
2561
LUAU_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE);
2562
LUAU_ASSERT(LUAU_INSN_A(uinsn) == LCT_VAL || LUAU_INSN_A(uinsn) == LCT_UPVAL);
2563
2564
TValue* uv = (LUAU_INSN_A(uinsn) == LCT_VAL) ? VM_REG(LUAU_INSN_B(uinsn)) : VM_UV(LUAU_INSN_B(uinsn));
2565
2566
// check if the existing closure is safe to reuse
2567
if (ncl == kcl && luaO_rawequalObj(&ncl->l.uprefs[ui], uv))
2568
continue;
2569
2570
// lazily clone the closure and update the upvalues
2571
if (ncl == kcl && kcl->preload == 0)
2572
{
2573
ncl = luaF_newLclosure(L, kcl->nupvalues, cl->env, kcl->l.p);
2574
setclvalue(L, ra, ncl);
2575
2576
ui = -1; // restart the loop to fill all upvalues
2577
continue;
2578
}
2579
2580
// this updates a newly created closure, or an existing closure created during preload, in which case we need a barrier
2581
setobj(L, &ncl->l.uprefs[ui], uv);
2582
luaC_barrier(L, ncl, uv);
2583
}
2584
2585
// this is a noop if ncl is newly created or shared successfully, but it has to run after the closure is preloaded for the first time
2586
ncl->preload = 0;
2587
2588
if (kcl != ncl)
2589
VM_PROTECT(luaC_checkGC(L));
2590
2591
pc += kcl->nupvalues;
2592
VM_NEXT();
2593
}
2594
2595
VM_CASE(LOP_PREPVARARGS)
2596
{
2597
Instruction insn = *pc++;
2598
int numparams = LUAU_INSN_A(insn);
2599
2600
// all fixed parameters are copied after the top so we need more stack space
2601
VM_PROTECT(luaD_checkstack(L, cl->stacksize + numparams));
2602
2603
// the caller must have filled extra fixed arguments with nil
2604
LUAU_ASSERT(cast_int(L->top - base) >= numparams);
2605
2606
// move fixed parameters to final position
2607
StkId fixed = base; // first fixed argument
2608
base = L->top; // final position of first argument
2609
2610
for (int i = 0; i < numparams; ++i)
2611
{
2612
setobj2s(L, base + i, fixed + i);
2613
setnilvalue(fixed + i);
2614
}
2615
2616
// rewire our stack frame to point to the new base
2617
L->ci->base = base;
2618
L->ci->top = base + cl->stacksize;
2619
2620
L->base = base;
2621
L->top = L->ci->top;
2622
VM_NEXT();
2623
}
2624
2625
VM_CASE(LOP_JUMPBACK)
2626
{
2627
VM_INTERRUPT();
2628
Instruction insn = *pc++;
2629
2630
pc += LUAU_INSN_D(insn);
2631
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
2632
VM_NEXT();
2633
}
2634
2635
VM_CASE(LOP_LOADKX)
2636
{
2637
Instruction insn = *pc++;
2638
StkId ra = VM_REG(LUAU_INSN_A(insn));
2639
uint32_t aux = *pc++;
2640
TValue* kv = VM_KV(aux);
2641
2642
setobj2s(L, ra, kv);
2643
VM_NEXT();
2644
}
2645
2646
VM_CASE(LOP_JUMPX)
2647
{
2648
VM_INTERRUPT();
2649
Instruction insn = *pc++;
2650
2651
pc += LUAU_INSN_E(insn);
2652
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
2653
VM_NEXT();
2654
}
2655
2656
VM_CASE(LOP_FASTCALL)
2657
{
2658
Instruction insn = *pc++;
2659
int bfid = LUAU_INSN_A(insn);
2660
int skip = LUAU_INSN_C(insn);
2661
LUAU_ASSERT(unsigned(pc - cl->l.p->code + skip) < unsigned(cl->l.p->sizecode));
2662
2663
Instruction call = pc[skip];
2664
LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);
2665
2666
StkId ra = VM_REG(LUAU_INSN_A(call));
2667
2668
int nparams = LUAU_INSN_B(call) - 1;
2669
int nresults = LUAU_INSN_C(call) - 1;
2670
2671
nparams = (nparams == LUA_MULTRET) ? int(L->top - ra - 1) : nparams;
2672
2673
luau_FastFunction f = luauF_table[bfid];
2674
LUAU_ASSERT(f);
2675
2676
if (cl->env->safeenv)
2677
{
2678
VM_PROTECT_PC(); // f may fail due to OOM
2679
2680
int n = f(L, ra, ra + 1, nresults, ra + 2, nparams);
2681
2682
if (n >= 0)
2683
{
2684
// when nresults != MULTRET, L->top might be pointing to the middle of stack frame if nparams is equal to MULTRET
2685
// instead of restoring L->top to L->ci->top if nparams is MULTRET, we do it unconditionally to skip an extra check
2686
L->top = (nresults == LUA_MULTRET) ? ra + n : L->ci->top;
2687
2688
pc += skip + 1; // skip instructions that compute function as well as CALL
2689
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
2690
VM_NEXT();
2691
}
2692
else
2693
{
2694
// continue execution through the fallback code
2695
VM_NEXT();
2696
}
2697
}
2698
else
2699
{
2700
// continue execution through the fallback code
2701
VM_NEXT();
2702
}
2703
}
2704
2705
VM_CASE(LOP_COVERAGE)
2706
{
2707
Instruction insn = *pc++;
2708
int hits = LUAU_INSN_E(insn);
2709
2710
// update hits with saturated add and patch the instruction in place
2711
hits = (hits < (1 << 23) - 1) ? hits + 1 : hits;
2712
VM_PATCH_E(pc - 1, hits);
2713
2714
VM_NEXT();
2715
}
2716
2717
VM_CASE(LOP_CAPTURE)
2718
{
2719
LUAU_ASSERT(!"CAPTURE is a pseudo-opcode and must be executed as part of NEWCLOSURE");
2720
LUAU_UNREACHABLE();
2721
}
2722
2723
VM_CASE(LOP_SUBRK)
2724
{
2725
Instruction insn = *pc++;
2726
StkId ra = VM_REG(LUAU_INSN_A(insn));
2727
TValue* kv = VM_KV(LUAU_INSN_B(insn));
2728
StkId rc = VM_REG(LUAU_INSN_C(insn));
2729
2730
// fast-path
2731
if (ttisnumber(rc))
2732
{
2733
setnvalue(ra, nvalue(kv) - nvalue(rc));
2734
VM_NEXT();
2735
}
2736
else
2737
{
2738
// slow-path, may invoke C/Lua via metamethods
2739
VM_PROTECT(luaV_doarithimpl<TM_SUB>(L, ra, kv, rc));
2740
VM_NEXT();
2741
}
2742
}
2743
2744
VM_CASE(LOP_DIVRK)
2745
{
2746
Instruction insn = *pc++;
2747
StkId ra = VM_REG(LUAU_INSN_A(insn));
2748
TValue* kv = VM_KV(LUAU_INSN_B(insn));
2749
StkId rc = VM_REG(LUAU_INSN_C(insn));
2750
2751
// fast-path
2752
if (LUAU_LIKELY(ttisnumber(rc)))
2753
{
2754
setnvalue(ra, nvalue(kv) / nvalue(rc));
2755
VM_NEXT();
2756
}
2757
else if (ttisvector(rc))
2758
{
2759
float nb = cast_to(float, nvalue(kv));
2760
const float* vc = vvalue(rc);
2761
setvvalue(ra, nb / vc[0], nb / vc[1], nb / vc[2], nb / vc[3]);
2762
VM_NEXT();
2763
}
2764
else
2765
{
2766
// slow-path, may invoke C/Lua via metamethods
2767
VM_PROTECT(luaV_doarithimpl<TM_DIV>(L, ra, kv, rc));
2768
VM_NEXT();
2769
}
2770
}
2771
2772
VM_CASE(LOP_FASTCALL1)
2773
{
2774
Instruction insn = *pc++;
2775
int bfid = LUAU_INSN_A(insn);
2776
TValue* arg = VM_REG(LUAU_INSN_B(insn));
2777
int skip = LUAU_INSN_C(insn);
2778
2779
LUAU_ASSERT(unsigned(pc - cl->l.p->code + skip) < unsigned(cl->l.p->sizecode));
2780
2781
Instruction call = pc[skip];
2782
LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);
2783
2784
StkId ra = VM_REG(LUAU_INSN_A(call));
2785
2786
int nparams = 1;
2787
int nresults = LUAU_INSN_C(call) - 1;
2788
2789
luau_FastFunction f = luauF_table[bfid];
2790
LUAU_ASSERT(f);
2791
2792
if (cl->env->safeenv)
2793
{
2794
VM_PROTECT_PC(); // f may fail due to OOM
2795
2796
int n = f(L, ra, arg, nresults, NULL, nparams);
2797
2798
if (n >= 0)
2799
{
2800
if (nresults == LUA_MULTRET)
2801
L->top = ra + n;
2802
2803
pc += skip + 1; // skip instructions that compute function as well as CALL
2804
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
2805
VM_NEXT();
2806
}
2807
else
2808
{
2809
// continue execution through the fallback code
2810
VM_NEXT();
2811
}
2812
}
2813
else
2814
{
2815
// continue execution through the fallback code
2816
VM_NEXT();
2817
}
2818
}
2819
2820
VM_CASE(LOP_FASTCALL2)
2821
{
2822
Instruction insn = *pc++;
2823
int bfid = LUAU_INSN_A(insn);
2824
int skip = LUAU_INSN_C(insn) - 1;
2825
uint32_t aux = *pc++;
2826
TValue* arg1 = VM_REG(LUAU_INSN_B(insn));
2827
TValue* arg2 = VM_REG(aux);
2828
2829
LUAU_ASSERT(unsigned(pc - cl->l.p->code + skip) < unsigned(cl->l.p->sizecode));
2830
2831
Instruction call = pc[skip];
2832
LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);
2833
2834
StkId ra = VM_REG(LUAU_INSN_A(call));
2835
2836
int nparams = 2;
2837
int nresults = LUAU_INSN_C(call) - 1;
2838
2839
luau_FastFunction f = luauF_table[bfid];
2840
LUAU_ASSERT(f);
2841
2842
if (cl->env->safeenv)
2843
{
2844
VM_PROTECT_PC(); // f may fail due to OOM
2845
2846
int n = f(L, ra, arg1, nresults, arg2, nparams);
2847
2848
if (n >= 0)
2849
{
2850
if (nresults == LUA_MULTRET)
2851
L->top = ra + n;
2852
2853
pc += skip + 1; // skip instructions that compute function as well as CALL
2854
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
2855
VM_NEXT();
2856
}
2857
else
2858
{
2859
// continue execution through the fallback code
2860
VM_NEXT();
2861
}
2862
}
2863
else
2864
{
2865
// continue execution through the fallback code
2866
VM_NEXT();
2867
}
2868
}
2869
2870
VM_CASE(LOP_FASTCALL2K)
2871
{
2872
Instruction insn = *pc++;
2873
int bfid = LUAU_INSN_A(insn);
2874
int skip = LUAU_INSN_C(insn) - 1;
2875
uint32_t aux = *pc++;
2876
TValue* arg1 = VM_REG(LUAU_INSN_B(insn));
2877
TValue* arg2 = VM_KV(aux);
2878
2879
LUAU_ASSERT(unsigned(pc - cl->l.p->code + skip) < unsigned(cl->l.p->sizecode));
2880
2881
Instruction call = pc[skip];
2882
LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);
2883
2884
StkId ra = VM_REG(LUAU_INSN_A(call));
2885
2886
int nparams = 2;
2887
int nresults = LUAU_INSN_C(call) - 1;
2888
2889
luau_FastFunction f = luauF_table[bfid];
2890
LUAU_ASSERT(f);
2891
2892
if (cl->env->safeenv)
2893
{
2894
VM_PROTECT_PC(); // f may fail due to OOM
2895
2896
int n = f(L, ra, arg1, nresults, arg2, nparams);
2897
2898
if (n >= 0)
2899
{
2900
if (nresults == LUA_MULTRET)
2901
L->top = ra + n;
2902
2903
pc += skip + 1; // skip instructions that compute function as well as CALL
2904
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
2905
VM_NEXT();
2906
}
2907
else
2908
{
2909
// continue execution through the fallback code
2910
VM_NEXT();
2911
}
2912
}
2913
else
2914
{
2915
// continue execution through the fallback code
2916
VM_NEXT();
2917
}
2918
}
2919
2920
VM_CASE(LOP_FASTCALL3)
2921
{
2922
Instruction insn = *pc++;
2923
int bfid = LUAU_INSN_A(insn);
2924
int skip = LUAU_INSN_C(insn) - 1;
2925
uint32_t aux = *pc++;
2926
TValue* arg1 = VM_REG(LUAU_INSN_B(insn));
2927
TValue* arg2 = VM_REG(LUAU_INSN_AUX_A(aux));
2928
TValue* arg3 = VM_REG(LUAU_INSN_AUX_B(aux));
2929
2930
LUAU_ASSERT(unsigned(pc - cl->l.p->code + skip) < unsigned(cl->l.p->sizecode));
2931
2932
Instruction call = pc[skip];
2933
LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);
2934
2935
StkId ra = VM_REG(LUAU_INSN_A(call));
2936
2937
int nparams = 3;
2938
int nresults = LUAU_INSN_C(call) - 1;
2939
2940
luau_FastFunction f = luauF_table[bfid];
2941
LUAU_ASSERT(f);
2942
2943
if (cl->env->safeenv)
2944
{
2945
VM_PROTECT_PC(); // f may fail due to OOM
2946
2947
// note: it's safe to push arguments past top for complicated reasons (see top of the file)
2948
LUAU_ASSERT(L->top + 2 < L->stack + L->stacksize);
2949
StkId top = L->top;
2950
setobj2s(L, top, arg2);
2951
setobj2s(L, top + 1, arg3);
2952
2953
int n = f(L, ra, arg1, nresults, top, nparams);
2954
2955
if (n >= 0)
2956
{
2957
if (nresults == LUA_MULTRET)
2958
L->top = ra + n;
2959
2960
pc += skip + 1; // skip instructions that compute function as well as CALL
2961
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
2962
VM_NEXT();
2963
}
2964
else
2965
{
2966
// continue execution through the fallback code
2967
VM_NEXT();
2968
}
2969
}
2970
else
2971
{
2972
// continue execution through the fallback code
2973
VM_NEXT();
2974
}
2975
}
2976
2977
VM_CASE(LOP_BREAK)
2978
{
2979
LUAU_ASSERT(cl->l.p->debuginsn);
2980
2981
uint8_t op = cl->l.p->debuginsn[unsigned(pc - cl->l.p->code)];
2982
LUAU_ASSERT(op != LOP_BREAK);
2983
2984
if (L->global->cb.debugbreak)
2985
{
2986
VM_PROTECT(luau_callhook(L, L->global->cb.debugbreak, NULL));
2987
2988
// allow debugbreak hook to put thread into error/yield state
2989
if (L->status != 0)
2990
goto exit;
2991
}
2992
2993
VM_CONTINUE(op);
2994
}
2995
2996
VM_CASE(LOP_JUMPXEQKNIL)
2997
{
2998
Instruction insn = *pc++;
2999
uint32_t aux = *pc;
3000
StkId ra = VM_REG(LUAU_INSN_A(insn));
3001
3002
static_assert(LUA_TNIL == 0, "we expect type-1 to be negative iff type is nil");
3003
// condition is equivalent to: int(ttisnil(ra)) != LUAU_INSN_AUX_NOT(aux)
3004
pc += int((ttype(ra) - 1) ^ aux) < 0 ? LUAU_INSN_D(insn) : 1;
3005
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
3006
VM_NEXT();
3007
}
3008
3009
VM_CASE(LOP_JUMPXEQKB)
3010
{
3011
Instruction insn = *pc++;
3012
uint32_t aux = *pc;
3013
StkId ra = VM_REG(LUAU_INSN_A(insn));
3014
3015
pc += int(ttisboolean(ra) && bvalue(ra) == int(LUAU_INSN_AUX_KB(aux))) != LUAU_INSN_AUX_NOT(aux) ? LUAU_INSN_D(insn) : 1;
3016
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
3017
VM_NEXT();
3018
}
3019
3020
VM_CASE(LOP_JUMPXEQKN)
3021
{
3022
Instruction insn = *pc++;
3023
uint32_t aux = *pc;
3024
StkId ra = VM_REG(LUAU_INSN_A(insn));
3025
TValue* kv = VM_KV(LUAU_INSN_AUX_KV(aux));
3026
LUAU_ASSERT(ttisnumber(kv));
3027
3028
#if defined(__aarch64__)
3029
// On several ARM chips (Apple M1/M2, Neoverse N1), comparing the result of a floating-point comparison is expensive, and a branch
3030
// is much cheaper; on some 32-bit ARM chips (Cortex A53) the performance is about the same so we prefer less branchy variant there
3031
if (LUAU_INSN_AUX_NOT(aux))
3032
pc += !(ttisnumber(ra) && nvalue(ra) == nvalue(kv)) ? LUAU_INSN_D(insn) : 1;
3033
else
3034
pc += (ttisnumber(ra) && nvalue(ra) == nvalue(kv)) ? LUAU_INSN_D(insn) : 1;
3035
#else
3036
pc += int(ttisnumber(ra) && nvalue(ra) == nvalue(kv)) != LUAU_INSN_AUX_NOT(aux) ? LUAU_INSN_D(insn) : 1;
3037
#endif
3038
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
3039
VM_NEXT();
3040
}
3041
3042
VM_CASE(LOP_JUMPXEQKS)
3043
{
3044
Instruction insn = *pc++;
3045
uint32_t aux = *pc;
3046
StkId ra = VM_REG(LUAU_INSN_A(insn));
3047
TValue* kv = VM_KV(LUAU_INSN_AUX_KV(aux));
3048
LUAU_ASSERT(ttisstring(kv));
3049
3050
pc += int(ttisstring(ra) && gcvalue(ra) == gcvalue(kv)) != LUAU_INSN_AUX_NOT(aux) ? LUAU_INSN_D(insn) : 1;
3051
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
3052
VM_NEXT();
3053
}
3054
3055
#if !VM_USE_CGOTO
3056
default:
3057
LUAU_ASSERT(!"Unknown opcode");
3058
LUAU_UNREACHABLE(); // improves switch() codegen by eliding opcode bounds checks
3059
#endif
3060
}
3061
}
3062
3063
exit:;
3064
}
3065
3066
// Public entry point: selects the interpreter specialization matching the
// thread's single-step debugging mode and runs it. The two template
// instantiations are compiled separately so the hot (non-stepping) path
// carries no per-instruction debug checks.
void luau_execute(lua_State* L)
{
    void (*interpreter)(lua_State*) = L->singlestep ? luau_execute<true> : luau_execute<false>;

    interpreter(L);
}
3073
3074
// Sets up a call to the value at 'func' (arguments are already on the stack above it).
// For a Lua closure, builds the new call frame and returns PCRLUA so the caller can
// enter the bytecode interpreter. For a C function, invokes it immediately and returns
// PCRC on completion or PCRYIELD if it yielded. 'nresults' is the number of results the
// caller expects (LUA_MULTRET for "all of them").
int luau_precall(lua_State* L, StkId func, int nresults)
{
    if (!ttisfunction(func))
    {
        // not callable directly: substitute the __call metamethod, shifting arguments up
        luaV_tryfuncTM(L, func);
        // L->top is incremented by tryfuncTM
    }

    Closure* ccl = clvalue(func);

    // push a new call frame; fields are filled before the stack check below so that
    // error unwinding sees a consistent frame
    CallInfo* ci = incr_ci(L);
    ci->func = func;
    ci->base = func + 1;
    ci->top = L->top + ccl->stacksize;
    ci->savedpc = NULL;
    ci->flags = 0;
    ci->nresults = nresults;

    L->base = ci->base;
    // Note: L->top is assigned externally

    // may grow (and thus reallocate) the stack or raise on OOM; ci stays valid because
    // CallInfo records live in their own array, but raw StkId pointers may be rebased
    luaD_checkstackfornewci(L, ccl->stacksize);
    LUAU_ASSERT(ci->top <= L->stack_last);

    if (!ccl->isC)
    {
        Proto* p = ccl->l.p;

        // fill unused parameters with nil
        StkId argi = L->top;
        StkId argend = L->base + p->numparams;
        while (argi < argend)
            setnilvalue(argi++); // complete missing arguments
        // vararg functions keep top at the last fixed parameter (PREPVARARGS finishes
        // frame setup); regular functions get the full frame immediately
        L->top = p->is_vararg ? argi : ci->top;

        ci->savedpc = p->code;

#if VM_HAS_NATIVE
        // route execution through the native entry point when compiled code is available
        if (p->exectarget != 0 && p->execdata)
            ci->flags = LUA_CALLINFO_NATIVE;
#endif

        return PCRLUA;
    }
    else
    {
        lua_CFunction func = ccl->c.f;
        int n = func(L);

        // yield
        if (n < 0)
            return PCRYIELD;

        // ci is our callinfo, cip is our parent
        // note: re-read L->ci rather than reusing the local above, since the C function
        // may have run nested calls that moved/reallocated the CallInfo array
        CallInfo* ci = L->ci;
        CallInfo* cip = ci - 1;

        // copy return values into parent stack (but only up to nresults!), fill the rest with nil
        // TODO: it might be worthwhile to handle the case when nresults==b explicitly?
        StkId res = ci->func; // results overwrite the function slot onward
        StkId vali = L->top - n;
        StkId valend = L->top;

        int i;
        // with nresults == LUA_MULTRET (-1) the counter starts negative, so the copy loop
        // is bounded only by the available values and the nil-fill loop never runs
        for (i = nresults; i != 0 && vali < valend; i--)
            setobj2s(L, res++, vali++);
        while (i-- > 0)
            setnilvalue(res++);

        // pop the stack frame
        L->ci = cip;
        L->base = cip->base;
        L->top = res;

        return PCRC;
    }
}
3151
3152
// Finishes an interrupted OP_CALL: moves the results starting at 'first' down into the
// caller's frame (truncating or nil-padding to the expected count) and pops the frame.
void luau_poscall(lua_State* L, StkId first)
{
    // current frame and the parent we are returning into
    CallInfo* frame = L->ci;
    CallInfo* parent = frame - 1;

    // results land where the callee's function value used to be
    StkId dst = frame->func;
    StkId src = first;
    StkId srcend = L->top;

    // copy up to 'nresults' values; with LUA_MULTRET (-1) the counter starts negative,
    // so copying is bounded only by the values actually produced and no nils are added
    // TODO: it might be worthwhile to handle the case when nresults==b explicitly?
    int want = frame->nresults;
    while (want != 0 && src < srcend)
    {
        setobj2s(L, dst, src);
        dst++;
        src++;
        want--;
    }
    while (want-- > 0)
        setnilvalue(dst++);

    // pop the stack frame and restore the parent's view of the stack
    L->ci = parent;
    L->base = parent->base;
    L->top = (frame->nresults == LUA_MULTRET) ? dst : parent->top;
}
3176
3177