/*
 * Python/assemble.c — final stage of the CPython compiler: turns an
 * instruction sequence into a PyCodeObject.
 * (Web-viewer navigation text removed from this extracted copy.)
 */
1
#include <stdbool.h>

#include "Python.h"
#include "pycore_code.h"          // write_location_entry_start()
#include "pycore_compile.h"
#include "pycore_opcode.h"        // _PyOpcode_Caches[] and opcode category macros
#include "pycore_opcode_utils.h"  // IS_BACKWARDS_JUMP_OPCODE
#include "opcode_metadata.h"      // IS_PSEUDO_INSTR

/* Initial sizes of the grow-on-demand output buffers; they are doubled
   as needed during assembly and trimmed to final size at the end. */
#define DEFAULT_CODE_SIZE 128
#define DEFAULT_LNOTAB_SIZE 16
#define DEFAULT_CNOTAB_SIZE 32

/* Local success/failure return conventions (shadow any earlier definitions). */
#undef SUCCESS
#undef ERROR
#define SUCCESS 0
#define ERROR -1
19
20
/* Evaluate X; if it reports failure (-1), return ERROR from the enclosing
 * function.  Wrapped in do { } while (0) so the macro expands to a single
 * statement and cannot capture a following `else` (the bare-if form has a
 * dangling-else hazard when used as `if (c) RETURN_IF_ERROR(x); else ...`).
 */
#define RETURN_IF_ERROR(X)  \
    do {                    \
        if ((X) == -1) {    \
            return ERROR;   \
        }                   \
    } while (0)
24
25
/* Shorthand aliases for the compiler front-end's types used throughout
   this file. */
typedef _PyCompilerSrcLocation location;
typedef _PyCompile_Instruction instruction;
typedef _PyCompile_InstructionSequence instr_sequence;
28
29
static inline bool
30
same_location(location a, location b)
31
{
32
return a.lineno == b.lineno &&
33
a.end_lineno == b.end_lineno &&
34
a.col_offset == b.col_offset &&
35
a.end_col_offset == b.end_col_offset;
36
}
37
38
static int
39
instr_size(instruction *instr)
40
{
41
int opcode = instr->i_opcode;
42
int oparg = instr->i_oparg;
43
assert(!IS_PSEUDO_INSTR(opcode));
44
assert(OPCODE_HAS_ARG(opcode) || oparg == 0);
45
int extended_args = (0xFFFFFF < oparg) + (0xFFFF < oparg) + (0xFF < oparg);
46
int caches = _PyOpcode_Caches[opcode];
47
return extended_args + 1 + caches;
48
}
49
50
/* Accumulates the three byte streams that make up a code object: the
 * bytecode, the exception table, and the location (line) table.  Each
 * PyBytes buffer is over-allocated during assembly and trimmed to its
 * written size in assemble_emit().
 */
struct assembler {
    PyObject *a_bytecode;       /* bytes containing bytecode */
    int a_offset;               /* offset into bytecode, in code units */
    PyObject *a_except_table;   /* bytes containing exception table */
    int a_except_table_off;     /* offset into exception table, in bytes */
    /* Location Info */
    int a_lineno;               /* lineno of last emitted instruction */
    PyObject* a_linetable;      /* bytes containing location info */
    int a_location_off;         /* offset of last written location info frame */
};
60
61
static int
62
assemble_init(struct assembler *a, int firstlineno)
63
{
64
memset(a, 0, sizeof(struct assembler));
65
a->a_lineno = firstlineno;
66
a->a_linetable = NULL;
67
a->a_location_off = 0;
68
a->a_except_table = NULL;
69
a->a_bytecode = PyBytes_FromStringAndSize(NULL, DEFAULT_CODE_SIZE);
70
if (a->a_bytecode == NULL) {
71
goto error;
72
}
73
a->a_linetable = PyBytes_FromStringAndSize(NULL, DEFAULT_CNOTAB_SIZE);
74
if (a->a_linetable == NULL) {
75
goto error;
76
}
77
a->a_except_table = PyBytes_FromStringAndSize(NULL, DEFAULT_LNOTAB_SIZE);
78
if (a->a_except_table == NULL) {
79
goto error;
80
}
81
return SUCCESS;
82
error:
83
Py_XDECREF(a->a_bytecode);
84
Py_XDECREF(a->a_linetable);
85
Py_XDECREF(a->a_except_table);
86
return ERROR;
87
}
88
89
static void
90
assemble_free(struct assembler *a)
91
{
92
Py_XDECREF(a->a_bytecode);
93
Py_XDECREF(a->a_linetable);
94
Py_XDECREF(a->a_except_table);
95
}
96
97
static inline void
98
write_except_byte(struct assembler *a, int byte) {
99
unsigned char *p = (unsigned char *) PyBytes_AS_STRING(a->a_except_table);
100
p[a->a_except_table_off++] = byte;
101
}
102
103
#define CONTINUATION_BIT 64

/* Emit `value` (0 <= value < 2**30) as a big-endian sequence of 6-bit
 * chunks.  CONTINUATION_BIT (bit 6) is set on every byte except the last;
 * `msb` (bit 7, at most) is OR-ed into the first byte only and marks the
 * start of a table entry.  See Objects/exception_handling_notes.txt.
 */
static void
assemble_emit_exception_table_item(struct assembler *a, int value, int msb)
{
    assert ((msb | 128) == 128);
    assert(value >= 0 && value < (1 << 30));
    if (value >= 1 << 24) {
        write_except_byte(a, (value >> 24) | CONTINUATION_BIT | msb);
        msb = 0;  /* msb goes only on the first byte written */
    }
    if (value >= 1 << 18) {
        write_except_byte(a, ((value >> 18)&0x3f) | CONTINUATION_BIT | msb);
        msb = 0;
    }
    if (value >= 1 << 12) {
        write_except_byte(a, ((value >> 12)&0x3f) | CONTINUATION_BIT | msb);
        msb = 0;
    }
    if (value >= 1 << 6) {
        write_except_byte(a, ((value >> 6)&0x3f) | CONTINUATION_BIT | msb);
        msb = 0;
    }
    /* Final byte: no continuation bit. */
    write_except_byte(a, (value&0x3f) | msb);
}
128
129
/* See Objects/exception_handling_notes.txt for details of layout */
#define MAX_SIZE_OF_ENTRY 20

/* Append one exception-table entry covering code units [start, end),
 * handled at `handler_offset`.  An entry is four variable-length items:
 * start, size, target, and (depth << 1) | preserve_lasti; the first item
 * carries the MSB marker that delimits entries.
 * Returns SUCCESS, or ERROR if growing the table fails.
 */
static int
assemble_emit_exception_table_entry(struct assembler *a, int start, int end,
                                    int handler_offset,
                                    _PyCompile_ExceptHandlerInfo *handler)
{
    Py_ssize_t len = PyBytes_GET_SIZE(a->a_except_table);
    /* Grow the buffer so a maximal entry is guaranteed to fit. */
    if (a->a_except_table_off + MAX_SIZE_OF_ENTRY >= len) {
        RETURN_IF_ERROR(_PyBytes_Resize(&a->a_except_table, len * 2));
    }
    int size = end-start;
    assert(end > start);
    int target = handler_offset;
    /* Recorded depth is h_startdepth - 1, minus one more slot when
       lasti is preserved on the stack. */
    int depth = handler->h_startdepth - 1;
    if (handler->h_preserve_lasti > 0) {
        depth -= 1;
    }
    assert(depth >= 0);
    int depth_lasti = (depth<<1) | handler->h_preserve_lasti;
    /* (1<<7) marks the first item of a new entry. */
    assemble_emit_exception_table_item(a, start, (1<<7));
    assemble_emit_exception_table_item(a, size, 0);
    assemble_emit_exception_table_item(a, target, 0);
    assemble_emit_exception_table_item(a, depth_lasti, 0);
    return SUCCESS;
}
156
157
/* Walk the instruction sequence and emit one exception-table entry for
 * each maximal run of instructions that share the same handler.  A
 * handler label of -1 means "no handler", so such runs produce no entry.
 * Offsets are measured in code units.
 * Returns SUCCESS or ERROR.
 */
static int
assemble_exception_table(struct assembler *a, instr_sequence *instrs)
{
    int ioffset = 0;
    _PyCompile_ExceptHandlerInfo handler;
    handler.h_label = -1;
    handler.h_startdepth = -1;
    handler.h_preserve_lasti = -1;
    int start = -1;
    for (int i = 0; i < instrs->s_used; i++) {
        instruction *instr = &instrs->s_instrs[i];
        if (instr->i_except_handler_info.h_label != handler.h_label) {
            /* Handler changed: close the previous run (if any)... */
            if (handler.h_label >= 0) {
                int handler_offset = instrs->s_instrs[handler.h_label].i_offset;
                RETURN_IF_ERROR(
                    assemble_emit_exception_table_entry(a, start, ioffset,
                                                        handler_offset,
                                                        &handler));
            }
            /* ...and start a new one at the current offset. */
            start = ioffset;
            handler = instr->i_except_handler_info;
        }
        ioffset += instr_size(instr);
    }
    /* Close the final run. */
    if (handler.h_label >= 0) {
        int handler_offset = instrs->s_instrs[handler.h_label].i_offset;
        RETURN_IF_ERROR(assemble_emit_exception_table_entry(a, start, ioffset,
                                                            handler_offset,
                                                            &handler));
    }
    return SUCCESS;
}
189
190
191
/* Code location emitting code. See locations.md for a description of the format. */
192
193
#define MSB 0x80
194
195
static void
196
write_location_byte(struct assembler* a, int val)
197
{
198
PyBytes_AS_STRING(a->a_linetable)[a->a_location_off] = val&255;
199
a->a_location_off++;
200
}
201
202
203
static uint8_t *
204
location_pointer(struct assembler* a)
205
{
206
return (uint8_t *)PyBytes_AS_STRING(a->a_linetable) +
207
a->a_location_off;
208
}
209
210
/* Write the first byte of a location entry (its format code and covered
   length) via write_location_entry_start() from pycore_code.h, and
   advance the write offset. */
static void
write_location_first_byte(struct assembler* a, int code, int length)
{
    a->a_location_off += write_location_entry_start(
        location_pointer(a), code, length);
}
216
217
static void
218
write_location_varint(struct assembler* a, unsigned int val)
219
{
220
uint8_t *ptr = location_pointer(a);
221
a->a_location_off += write_varint(ptr, val);
222
}
223
224
225
static void
226
write_location_signed_varint(struct assembler* a, int val)
227
{
228
uint8_t *ptr = location_pointer(a);
229
a->a_location_off += write_signed_varint(ptr, val);
230
}
231
232
/* Short form: same line as the previous entry (callers pass it only when
 * line_delta == 0), column < 80 and end-column delta < 16.  The entry
 * code encodes the column's high bits (its "group" of 8); the payload
 * byte packs the low 3 column bits with the end-column delta.
 * See locations.md.
 */
static void
write_location_info_short_form(struct assembler* a, int length, int column, int end_column)
{
    assert(length > 0 && length <= 8);
    int column_low_bits = column & 7;
    int column_group = column >> 3;
    assert(column < 80);
    assert(end_column >= column);
    assert(end_column - column < 16);
    write_location_first_byte(a, PY_CODE_LOCATION_INFO_SHORT0 + column_group, length);
    write_location_byte(a, (column_low_bits << 4) | (end_column - column));
}
244
245
/* One-line form: start and end on the same line, 0 <= line_delta < 3 and
 * both columns < 128.  line_delta is folded into the entry code; the two
 * columns follow as single bytes.  See locations.md.
 */
static void
write_location_info_oneline_form(struct assembler* a, int length, int line_delta, int column, int end_column)
{
    assert(length > 0 && length <= 8);
    assert(line_delta >= 0 && line_delta < 3);
    assert(column < 128);
    assert(end_column < 128);
    write_location_first_byte(a, PY_CODE_LOCATION_INFO_ONE_LINE0 + line_delta, length);
    write_location_byte(a, column);
    write_location_byte(a, end_column);
}
256
257
/* Long form: handles any location.  The start line is a signed delta
 * from the previously emitted line; the end line is relative to the
 * start line.  Column offsets are stored +1 so that the -1 "no column"
 * sentinel encodes as 0.  See locations.md.
 */
static void
write_location_info_long_form(struct assembler* a, location loc, int length)
{
    assert(length > 0 && length <= 8);
    write_location_first_byte(a, PY_CODE_LOCATION_INFO_LONG, length);
    write_location_signed_varint(a, loc.lineno - a->a_lineno);
    assert(loc.end_lineno >= loc.lineno);
    write_location_varint(a, loc.end_lineno - loc.lineno);
    write_location_varint(a, loc.col_offset + 1);
    write_location_varint(a, loc.end_col_offset + 1);
}
268
269
/* "No location" entry, used when the instruction has no line number
   (loc.lineno < 0). */
static void
write_location_info_none(struct assembler* a, int length)
{
    write_location_first_byte(a, PY_CODE_LOCATION_INFO_NONE, length);
}
274
275
/* Line-only entry: a line number (as a signed delta from the previous
   line) but no column information. */
static void
write_location_info_no_column(struct assembler* a, int length, int line_delta)
{
    write_location_first_byte(a, PY_CODE_LOCATION_INFO_NO_COLUMNS, length);
    write_location_signed_varint(a, line_delta);
}
281
282
#define THEORETICAL_MAX_ENTRY_SIZE 25 /* 1 + 6 + 6 + 6 + 6 */

/* Write one location entry covering `isize` code units (1 <= isize <= 8),
 * choosing the most compact of the formats above: none, no-column,
 * short, one-line, or long.  The line table is grown first so any entry
 * is guaranteed to fit.  a->a_lineno is updated whenever a line number
 * is emitted (the short form keeps the line unchanged, so it does not
 * update it).
 * Returns SUCCESS, or ERROR if the resize fails.
 */
static int
write_location_info_entry(struct assembler* a, location loc, int isize)
{
    Py_ssize_t len = PyBytes_GET_SIZE(a->a_linetable);
    if (a->a_location_off + THEORETICAL_MAX_ENTRY_SIZE >= len) {
        assert(len > THEORETICAL_MAX_ENTRY_SIZE);
        RETURN_IF_ERROR(_PyBytes_Resize(&a->a_linetable, len*2));
    }
    if (loc.lineno < 0) {
        write_location_info_none(a, isize);
        return SUCCESS;
    }
    int line_delta = loc.lineno - a->a_lineno;
    int column = loc.col_offset;
    int end_column = loc.end_col_offset;
    assert(column >= -1);
    assert(end_column >= -1);
    if (column < 0 || end_column < 0) {
        /* No column info: line-only form, unless the location spans
           multiple lines (then fall through to the long form). */
        if (loc.end_lineno == loc.lineno || loc.end_lineno == -1) {
            write_location_info_no_column(a, isize, line_delta);
            a->a_lineno = loc.lineno;
            return SUCCESS;
        }
    }
    else if (loc.end_lineno == loc.lineno) {
        /* Single-line location: try the two compact encodings first. */
        if (line_delta == 0 && column < 80 && end_column - column < 16 && end_column >= column) {
            write_location_info_short_form(a, isize, column, end_column);
            return SUCCESS;
        }
        if (line_delta >= 0 && line_delta < 3 && column < 128 && end_column < 128) {
            write_location_info_oneline_form(a, isize, line_delta, column, end_column);
            a->a_lineno = loc.lineno;
            return SUCCESS;
        }
    }
    /* General fallback. */
    write_location_info_long_form(a, loc, isize);
    a->a_lineno = loc.lineno;
    return SUCCESS;
}
324
325
static int
326
assemble_emit_location(struct assembler* a, location loc, int isize)
327
{
328
if (isize == 0) {
329
return SUCCESS;
330
}
331
while (isize > 8) {
332
RETURN_IF_ERROR(write_location_info_entry(a, loc, 8));
333
isize -= 8;
334
}
335
return write_location_info_entry(a, loc, isize);
336
}
337
338
/* Emit location info for the whole instruction sequence.  Consecutive
 * instructions with identical source locations are coalesced into one
 * run (sized in code units) before being emitted.
 * Returns SUCCESS or ERROR.
 */
static int
assemble_location_info(struct assembler *a, instr_sequence *instrs,
                       int firstlineno)
{
    a->a_lineno = firstlineno;
    location loc = NO_LOCATION;
    int size = 0;
    for (int i = 0; i < instrs->s_used; i++) {
        instruction *instr = &instrs->s_instrs[i];
        if (!same_location(loc, instr->i_loc)) {
            /* Location changed: flush the accumulated run. */
            RETURN_IF_ERROR(assemble_emit_location(a, loc, size));
            loc = instr->i_loc;
            size = 0;
        }
        size += instr_size(instr);
    }
    /* Flush the final run. */
    RETURN_IF_ERROR(assemble_emit_location(a, loc, size));
    return SUCCESS;
}
357
358
/* Write one instruction (ilen code units, as computed by instr_size())
 * into codestr: up to three EXTENDED_ARG prefixes carrying the high
 * bytes of a wide oparg, then the instruction itself, then CACHE
 * placeholders for the opcode's inline cache.
 */
static void
write_instr(_Py_CODEUNIT *codestr, instruction *instr, int ilen)
{
    int opcode = instr->i_opcode;
    assert(!IS_PSEUDO_INSTR(opcode));
    int oparg = instr->i_oparg;
    assert(OPCODE_HAS_ARG(opcode) || oparg == 0);
    int caches = _PyOpcode_Caches[opcode];
    /* ilen - caches is 1 plus the number of EXTENDED_ARG prefixes;
       each case deliberately falls through to emit the lower bytes. */
    switch (ilen - caches) {
        case 4:
            codestr->op.code = EXTENDED_ARG;
            codestr->op.arg = (oparg >> 24) & 0xFF;
            codestr++;
            /* fall through */
        case 3:
            codestr->op.code = EXTENDED_ARG;
            codestr->op.arg = (oparg >> 16) & 0xFF;
            codestr++;
            /* fall through */
        case 2:
            codestr->op.code = EXTENDED_ARG;
            codestr->op.arg = (oparg >> 8) & 0xFF;
            codestr++;
            /* fall through */
        case 1:
            codestr->op.code = opcode;
            codestr->op.arg = oparg & 0xFF;
            codestr++;
            break;
        default:
            Py_UNREACHABLE();
    }
    /* Pad with CACHE entries for the specializing interpreter. */
    while (caches--) {
        codestr->op.code = CACHE;
        codestr->op.arg = 0;
        codestr++;
    }
}
396
397
/* assemble_emit_instr()
   Extend the bytecode with a new instruction, growing the buffer as
   needed.  (Location info is emitted separately, by
   assemble_location_info().)
*/
401
402
/* Append instr to the bytecode buffer.
   Returns SUCCESS, or ERROR on overflow/allocation failure. */
static int
assemble_emit_instr(struct assembler *a, instruction *instr)
{
    Py_ssize_t len = PyBytes_GET_SIZE(a->a_bytecode);
    _Py_CODEUNIT *code;

    int size = instr_size(instr);
    /* len is in bytes; a_offset and size are in code units.  Double the
       buffer when the instruction might not fit. */
    if (a->a_offset + size >= len / (int)sizeof(_Py_CODEUNIT)) {
        if (len > PY_SSIZE_T_MAX / 2) {
            return ERROR;
        }
        RETURN_IF_ERROR(_PyBytes_Resize(&a->a_bytecode, len * 2));
    }
    code = (_Py_CODEUNIT *)PyBytes_AS_STRING(a->a_bytecode) + a->a_offset;
    a->a_offset += size;
    write_instr(code, instr, size);
    return SUCCESS;
}
420
421
/* Run the assembly pipeline over instrs: initialize *a, emit every
 * instruction, then build the location and exception tables.  Each
 * output buffer is trimmed to its written size and passed through
 * _PyCompile_ConstCacheMergeOne (presumably to share identical bytes
 * objects — see pycore_compile.h).
 * Returns SUCCESS or ERROR; the caller is expected to call
 * assemble_free(a) in either case (see _PyAssemble_MakeCodeObject).
 */
static int
assemble_emit(struct assembler *a, instr_sequence *instrs,
              int first_lineno, PyObject *const_cache)
{
    RETURN_IF_ERROR(assemble_init(a, first_lineno));

    for (int i = 0; i < instrs->s_used; i++) {
        instruction *instr = &instrs->s_instrs[i];
        RETURN_IF_ERROR(assemble_emit_instr(a, instr));
    }

    RETURN_IF_ERROR(assemble_location_info(a, instrs, a->a_lineno));

    RETURN_IF_ERROR(assemble_exception_table(a, instrs));

    /* Trim each buffer to its final size, then intern it. */
    RETURN_IF_ERROR(_PyBytes_Resize(&a->a_except_table, a->a_except_table_off));
    RETURN_IF_ERROR(_PyCompile_ConstCacheMergeOne(const_cache, &a->a_except_table));

    RETURN_IF_ERROR(_PyBytes_Resize(&a->a_linetable, a->a_location_off));
    RETURN_IF_ERROR(_PyCompile_ConstCacheMergeOne(const_cache, &a->a_linetable));

    RETURN_IF_ERROR(_PyBytes_Resize(&a->a_bytecode, a->a_offset * sizeof(_Py_CODEUNIT)));
    RETURN_IF_ERROR(_PyCompile_ConstCacheMergeOne(const_cache, &a->a_bytecode));
    return SUCCESS;
}
446
447
static PyObject *
448
dict_keys_inorder(PyObject *dict, Py_ssize_t offset)
449
{
450
PyObject *tuple, *k, *v;
451
Py_ssize_t i, pos = 0, size = PyDict_GET_SIZE(dict);
452
453
tuple = PyTuple_New(size);
454
if (tuple == NULL)
455
return NULL;
456
while (PyDict_Next(dict, &pos, &k, &v)) {
457
i = PyLong_AS_LONG(v);
458
assert((i - offset) < size);
459
assert((i - offset) >= 0);
460
PyTuple_SET_ITEM(tuple, i - offset, Py_NewRef(k));
461
}
462
return tuple;
463
}
464
465
// This is in codeobject.c.
extern void _Py_set_localsplus_info(int, PyObject *, unsigned char,
                                    PyObject *, PyObject *);

/* Fill `names` (a tuple) and `kinds` (a bytes object) for the whole
 * fast-locals array: plain locals first, then cell variables (skipping
 * cells that alias a local/argument), then free variables.  Entries are
 * written via _Py_set_localsplus_info() from codeobject.c.
 */
static void
compute_localsplus_info(_PyCompile_CodeUnitMetadata *umd, int nlocalsplus,
                        PyObject *names, PyObject *kinds)
{
    PyObject *k, *v;
    Py_ssize_t pos = 0;
    while (PyDict_Next(umd->u_varnames, &pos, &k, &v)) {
        int offset = (int)PyLong_AS_LONG(v);
        assert(offset >= 0);
        assert(offset < nlocalsplus);
        // For now we do not distinguish arg kinds.
        _PyLocals_Kind kind = CO_FAST_LOCAL;
        if (PyDict_Contains(umd->u_fasthidden, k)) {
            kind |= CO_FAST_HIDDEN;
        }
        /* A local that is also a cell gets both kind bits. */
        if (PyDict_GetItem(umd->u_cellvars, k) != NULL) {
            kind |= CO_FAST_CELL;
        }
        _Py_set_localsplus_info(offset, k, kind, names, kinds);
    }
    int nlocals = (int)PyDict_GET_SIZE(umd->u_varnames);

    // This counter mirrors the fix done in fix_cell_offsets().
    int numdropped = 0;
    pos = 0;
    while (PyDict_Next(umd->u_cellvars, &pos, &k, &v)) {
        if (PyDict_GetItem(umd->u_varnames, k) != NULL) {
            // Skip cells that are already covered by locals.
            numdropped += 1;
            continue;
        }
        int offset = (int)PyLong_AS_LONG(v);
        assert(offset >= 0);
        /* Cell offsets are relative to the cell region: shift past the
           plain locals, minus any dropped (aliasing) cells. */
        offset += nlocals - numdropped;
        assert(offset < nlocalsplus);
        _Py_set_localsplus_info(offset, k, CO_FAST_CELL, names, kinds);
    }

    pos = 0;
    while (PyDict_Next(umd->u_freevars, &pos, &k, &v)) {
        int offset = (int)PyLong_AS_LONG(v);
        assert(offset >= 0);
        offset += nlocals - numdropped;
        assert(offset < nlocalsplus);
        _Py_set_localsplus_info(offset, k, CO_FAST_FREE, names, kinds);
    }
}
516
517
/* Build the final PyCodeObject from the assembled buffers in *a and the
 * compiler's per-unit metadata.  constslist is converted to a tuple;
 * names, consts and localsplusnames are passed through the constant
 * cache.
 * Returns a new reference, or NULL with an exception set.
 * Note: both the success and failure paths fall through the `error`
 * label; on success `co` is non-NULL and the Py_XDECREFs release only
 * the local temporaries.
 */
static PyCodeObject *
makecode(_PyCompile_CodeUnitMetadata *umd, struct assembler *a, PyObject *const_cache,
         PyObject *constslist, int maxdepth, int nlocalsplus, int code_flags,
         PyObject *filename)
{
    PyCodeObject *co = NULL;
    PyObject *names = NULL;
    PyObject *consts = NULL;
    PyObject *localsplusnames = NULL;
    PyObject *localspluskinds = NULL;
    names = dict_keys_inorder(umd->u_names, 0);
    if (!names) {
        goto error;
    }
    if (_PyCompile_ConstCacheMergeOne(const_cache, &names) < 0) {
        goto error;
    }

    consts = PyList_AsTuple(constslist); /* PyCode_New requires a tuple */
    if (consts == NULL) {
        goto error;
    }
    if (_PyCompile_ConstCacheMergeOne(const_cache, &consts) < 0) {
        goto error;
    }

    assert(umd->u_posonlyargcount < INT_MAX);
    assert(umd->u_argcount < INT_MAX);
    assert(umd->u_kwonlyargcount < INT_MAX);
    int posonlyargcount = (int)umd->u_posonlyargcount;
    int posorkwargcount = (int)umd->u_argcount;
    assert(INT_MAX - posonlyargcount - posorkwargcount > 0);
    int kwonlyargcount = (int)umd->u_kwonlyargcount;

    localsplusnames = PyTuple_New(nlocalsplus);
    if (localsplusnames == NULL) {
        goto error;
    }
    localspluskinds = PyBytes_FromStringAndSize(NULL, nlocalsplus);
    if (localspluskinds == NULL) {
        goto error;
    }
    compute_localsplus_info(umd, nlocalsplus, localsplusnames, localspluskinds);

    struct _PyCodeConstructor con = {
        .filename = filename,
        .name = umd->u_name,
        .qualname = umd->u_qualname ? umd->u_qualname : umd->u_name,
        .flags = code_flags,

        .code = a->a_bytecode,
        .firstlineno = umd->u_firstlineno,
        .linetable = a->a_linetable,

        .consts = consts,
        .names = names,

        .localsplusnames = localsplusnames,
        .localspluskinds = localspluskinds,

        .argcount = posonlyargcount + posorkwargcount,
        .posonlyargcount = posonlyargcount,
        .kwonlyargcount = kwonlyargcount,

        .stacksize = maxdepth,

        .exceptiontable = a->a_except_table,
    };

    if (_PyCode_Validate(&con) < 0) {
        goto error;
    }

    /* Merge localsplusnames only after validation; the cache may hand
       back a different (cached) object, so refresh con afterwards.
       NOTE(review): inferred from the refresh below — confirm against
       _PyCompile_ConstCacheMergeOne. */
    if (_PyCompile_ConstCacheMergeOne(const_cache, &localsplusnames) < 0) {
        goto error;
    }
    con.localsplusnames = localsplusnames;

    co = _PyCode_New(&con);
    if (co == NULL) {
        goto error;
    }

error:
    Py_XDECREF(names);
    Py_XDECREF(consts);
    Py_XDECREF(localsplusnames);
    Py_XDECREF(localspluskinds);
    return co;
}
607
608
static int
resolve_jump_offsets(instr_sequence *instrs)
{
    /* Compute the size of each instruction and fixup jump args.
     * Replace instruction index with position in bytecode.
     */

    /* Jump opargs currently hold the target's instruction index;
       stash it in i_target before i_oparg is overwritten below. */
    for (int i = 0; i < instrs->s_used; i++) {
        instruction *instr = &instrs->s_instrs[i];
        if (OPCODE_HAS_JUMP(instr->i_opcode)) {
            instr->i_target = instr->i_oparg;
        }
    }

    int extended_arg_recompile;

    do {
        /* First pass: assign every instruction its code-unit offset. */
        int totsize = 0;
        for (int i = 0; i < instrs->s_used; i++) {
            instruction *instr = &instrs->s_instrs[i];
            instr->i_offset = totsize;
            int isize = instr_size(instr);
            totsize += isize;
        }
        extended_arg_recompile = 0;

        /* Second pass: turn jump targets into relative offsets. */
        int offset = 0;
        for (int i = 0; i < instrs->s_used; i++) {
            instruction *instr = &instrs->s_instrs[i];
            int isize = instr_size(instr);
            /* jump offsets are computed relative to
             * the instruction pointer after fetching
             * the jump instruction.
             */
            offset += isize;
            if (OPCODE_HAS_JUMP(instr->i_opcode)) {
                instruction *target = &instrs->s_instrs[instr->i_target];
                instr->i_oparg = target->i_offset;
                if (instr->i_oparg < offset) {
                    /* Backward jumps store a positive distance. */
                    assert(IS_BACKWARDS_JUMP_OPCODE(instr->i_opcode));
                    instr->i_oparg = offset - instr->i_oparg;
                }
                else {
                    assert(!IS_BACKWARDS_JUMP_OPCODE(instr->i_opcode));
                    instr->i_oparg = instr->i_oparg - offset;
                }
                /* A changed oparg may need a different number of
                   EXTENDED_ARGs, shifting all later offsets. */
                if (instr_size(instr) != isize) {
                    extended_arg_recompile = 1;
                }
            }
        }
        /* XXX: This is an awful hack that could hurt performance, but
            on the bright side it should work until we come up
            with a better solution.

            The issue is that in the first loop instr_size() is
            called, and it requires i_oparg be set appropriately.
            There is a bootstrap problem because i_oparg is
            calculated in the second loop above.

            So we loop until we stop seeing new EXTENDED_ARGs.
            The only EXTENDED_ARGs that could be popping up are
            ones in jump instructions.  So this should converge
            fairly quickly.
        */
    } while (extended_arg_recompile);
    return SUCCESS;
}
676
677
/* Lower the direction-neutral pseudo opcodes JUMP and JUMP_NO_INTERRUPT
 * to their concrete forward/backward forms, deciding direction by
 * comparing the target's instruction index (still in i_oparg at this
 * stage) with the jump's own index.  Any other pseudo jump reaching
 * this point is a bug.
 */
static int
resolve_unconditional_jumps(instr_sequence *instrs)
{
    /* Resolve directions of unconditional jumps */

    for (int i = 0; i < instrs->s_used; i++) {
        instruction *instr = &instrs->s_instrs[i];
        bool is_forward = (instr->i_oparg > i);
        switch(instr->i_opcode) {
            case JUMP:
                assert(SAME_OPCODE_METADATA(JUMP, JUMP_FORWARD));
                assert(SAME_OPCODE_METADATA(JUMP, JUMP_BACKWARD));
                instr->i_opcode = is_forward ? JUMP_FORWARD : JUMP_BACKWARD;
                break;
            case JUMP_NO_INTERRUPT:
                assert(SAME_OPCODE_METADATA(JUMP_NO_INTERRUPT, JUMP_FORWARD));
                assert(SAME_OPCODE_METADATA(JUMP_NO_INTERRUPT, JUMP_BACKWARD_NO_INTERRUPT));
                instr->i_opcode = is_forward ?
                    JUMP_FORWARD : JUMP_BACKWARD_NO_INTERRUPT;
                break;
            default:
                /* All remaining pseudo jumps should have been lowered
                   by earlier compiler passes. */
                if (OPCODE_HAS_JUMP(instr->i_opcode) &&
                    IS_PSEUDO_INSTR(instr->i_opcode)) {
                    Py_UNREACHABLE();
                }
        }
    }
    return SUCCESS;
}
706
707
/* Public entry point of the assembler: lower pseudo jumps, resolve jump
 * offsets in instrs (mutating it), assemble the byte streams, and build
 * the code object.
 * Returns a new PyCodeObject, or NULL with an exception set.
 */
PyCodeObject *
_PyAssemble_MakeCodeObject(_PyCompile_CodeUnitMetadata *umd, PyObject *const_cache,
                           PyObject *consts, int maxdepth, instr_sequence *instrs,
                           int nlocalsplus, int code_flags, PyObject *filename)
{

    if (resolve_unconditional_jumps(instrs) < 0) {
        return NULL;
    }
    if (resolve_jump_offsets(instrs) < 0) {
        return NULL;
    }
    PyCodeObject *co = NULL;

    struct assembler a;
    int res = assemble_emit(&a, instrs, umd->u_firstlineno, const_cache);
    if (res == SUCCESS) {
        co = makecode(umd, &a, const_cache, consts, maxdepth, nlocalsplus,
                      code_flags, filename);
    }
    /* Buffers are released here on both paths (makecode does not steal
       the assembler's references). */
    assemble_free(&a);
    return co;
}
730
731