/* Source: allendowney/cpython, Python/ceval_gil.c (captured from a web code
   viewer; navigation chrome removed). */
#include "Python.h"
3
#include "pycore_atomic.h" // _Py_atomic_int
4
#include "pycore_ceval.h" // _PyEval_SignalReceived()
5
#include "pycore_pyerrors.h" // _PyErr_GetRaisedException()
6
#include "pycore_pylifecycle.h" // _PyErr_Print()
7
#include "pycore_initconfig.h" // _PyStatus_OK()
8
#include "pycore_interp.h" // _Py_RunGC()
9
#include "pycore_pymem.h" // _PyMem_IsPtrFreed()
10
11
/*
   Notes about the implementation:

   - The GIL is just a boolean variable (locked) whose access is protected
     by a mutex (gil_mutex), and whose changes are signalled by a condition
     variable (gil_cond). gil_mutex is taken for short periods of time,
     and therefore mostly uncontended.

   - In the GIL-holding thread, the main loop (PyEval_EvalFrameEx) must be
     able to release the GIL on demand by another thread. A volatile boolean
     variable (gil_drop_request) is used for that purpose, which is checked
     at every turn of the eval loop. That variable is set after a wait of
     `interval` microseconds on `gil_cond` has timed out.

      [Actually, another volatile boolean variable (eval_breaker) is used
       which ORs several conditions into one. Volatile booleans are
       sufficient as inter-thread signalling means since Python is run
       on cache-coherent architectures only.]

   - A thread wanting to take the GIL will first let pass a given amount of
     time (`interval` microseconds) before setting gil_drop_request. This
     encourages a defined switching period, but doesn't enforce it since
     opcodes can take an arbitrary time to execute.

     The `interval` value is available for the user to read and modify
     using the Python API `sys.{get,set}switchinterval()`.

   - When a thread releases the GIL and gil_drop_request is set, that thread
     ensures that another GIL-awaiting thread gets scheduled.
     It does so by waiting on a condition variable (switch_cond) until
     the value of last_holder is changed to something else than its
     own thread state pointer, indicating that another thread was able to
     take the GIL.

     This is meant to prohibit the latency-adverse behaviour on multi-core
     machines where one thread would speculatively release the GIL, but still
     run and end up being the first to re-acquire it, making the "timeslices"
     much longer than expected.
     (Note: this mechanism is enabled with FORCE_SWITCHING above)
*/
51
52
// GH-89279: Force inlining by using a macro.
// On MSVC, a plain volatile 32-bit read is used (MSVC does not reliably
// inline the atomic helper); elsewhere defer to _Py_atomic_load_relaxed().
#if defined(_MSC_VER) && SIZEOF_INT == 4
#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) (assert(sizeof((ATOMIC_VAL)->_value) == 4), *((volatile int*)&((ATOMIC_VAL)->_value)))
#else
#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) _Py_atomic_load_relaxed(ATOMIC_VAL)
#endif
58
59
/* This can set eval_breaker to 0 even though gil_drop_request became
60
1. We believe this is all right because the eval loop will release
61
the GIL eventually anyway. */
62
static inline void
63
COMPUTE_EVAL_BREAKER(PyInterpreterState *interp,
64
struct _ceval_runtime_state *ceval,
65
struct _ceval_state *ceval2)
66
{
67
_Py_atomic_store_relaxed(&ceval2->eval_breaker,
68
_Py_atomic_load_relaxed_int32(&ceval2->gil_drop_request)
69
| (_Py_atomic_load_relaxed_int32(&ceval->signals_pending)
70
&& _Py_ThreadCanHandleSignals(interp))
71
| (_Py_atomic_load_relaxed_int32(&ceval2->pending.calls_to_do))
72
| (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)
73
&&_Py_atomic_load_relaxed_int32(&ceval->pending_mainthread.calls_to_do))
74
| ceval2->pending.async_exc
75
| _Py_atomic_load_relaxed_int32(&ceval2->gc_scheduled));
76
}
77
78
79
static inline void
80
SET_GIL_DROP_REQUEST(PyInterpreterState *interp)
81
{
82
struct _ceval_state *ceval2 = &interp->ceval;
83
_Py_atomic_store_relaxed(&ceval2->gil_drop_request, 1);
84
_Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
85
}
86
87
88
static inline void
89
RESET_GIL_DROP_REQUEST(PyInterpreterState *interp)
90
{
91
struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
92
struct _ceval_state *ceval2 = &interp->ceval;
93
_Py_atomic_store_relaxed(&ceval2->gil_drop_request, 0);
94
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
95
}
96
97
98
static inline void
99
SIGNAL_PENDING_CALLS(struct _pending_calls *pending, PyInterpreterState *interp)
100
{
101
struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
102
struct _ceval_state *ceval2 = &interp->ceval;
103
_Py_atomic_store_relaxed(&pending->calls_to_do, 1);
104
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
105
}
106
107
108
static inline void
109
UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp)
110
{
111
struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
112
struct _ceval_state *ceval2 = &interp->ceval;
113
if (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)) {
114
_Py_atomic_store_relaxed(&ceval->pending_mainthread.calls_to_do, 0);
115
}
116
_Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 0);
117
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
118
}
119
120
121
static inline void
122
SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp, int force)
123
{
124
struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
125
struct _ceval_state *ceval2 = &interp->ceval;
126
_Py_atomic_store_relaxed(&ceval->signals_pending, 1);
127
if (force) {
128
_Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
129
}
130
else {
131
/* eval_breaker is not set to 1 if thread_can_handle_signals() is false */
132
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
133
}
134
}
135
136
137
static inline void
138
UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp)
139
{
140
struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
141
struct _ceval_state *ceval2 = &interp->ceval;
142
_Py_atomic_store_relaxed(&ceval->signals_pending, 0);
143
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
144
}
145
146
147
static inline void
148
SIGNAL_ASYNC_EXC(PyInterpreterState *interp)
149
{
150
struct _ceval_state *ceval2 = &interp->ceval;
151
ceval2->pending.async_exc = 1;
152
_Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
153
}
154
155
156
static inline void
157
UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp)
158
{
159
struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
160
struct _ceval_state *ceval2 = &interp->ceval;
161
ceval2->pending.async_exc = 0;
162
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
163
}
164
165
#ifndef NDEBUG
166
/* Ensure that tstate is valid */
167
static int
168
is_tstate_valid(PyThreadState *tstate)
169
{
170
assert(!_PyMem_IsPtrFreed(tstate));
171
assert(!_PyMem_IsPtrFreed(tstate->interp));
172
return 1;
173
}
174
#endif
175
176
/*
177
* Implementation of the Global Interpreter Lock (GIL).
178
*/
179
180
#include <stdlib.h>
181
#include <errno.h>
182
183
#include "pycore_atomic.h"
184
185
186
#include "condvar.h"
187
188
/* Thin wrappers around the portable mutex/condition-variable API
   (condvar.h).  Any failure is fatal: GIL bookkeeping cannot proceed
   without working synchronization primitives. */
#define MUTEX_INIT(mut) \
    if (PyMUTEX_INIT(&(mut))) { \
        Py_FatalError("PyMUTEX_INIT(" #mut ") failed"); };
#define MUTEX_FINI(mut) \
    if (PyMUTEX_FINI(&(mut))) { \
        Py_FatalError("PyMUTEX_FINI(" #mut ") failed"); };
#define MUTEX_LOCK(mut) \
    if (PyMUTEX_LOCK(&(mut))) { \
        Py_FatalError("PyMUTEX_LOCK(" #mut ") failed"); };
#define MUTEX_UNLOCK(mut) \
    if (PyMUTEX_UNLOCK(&(mut))) { \
        Py_FatalError("PyMUTEX_UNLOCK(" #mut ") failed"); };

#define COND_INIT(cond) \
    if (PyCOND_INIT(&(cond))) { \
        Py_FatalError("PyCOND_INIT(" #cond ") failed"); };
#define COND_FINI(cond) \
    if (PyCOND_FINI(&(cond))) { \
        Py_FatalError("PyCOND_FINI(" #cond ") failed"); };
#define COND_SIGNAL(cond) \
    if (PyCOND_SIGNAL(&(cond))) { \
        Py_FatalError("PyCOND_SIGNAL(" #cond ") failed"); };
#define COND_WAIT(cond, mut) \
    if (PyCOND_WAIT(&(cond), &(mut))) { \
        Py_FatalError("PyCOND_WAIT(" #cond ") failed"); };
/* Timed wait: sets timeout_result to 1 on timeout (or when the
   implementation cannot tell), 0 when signalled before the deadline. */
#define COND_TIMED_WAIT(cond, mut, microseconds, timeout_result) \
    { \
        int r = PyCOND_TIMEDWAIT(&(cond), &(mut), (microseconds)); \
        if (r < 0) \
            Py_FatalError("PyCOND_WAIT(" #cond ") failed"); \
        if (r) /* 1 == timeout, 2 == impl. can't say, so assume timeout */ \
            timeout_result = 1; \
        else \
            timeout_result = 0; \
    } \


/* Default GIL switch interval: 5000 microseconds (5 ms), the default
   returned by sys.getswitchinterval(). */
#define DEFAULT_INTERVAL 5000
226
227
static void _gil_initialize(struct _gil_runtime_state *gil)
228
{
229
_Py_atomic_int uninitialized = {-1};
230
gil->locked = uninitialized;
231
gil->interval = DEFAULT_INTERVAL;
232
}
233
234
static int gil_created(struct _gil_runtime_state *gil)
235
{
236
if (gil == NULL) {
237
return 0;
238
}
239
return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0);
240
}
241
242
static void create_gil(struct _gil_runtime_state *gil)
243
{
244
MUTEX_INIT(gil->mutex);
245
#ifdef FORCE_SWITCHING
246
MUTEX_INIT(gil->switch_mutex);
247
#endif
248
COND_INIT(gil->cond);
249
#ifdef FORCE_SWITCHING
250
COND_INIT(gil->switch_cond);
251
#endif
252
_Py_atomic_store_relaxed(&gil->last_holder, 0);
253
_Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
254
_Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release);
255
}
256
257
static void destroy_gil(struct _gil_runtime_state *gil)
258
{
259
/* some pthread-like implementations tie the mutex to the cond
260
* and must have the cond destroyed first.
261
*/
262
COND_FINI(gil->cond);
263
MUTEX_FINI(gil->mutex);
264
#ifdef FORCE_SWITCHING
265
COND_FINI(gil->switch_cond);
266
MUTEX_FINI(gil->switch_mutex);
267
#endif
268
_Py_atomic_store_explicit(&gil->locked, -1,
269
_Py_memory_order_release);
270
_Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
271
}
272
273
#ifdef HAVE_FORK
/* Re-create the GIL after fork(): the child must not reuse mutexes or
   condition variables that may have been held by threads that no longer
   exist in the child. */
static void recreate_gil(struct _gil_runtime_state *gil)
{
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
    /* XXX should we destroy the old OS resources here? */
    create_gil(gil);
}
#endif
281
282
static void
283
drop_gil(struct _ceval_state *ceval, PyThreadState *tstate)
284
{
285
/* If tstate is NULL, the caller is indicating that we're releasing
286
the GIL for the last time in this thread. This is particularly
287
relevant when the current thread state is finalizing or its
288
interpreter is finalizing (either may be in an inconsistent
289
state). In that case the current thread will definitely
290
never try to acquire the GIL again. */
291
// XXX It may be more correct to check tstate->_status.finalizing.
292
// XXX assert(tstate == NULL || !tstate->_status.cleared);
293
294
struct _gil_runtime_state *gil = ceval->gil;
295
if (!_Py_atomic_load_relaxed(&gil->locked)) {
296
Py_FatalError("drop_gil: GIL is not locked");
297
}
298
299
/* tstate is allowed to be NULL (early interpreter init) */
300
if (tstate != NULL) {
301
/* Sub-interpreter support: threads might have been switched
302
under our feet using PyThreadState_Swap(). Fix the GIL last
303
holder variable so that our heuristics work. */
304
_Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
305
}
306
307
MUTEX_LOCK(gil->mutex);
308
_Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
309
_Py_atomic_store_relaxed(&gil->locked, 0);
310
COND_SIGNAL(gil->cond);
311
MUTEX_UNLOCK(gil->mutex);
312
313
#ifdef FORCE_SWITCHING
314
/* We check tstate first in case we might be releasing the GIL for
315
the last time in this thread. In that case there's a possible
316
race with tstate->interp getting deleted after gil->mutex is
317
unlocked and before the following code runs, leading to a crash.
318
We can use (tstate == NULL) to indicate the thread is done with
319
the GIL, and that's the only time we might delete the
320
interpreter, so checking tstate first prevents the crash.
321
See https://github.com/python/cpython/issues/104341. */
322
if (tstate != NULL && _Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
323
MUTEX_LOCK(gil->switch_mutex);
324
/* Not switched yet => wait */
325
if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
326
{
327
assert(is_tstate_valid(tstate));
328
RESET_GIL_DROP_REQUEST(tstate->interp);
329
/* NOTE: if COND_WAIT does not atomically start waiting when
330
releasing the mutex, another thread can run through, take
331
the GIL and drop it again, and reset the condition
332
before we even had a chance to wait for it. */
333
COND_WAIT(gil->switch_cond, gil->switch_mutex);
334
}
335
MUTEX_UNLOCK(gil->switch_mutex);
336
}
337
#endif
338
}
339
340
341
/* Check if a Python thread must exit immediately, rather than taking the GIL
342
if Py_Finalize() has been called.
343
344
When this function is called by a daemon thread after Py_Finalize() has been
345
called, the GIL does no longer exist.
346
347
tstate must be non-NULL. */
348
static inline int
349
tstate_must_exit(PyThreadState *tstate)
350
{
351
/* bpo-39877: Access _PyRuntime directly rather than using
352
tstate->interp->runtime to support calls from Python daemon threads.
353
After Py_Finalize() has been called, tstate can be a dangling pointer:
354
point to PyThreadState freed memory. */
355
PyThreadState *finalizing = _PyRuntimeState_GetFinalizing(&_PyRuntime);
356
if (finalizing == NULL) {
357
finalizing = _PyInterpreterState_GetFinalizing(tstate->interp);
358
}
359
return (finalizing != NULL && finalizing != tstate);
360
}
361
362
363
/* Take the GIL.
364
365
The function saves errno at entry and restores its value at exit.
366
367
tstate must be non-NULL. */
368
static void
369
take_gil(PyThreadState *tstate)
370
{
371
int err = errno;
372
373
assert(tstate != NULL);
374
/* We shouldn't be using a thread state that isn't viable any more. */
375
// XXX It may be more correct to check tstate->_status.finalizing.
376
// XXX assert(!tstate->_status.cleared);
377
378
if (tstate_must_exit(tstate)) {
379
/* bpo-39877: If Py_Finalize() has been called and tstate is not the
380
thread which called Py_Finalize(), exit immediately the thread.
381
382
This code path can be reached by a daemon thread after Py_Finalize()
383
completes. In this case, tstate is a dangling pointer: points to
384
PyThreadState freed memory. */
385
PyThread_exit_thread();
386
}
387
388
assert(is_tstate_valid(tstate));
389
PyInterpreterState *interp = tstate->interp;
390
struct _ceval_state *ceval = &interp->ceval;
391
struct _gil_runtime_state *gil = ceval->gil;
392
393
/* Check that _PyEval_InitThreads() was called to create the lock */
394
assert(gil_created(gil));
395
396
MUTEX_LOCK(gil->mutex);
397
398
if (!_Py_atomic_load_relaxed(&gil->locked)) {
399
goto _ready;
400
}
401
402
int drop_requested = 0;
403
while (_Py_atomic_load_relaxed(&gil->locked)) {
404
unsigned long saved_switchnum = gil->switch_number;
405
406
unsigned long interval = (gil->interval >= 1 ? gil->interval : 1);
407
int timed_out = 0;
408
COND_TIMED_WAIT(gil->cond, gil->mutex, interval, timed_out);
409
410
/* If we timed out and no switch occurred in the meantime, it is time
411
to ask the GIL-holding thread to drop it. */
412
if (timed_out &&
413
_Py_atomic_load_relaxed(&gil->locked) &&
414
gil->switch_number == saved_switchnum)
415
{
416
if (tstate_must_exit(tstate)) {
417
MUTEX_UNLOCK(gil->mutex);
418
// gh-96387: If the loop requested a drop request in a previous
419
// iteration, reset the request. Otherwise, drop_gil() can
420
// block forever waiting for the thread which exited. Drop
421
// requests made by other threads are also reset: these threads
422
// may have to request again a drop request (iterate one more
423
// time).
424
if (drop_requested) {
425
RESET_GIL_DROP_REQUEST(interp);
426
}
427
PyThread_exit_thread();
428
}
429
assert(is_tstate_valid(tstate));
430
431
SET_GIL_DROP_REQUEST(interp);
432
drop_requested = 1;
433
}
434
}
435
436
_ready:
437
#ifdef FORCE_SWITCHING
438
/* This mutex must be taken before modifying gil->last_holder:
439
see drop_gil(). */
440
MUTEX_LOCK(gil->switch_mutex);
441
#endif
442
/* We now hold the GIL */
443
_Py_atomic_store_relaxed(&gil->locked, 1);
444
_Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1);
445
446
if (tstate != (PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) {
447
_Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
448
++gil->switch_number;
449
}
450
451
#ifdef FORCE_SWITCHING
452
COND_SIGNAL(gil->switch_cond);
453
MUTEX_UNLOCK(gil->switch_mutex);
454
#endif
455
456
if (tstate_must_exit(tstate)) {
457
/* bpo-36475: If Py_Finalize() has been called and tstate is not
458
the thread which called Py_Finalize(), exit immediately the
459
thread.
460
461
This code path can be reached by a daemon thread which was waiting
462
in take_gil() while the main thread called
463
wait_for_thread_shutdown() from Py_Finalize(). */
464
MUTEX_UNLOCK(gil->mutex);
465
drop_gil(ceval, tstate);
466
PyThread_exit_thread();
467
}
468
assert(is_tstate_valid(tstate));
469
470
if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
471
RESET_GIL_DROP_REQUEST(interp);
472
}
473
else {
474
/* bpo-40010: eval_breaker should be recomputed to be set to 1 if there
475
is a pending signal: signal received by another thread which cannot
476
handle signals.
477
478
Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */
479
COMPUTE_EVAL_BREAKER(interp, &_PyRuntime.ceval, ceval);
480
}
481
482
/* Don't access tstate if the thread must exit */
483
if (tstate->async_exc != NULL) {
484
_PyEval_SignalAsyncExc(tstate->interp);
485
}
486
487
MUTEX_UNLOCK(gil->mutex);
488
489
errno = err;
490
}
491
492
void _PyEval_SetSwitchInterval(unsigned long microseconds)
493
{
494
PyInterpreterState *interp = _PyInterpreterState_GET();
495
struct _gil_runtime_state *gil = interp->ceval.gil;
496
assert(gil != NULL);
497
gil->interval = microseconds;
498
}
499
500
unsigned long _PyEval_GetSwitchInterval(void)
501
{
502
PyInterpreterState *interp = _PyInterpreterState_GET();
503
struct _gil_runtime_state *gil = interp->ceval.gil;
504
assert(gil != NULL);
505
return gil->interval;
506
}
507
508
509
int
510
_PyEval_ThreadsInitialized(void)
511
{
512
/* XXX This is only needed for an assert in PyGILState_Ensure(),
513
* which currently does not work with subinterpreters.
514
* Thus we only use the main interpreter. */
515
PyInterpreterState *interp = _PyInterpreterState_Main();
516
if (interp == NULL) {
517
return 0;
518
}
519
struct _gil_runtime_state *gil = interp->ceval.gil;
520
return gil_created(gil);
521
}
522
523
// Function removed in the Python 3.13 API but kept in the stable ABI.
524
PyAPI_FUNC(int)
525
PyEval_ThreadsInitialized(void)
526
{
527
return _PyEval_ThreadsInitialized();
528
}
529
530
static inline int
531
current_thread_holds_gil(struct _gil_runtime_state *gil, PyThreadState *tstate)
532
{
533
if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) != tstate) {
534
return 0;
535
}
536
return _Py_atomic_load_relaxed(&gil->locked);
537
}
538
539
static void
540
init_shared_gil(PyInterpreterState *interp, struct _gil_runtime_state *gil)
541
{
542
assert(gil_created(gil));
543
interp->ceval.gil = gil;
544
interp->ceval.own_gil = 0;
545
}
546
547
static void
548
init_own_gil(PyInterpreterState *interp, struct _gil_runtime_state *gil)
549
{
550
assert(!gil_created(gil));
551
create_gil(gil);
552
assert(gil_created(gil));
553
interp->ceval.gil = gil;
554
interp->ceval.own_gil = 1;
555
}
556
557
PyStatus
558
_PyEval_InitGIL(PyThreadState *tstate, int own_gil)
559
{
560
assert(tstate->interp->ceval.gil == NULL);
561
int locked;
562
if (!own_gil) {
563
/* The interpreter will share the main interpreter's instead. */
564
PyInterpreterState *main_interp = _PyInterpreterState_Main();
565
assert(tstate->interp != main_interp);
566
struct _gil_runtime_state *gil = main_interp->ceval.gil;
567
init_shared_gil(tstate->interp, gil);
568
locked = current_thread_holds_gil(gil, tstate);
569
}
570
else {
571
PyThread_init_thread();
572
init_own_gil(tstate->interp, &tstate->interp->_gil);
573
locked = 0;
574
}
575
if (!locked) {
576
take_gil(tstate);
577
}
578
579
return _PyStatus_OK();
580
}
581
582
void
583
_PyEval_FiniGIL(PyInterpreterState *interp)
584
{
585
struct _gil_runtime_state *gil = interp->ceval.gil;
586
if (gil == NULL) {
587
/* It was already finalized (or hasn't been initialized yet). */
588
assert(!interp->ceval.own_gil);
589
return;
590
}
591
else if (!interp->ceval.own_gil) {
592
#ifdef Py_DEBUG
593
PyInterpreterState *main_interp = _PyInterpreterState_Main();
594
assert(main_interp != NULL && interp != main_interp);
595
assert(interp->ceval.gil == main_interp->ceval.gil);
596
#endif
597
interp->ceval.gil = NULL;
598
return;
599
}
600
601
if (!gil_created(gil)) {
602
/* First Py_InitializeFromConfig() call: the GIL doesn't exist
603
yet: do nothing. */
604
return;
605
}
606
607
destroy_gil(gil);
608
assert(!gil_created(gil));
609
interp->ceval.gil = NULL;
610
}
611
612
// Function removed in the Python 3.13 API but kept in the stable ABI.
613
PyAPI_FUNC(void)
614
PyEval_InitThreads(void)
615
{
616
/* Do nothing: kept for backward compatibility */
617
}
618
619
/* Finalize the ceval machinery; with Py_STATS, dump specialization stats. */
void
_PyEval_Fini(void)
{
#ifdef Py_STATS
    _Py_PrintSpecializationStats(1);
#endif
}
626
627
// Function removed in the Python 3.13 API but kept in the stable ABI.
628
PyAPI_FUNC(void)
629
PyEval_AcquireLock(void)
630
{
631
PyThreadState *tstate = _PyThreadState_GET();
632
_Py_EnsureTstateNotNULL(tstate);
633
634
take_gil(tstate);
635
}
636
637
// Function removed in the Python 3.13 API but kept in the stable ABI.
638
PyAPI_FUNC(void)
639
PyEval_ReleaseLock(void)
640
{
641
PyThreadState *tstate = _PyThreadState_GET();
642
/* This function must succeed when the current thread state is NULL.
643
We therefore avoid PyThreadState_Get() which dumps a fatal error
644
in debug mode. */
645
struct _ceval_state *ceval = &tstate->interp->ceval;
646
drop_gil(ceval, tstate);
647
}
648
649
void
650
_PyEval_AcquireLock(PyThreadState *tstate)
651
{
652
_Py_EnsureTstateNotNULL(tstate);
653
take_gil(tstate);
654
}
655
656
void
657
_PyEval_ReleaseLock(PyInterpreterState *interp, PyThreadState *tstate)
658
{
659
/* If tstate is NULL then we do not expect the current thread
660
to acquire the GIL ever again. */
661
assert(tstate == NULL || tstate->interp == interp);
662
struct _ceval_state *ceval = &interp->ceval;
663
drop_gil(ceval, tstate);
664
}
665
666
void
667
PyEval_AcquireThread(PyThreadState *tstate)
668
{
669
_Py_EnsureTstateNotNULL(tstate);
670
671
take_gil(tstate);
672
673
if (_PyThreadState_SwapNoGIL(tstate) != NULL) {
674
Py_FatalError("non-NULL old thread state");
675
}
676
}
677
678
void
679
PyEval_ReleaseThread(PyThreadState *tstate)
680
{
681
assert(is_tstate_valid(tstate));
682
683
PyThreadState *new_tstate = _PyThreadState_SwapNoGIL(NULL);
684
if (new_tstate != tstate) {
685
Py_FatalError("wrong thread state");
686
}
687
struct _ceval_state *ceval = &tstate->interp->ceval;
688
drop_gil(ceval, tstate);
689
}
690
691
#ifdef HAVE_FORK
/* This function is called from PyOS_AfterFork_Child to destroy all threads
   which are not running in the child process, and clear internal locks
   which might be held by those threads. */
PyStatus
_PyEval_ReInitThreads(PyThreadState *tstate)
{
    assert(tstate->interp == _PyInterpreterState_Main());

    struct _gil_runtime_state *gil = tstate->interp->ceval.gil;
    if (!gil_created(gil)) {
        return _PyStatus_OK();
    }
    /* Recreate GIL primitives: they may have been held by threads that do
       not exist in the child. */
    recreate_gil(gil);

    take_gil(tstate);

    struct _pending_calls *pending = &tstate->interp->ceval.pending;
    if (_PyThread_at_fork_reinit(&pending->lock) < 0) {
        return _PyStatus_ERR("Can't reinitialize pending calls lock");
    }

    /* Destroy all threads except the current one */
    _PyThreadState_DeleteExcept(tstate);
    return _PyStatus_OK();
}
#endif
718
719
/* This function is used to signal that async exceptions are waiting to be
720
raised. */
721
722
void
723
_PyEval_SignalAsyncExc(PyInterpreterState *interp)
724
{
725
SIGNAL_ASYNC_EXC(interp);
726
}
727
728
PyThreadState *
729
PyEval_SaveThread(void)
730
{
731
PyThreadState *tstate = _PyThreadState_SwapNoGIL(NULL);
732
_Py_EnsureTstateNotNULL(tstate);
733
734
struct _ceval_state *ceval = &tstate->interp->ceval;
735
assert(gil_created(ceval->gil));
736
drop_gil(ceval, tstate);
737
return tstate;
738
}
739
740
void
741
PyEval_RestoreThread(PyThreadState *tstate)
742
{
743
_Py_EnsureTstateNotNULL(tstate);
744
745
take_gil(tstate);
746
747
_PyThreadState_SwapNoGIL(tstate);
748
}
749
750
751
/* Mechanism whereby asynchronously executing callbacks (e.g. UNIX
   signal handlers or Mac I/O completion routines) can schedule calls
   to a function to be called synchronously.
   The synchronous function is called with one void* argument.
   It should return 0 for success or -1 for failure -- failure should
   be accompanied by an exception.

   If registry succeeds, the registry function returns 0; if it fails
   (e.g. due to too many pending calls) it returns -1 (without setting
   an exception condition).

   Note that because registry may occur from within signal handlers,
   or other asynchronous events, calling malloc() is unsafe!

   Any thread can schedule pending calls, but only the main thread
   will execute them.
   There is no facility to schedule calls to a particular thread, but
   that should be easy to change, should that ever be required.  In
   that case, the static variables here should go into the python
   threadstate.
*/
772
773
void
774
_PyEval_SignalReceived(PyInterpreterState *interp)
775
{
776
#ifdef MS_WINDOWS
777
// bpo-42296: On Windows, _PyEval_SignalReceived() is called from a signal
778
// handler which can run in a thread different than the Python thread, in
779
// which case _Py_ThreadCanHandleSignals() is wrong. Ignore
780
// _Py_ThreadCanHandleSignals() and always set eval_breaker to 1.
781
//
782
// The next eval_frame_handle_pending() call will call
783
// _Py_ThreadCanHandleSignals() to recompute eval_breaker.
784
int force = 1;
785
#else
786
int force = 0;
787
#endif
788
/* bpo-30703: Function called when the C signal handler of Python gets a
789
signal. We cannot queue a callback using _PyEval_AddPendingCall() since
790
that function is not async-signal-safe. */
791
SIGNAL_PENDING_SIGNALS(interp, force);
792
}
793
794
/* Push one item onto the queue while holding the lock. */
795
static int
796
_push_pending_call(struct _pending_calls *pending,
797
int (*func)(void *), void *arg)
798
{
799
int i = pending->last;
800
int j = (i + 1) % NPENDINGCALLS;
801
if (j == pending->first) {
802
return -1; /* Queue full */
803
}
804
pending->calls[i].func = func;
805
pending->calls[i].arg = arg;
806
pending->last = j;
807
return 0;
808
}
809
810
static int
811
_next_pending_call(struct _pending_calls *pending,
812
int (**func)(void *), void **arg)
813
{
814
int i = pending->first;
815
if (i == pending->last) {
816
/* Queue empty */
817
assert(pending->calls[i].func == NULL);
818
return -1;
819
}
820
*func = pending->calls[i].func;
821
*arg = pending->calls[i].arg;
822
return i;
823
}
824
825
/* Pop one item off the queue while holding the lock. */
826
static void
827
_pop_pending_call(struct _pending_calls *pending,
828
int (**func)(void *), void **arg)
829
{
830
int i = _next_pending_call(pending, func, arg);
831
if (i >= 0) {
832
pending->calls[i] = (struct _pending_call){0};
833
pending->first = (i + 1) % NPENDINGCALLS;
834
}
835
}
836
837
/* This implementation is thread-safe. It allows
838
scheduling to be made from any thread, and even from an executing
839
callback.
840
*/
841
842
int
843
_PyEval_AddPendingCall(PyInterpreterState *interp,
844
int (*func)(void *), void *arg,
845
int mainthreadonly)
846
{
847
assert(!mainthreadonly || _Py_IsMainInterpreter(interp));
848
struct _pending_calls *pending = &interp->ceval.pending;
849
if (mainthreadonly) {
850
/* The main thread only exists in the main interpreter. */
851
assert(_Py_IsMainInterpreter(interp));
852
pending = &_PyRuntime.ceval.pending_mainthread;
853
}
854
/* Ensure that _PyEval_InitState() was called
855
and that _PyEval_FiniState() is not called yet. */
856
assert(pending->lock != NULL);
857
858
PyThread_acquire_lock(pending->lock, WAIT_LOCK);
859
int result = _push_pending_call(pending, func, arg);
860
PyThread_release_lock(pending->lock);
861
862
/* signal main loop */
863
SIGNAL_PENDING_CALLS(pending, interp);
864
return result;
865
}
866
867
int
868
Py_AddPendingCall(int (*func)(void *), void *arg)
869
{
870
/* Legacy users of this API will continue to target the main thread
871
(of the main interpreter). */
872
PyInterpreterState *interp = _PyInterpreterState_Main();
873
return _PyEval_AddPendingCall(interp, func, arg, 1);
874
}
875
876
static int
877
handle_signals(PyThreadState *tstate)
878
{
879
assert(is_tstate_valid(tstate));
880
if (!_Py_ThreadCanHandleSignals(tstate->interp)) {
881
return 0;
882
}
883
884
UNSIGNAL_PENDING_SIGNALS(tstate->interp);
885
if (_PyErr_CheckSignalsTstate(tstate) < 0) {
886
/* On failure, re-schedule a call to handle_signals(). */
887
SIGNAL_PENDING_SIGNALS(tstate->interp, 0);
888
return -1;
889
}
890
return 0;
891
}
892
893
static inline int
894
maybe_has_pending_calls(PyInterpreterState *interp)
895
{
896
struct _pending_calls *pending = &interp->ceval.pending;
897
if (_Py_atomic_load_relaxed_int32(&pending->calls_to_do)) {
898
return 1;
899
}
900
if (!_Py_IsMainThread() || !_Py_IsMainInterpreter(interp)) {
901
return 0;
902
}
903
pending = &_PyRuntime.ceval.pending_mainthread;
904
return _Py_atomic_load_relaxed_int32(&pending->calls_to_do);
905
}
906
907
static int
908
_make_pending_calls(struct _pending_calls *pending)
909
{
910
/* perform a bounded number of calls, in case of recursion */
911
for (int i=0; i<NPENDINGCALLS; i++) {
912
int (*func)(void *) = NULL;
913
void *arg = NULL;
914
915
/* pop one item off the queue while holding the lock */
916
PyThread_acquire_lock(pending->lock, WAIT_LOCK);
917
_pop_pending_call(pending, &func, &arg);
918
PyThread_release_lock(pending->lock);
919
920
/* having released the lock, perform the callback */
921
if (func == NULL) {
922
break;
923
}
924
if (func(arg) != 0) {
925
return -1;
926
}
927
}
928
return 0;
929
}
930
931
static int
932
make_pending_calls(PyInterpreterState *interp)
933
{
934
struct _pending_calls *pending = &interp->ceval.pending;
935
struct _pending_calls *pending_main = &_PyRuntime.ceval.pending_mainthread;
936
937
/* Only one thread (per interpreter) may run the pending calls
938
at once. In the same way, we don't do recursive pending calls. */
939
PyThread_acquire_lock(pending->lock, WAIT_LOCK);
940
if (pending->busy) {
941
/* A pending call was added after another thread was already
942
handling the pending calls (and had already "unsignaled").
943
Once that thread is done, it may have taken care of all the
944
pending calls, or there might be some still waiting.
945
Regardless, this interpreter's pending calls will stay
946
"signaled" until that first thread has finished. At that
947
point the next thread to trip the eval breaker will take
948
care of any remaining pending calls. Until then, though,
949
all the interpreter's threads will be tripping the eval
950
breaker every time it's checked. */
951
PyThread_release_lock(pending->lock);
952
return 0;
953
}
954
pending->busy = 1;
955
PyThread_release_lock(pending->lock);
956
957
/* unsignal before starting to call callbacks, so that any callback
958
added in-between re-signals */
959
UNSIGNAL_PENDING_CALLS(interp);
960
961
if (_make_pending_calls(pending) != 0) {
962
pending->busy = 0;
963
/* There might not be more calls to make, but we play it safe. */
964
SIGNAL_PENDING_CALLS(pending, interp);
965
return -1;
966
}
967
968
if (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)) {
969
if (_make_pending_calls(pending_main) != 0) {
970
pending->busy = 0;
971
/* There might not be more calls to make, but we play it safe. */
972
SIGNAL_PENDING_CALLS(pending_main, interp);
973
return -1;
974
}
975
}
976
977
pending->busy = 0;
978
return 0;
979
}
980
981
void
982
_Py_FinishPendingCalls(PyThreadState *tstate)
983
{
984
assert(PyGILState_Check());
985
assert(is_tstate_valid(tstate));
986
987
if (make_pending_calls(tstate->interp) < 0) {
988
PyObject *exc = _PyErr_GetRaisedException(tstate);
989
PyErr_BadInternalCall();
990
_PyErr_ChainExceptions1(exc);
991
_PyErr_Print(tstate);
992
}
993
}
994
995
int
996
_PyEval_MakePendingCalls(PyThreadState *tstate)
997
{
998
int res;
999
1000
if (_Py_IsMainThread() && _Py_IsMainInterpreter(tstate->interp)) {
1001
/* Python signal handler doesn't really queue a callback:
1002
it only signals that a signal was received,
1003
see _PyEval_SignalReceived(). */
1004
res = handle_signals(tstate);
1005
if (res != 0) {
1006
return res;
1007
}
1008
}
1009
1010
res = make_pending_calls(tstate->interp);
1011
if (res != 0) {
1012
return res;
1013
}
1014
1015
return 0;
1016
}
1017
1018
/* Py_MakePendingCalls() is a simple wrapper for the sake
1019
of backward-compatibility. */
1020
int
1021
Py_MakePendingCalls(void)
1022
{
1023
assert(PyGILState_Check());
1024
1025
PyThreadState *tstate = _PyThreadState_GET();
1026
assert(is_tstate_valid(tstate));
1027
1028
/* Only execute pending calls on the main thread. */
1029
if (!_Py_IsMainThread() || !_Py_IsMainInterpreter(tstate->interp)) {
1030
return 0;
1031
}
1032
return _PyEval_MakePendingCalls(tstate);
1033
}
1034
1035
void
1036
_PyEval_InitState(PyInterpreterState *interp, PyThread_type_lock pending_lock)
1037
{
1038
_gil_initialize(&interp->_gil);
1039
1040
struct _pending_calls *pending = &interp->ceval.pending;
1041
assert(pending->lock == NULL);
1042
pending->lock = pending_lock;
1043
}
1044
1045
void
1046
_PyEval_FiniState(struct _ceval_state *ceval)
1047
{
1048
struct _pending_calls *pending = &ceval->pending;
1049
if (pending->lock != NULL) {
1050
PyThread_free_lock(pending->lock);
1051
pending->lock = NULL;
1052
}
1053
}
1054
1055
/* Handle signals, pending calls, GIL drop request
   and asynchronous exception.

   Called from the eval loop when the eval breaker is tripped.  The
   checks below run in a fixed order; each one may raise (returning -1)
   or release/reacquire the GIL.  Returns 0 if execution may continue. */
int
_Py_HandlePending(PyThreadState *tstate)
{
    _PyRuntimeState * const runtime = &_PyRuntime;
    struct _ceval_runtime_state *ceval = &runtime->ceval;
    struct _ceval_state *interp_ceval_state = &tstate->interp->ceval;

    /* Pending signals: dispatch Python-level signal handlers. */
    if (_Py_atomic_load_relaxed_int32(&ceval->signals_pending)) {
        if (handle_signals(tstate) != 0) {
            return -1;
        }
    }

    /* Pending calls queued via Py_AddPendingCall() and friends. */
    if (maybe_has_pending_calls(tstate->interp)) {
        if (make_pending_calls(tstate->interp) != 0) {
            return -1;
        }
    }

    /* GC scheduled to run: clear the flag (and recompute the breaker so
       it is no longer tripped by it) before collecting. */
    if (_Py_atomic_load_relaxed_int32(&interp_ceval_state->gc_scheduled)) {
        _Py_atomic_store_relaxed(&interp_ceval_state->gc_scheduled, 0);
        COMPUTE_EVAL_BREAKER(tstate->interp, ceval, interp_ceval_state);
        _Py_RunGC(tstate);
    }

    /* GIL drop request */
    if (_Py_atomic_load_relaxed_int32(&interp_ceval_state->gil_drop_request)) {
        /* Give another thread a chance.  Detach our tstate before
           releasing the GIL; any mismatch means corrupted state. */
        if (_PyThreadState_SwapNoGIL(NULL) != tstate) {
            Py_FatalError("tstate mix-up");
        }
        drop_gil(interp_ceval_state, tstate);

        /* Other threads may run now */

        take_gil(tstate);

        if (_PyThreadState_SwapNoGIL(tstate) != NULL) {
            Py_FatalError("orphan tstate");
        }
    }

    /* Check for asynchronous exception (set via
       PyThreadState_SetAsyncExc): raise it in this thread, exactly once. */
    if (tstate->async_exc != NULL) {
        PyObject *exc = tstate->async_exc;
        tstate->async_exc = NULL;
        UNSIGNAL_ASYNC_EXC(tstate->interp);
        _PyErr_SetNone(tstate, exc);
        Py_DECREF(exc);
        return -1;
    }


    // It is possible that some of the conditions that trigger the eval breaker
    // are called in a different thread than the Python thread. An example of
    // this is bpo-42296: On Windows, _PyEval_SignalReceived() can be called in
    // a different thread than the Python thread, in which case
    // _Py_ThreadCanHandleSignals() is wrong. Recompute eval_breaker in the
    // current Python thread with the correct _Py_ThreadCanHandleSignals()
    // value. It prevents to interrupt the eval loop at every instruction if
    // the current Python thread cannot handle signals (if
    // _Py_ThreadCanHandleSignals() is false).
    COMPUTE_EVAL_BREAKER(tstate->interp, ceval, interp_ceval_state);

    return 0;
}
1126
1127
1128