#include "Python.h"
#include "pycore_atomic.h"
#include "pycore_ceval.h"
#include "pycore_pyerrors.h"
#include "pycore_pylifecycle.h"
#include "pycore_initconfig.h"
#include "pycore_interp.h"
#include "pycore_pymem.h"
/* On MSVC, read the 32-bit value directly through a volatile pointer
   instead of going through _Py_atomic_load_relaxed(). */
#if defined(_MSC_VER) && SIZEOF_INT == 4
#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) \
    (assert(sizeof((ATOMIC_VAL)->_value) == 4), \
     *((volatile int*)&((ATOMIC_VAL)->_value)))
#else
#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) \
    _Py_atomic_load_relaxed(ATOMIC_VAL)
#endif
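/* The "eval breaker" collapses several independent conditions into the
   single eval_breaker flag that the eval loop polls: a GIL drop request,
   pending signals (when this thread may handle them), pending calls, a
   scheduled GC run and a pending asynchronous exception.  When the flag
   trips, _Py_HandlePending() dispatches on the individual flags. */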
static inline void
COMPUTE_EVAL_BREAKER(PyInterpreterState *interp,
struct _ceval_runtime_state *ceval,
struct _ceval_state *ceval2)
{
_Py_atomic_store_relaxed(&ceval2->eval_breaker,
_Py_atomic_load_relaxed_int32(&ceval2->gil_drop_request)
| (_Py_atomic_load_relaxed_int32(&ceval->signals_pending)
&& _Py_ThreadCanHandleSignals(interp))
| (_Py_atomic_load_relaxed_int32(&ceval2->pending.calls_to_do))
| (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)
           && _Py_atomic_load_relaxed_int32(&ceval->pending_mainthread.calls_to_do))
| ceval2->pending.async_exc
| _Py_atomic_load_relaxed_int32(&ceval2->gc_scheduled));
}
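/* Ask the GIL holder to yield.  eval_breaker is forced to 1 directly,
   without recomputing, so the holder notices as soon as possible. */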
static inline void
SET_GIL_DROP_REQUEST(PyInterpreterState *interp)
{
struct _ceval_state *ceval2 = &interp->ceval;
_Py_atomic_store_relaxed(&ceval2->gil_drop_request, 1);
_Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
}
static inline void
RESET_GIL_DROP_REQUEST(PyInterpreterState *interp)
{
struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
struct _ceval_state *ceval2 = &interp->ceval;
_Py_atomic_store_relaxed(&ceval2->gil_drop_request, 0);
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}
static inline void
SIGNAL_PENDING_CALLS(struct _pending_calls *pending, PyInterpreterState *interp)
{
struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
struct _ceval_state *ceval2 = &interp->ceval;
_Py_atomic_store_relaxed(&pending->calls_to_do, 1);
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}
static inline void
UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp)
{
struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
struct _ceval_state *ceval2 = &interp->ceval;
if (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)) {
_Py_atomic_store_relaxed(&ceval->pending_mainthread.calls_to_do, 0);
}
_Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 0);
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}
static inline void
SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp, int force)
{
struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
struct _ceval_state *ceval2 = &interp->ceval;
_Py_atomic_store_relaxed(&ceval->signals_pending, 1);
if (force) {
_Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
}
else {
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}
}
static inline void
UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp)
{
struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
struct _ceval_state *ceval2 = &interp->ceval;
_Py_atomic_store_relaxed(&ceval->signals_pending, 0);
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}
static inline void
SIGNAL_ASYNC_EXC(PyInterpreterState *interp)
{
struct _ceval_state *ceval2 = &interp->ceval;
ceval2->pending.async_exc = 1;
_Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
}
static inline void
UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp)
{
struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
struct _ceval_state *ceval2 = &interp->ceval;
ceval2->pending.async_exc = 0;
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}
#ifndef NDEBUG
static int
is_tstate_valid(PyThreadState *tstate)
{
assert(!_PyMem_IsPtrFreed(tstate));
assert(!_PyMem_IsPtrFreed(tstate->interp));
return 1;
}
#endif
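/*
   Implementation of the Global Interpreter Lock (GIL).

   The GIL is a boolean (gil->locked, -1 while uninitialized) protected
   by a mutex (gil->mutex) and signalled through a condition variable
   (gil->cond).  A thread that wants the GIL waits on the condition with
   a timeout of gil->interval microseconds; if it times out while the
   holder has not changed, it sets gil_drop_request to ask the holder to
   yield.
*/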
#include <stdlib.h>
#include <errno.h>
#include "pycore_atomic.h"
#include "condvar.h"
#define MUTEX_INIT(mut) \
if (PyMUTEX_INIT(&(mut))) { \
Py_FatalError("PyMUTEX_INIT(" #mut ") failed"); };
#define MUTEX_FINI(mut) \
if (PyMUTEX_FINI(&(mut))) { \
Py_FatalError("PyMUTEX_FINI(" #mut ") failed"); };
#define MUTEX_LOCK(mut) \
if (PyMUTEX_LOCK(&(mut))) { \
Py_FatalError("PyMUTEX_LOCK(" #mut ") failed"); };
#define MUTEX_UNLOCK(mut) \
if (PyMUTEX_UNLOCK(&(mut))) { \
Py_FatalError("PyMUTEX_UNLOCK(" #mut ") failed"); };
#define COND_INIT(cond) \
if (PyCOND_INIT(&(cond))) { \
Py_FatalError("PyCOND_INIT(" #cond ") failed"); };
#define COND_FINI(cond) \
if (PyCOND_FINI(&(cond))) { \
Py_FatalError("PyCOND_FINI(" #cond ") failed"); };
#define COND_SIGNAL(cond) \
if (PyCOND_SIGNAL(&(cond))) { \
Py_FatalError("PyCOND_SIGNAL(" #cond ") failed"); };
#define COND_WAIT(cond, mut) \
if (PyCOND_WAIT(&(cond), &(mut))) { \
Py_FatalError("PyCOND_WAIT(" #cond ") failed"); };
#define COND_TIMED_WAIT(cond, mut, microseconds, timeout_result) \
    { \
        int r = PyCOND_TIMEDWAIT(&(cond), &(mut), (microseconds)); \
        if (r < 0) \
            Py_FatalError("PyCOND_TIMEDWAIT(" #cond ") failed"); \
        if (r) /* 1 == timeout */ \
            timeout_result = 1; \
        else \
            timeout_result = 0; \
    }
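/* Default GIL switch interval, in microseconds: 5 ms, matching the
   default of sys.getswitchinterval() (0.005 s). */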
#define DEFAULT_INTERVAL 5000
static void _gil_initialize(struct _gil_runtime_state *gil)
{
_Py_atomic_int uninitialized = {-1};
gil->locked = uninitialized;
gil->interval = DEFAULT_INTERVAL;
}
static int gil_created(struct _gil_runtime_state *gil)
{
if (gil == NULL) {
return 0;
}
return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0);
}
static void create_gil(struct _gil_runtime_state *gil)
{
MUTEX_INIT(gil->mutex);
#ifdef FORCE_SWITCHING
MUTEX_INIT(gil->switch_mutex);
#endif
COND_INIT(gil->cond);
#ifdef FORCE_SWITCHING
COND_INIT(gil->switch_cond);
#endif
_Py_atomic_store_relaxed(&gil->last_holder, 0);
_Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
_Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release);
}
static void destroy_gil(struct _gil_runtime_state *gil)
{
COND_FINI(gil->cond);
MUTEX_FINI(gil->mutex);
#ifdef FORCE_SWITCHING
COND_FINI(gil->switch_cond);
MUTEX_FINI(gil->switch_mutex);
#endif
_Py_atomic_store_explicit(&gil->locked, -1,
_Py_memory_order_release);
_Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
}
#ifdef HAVE_FORK
static void recreate_gil(struct _gil_runtime_state *gil)
{
_Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
create_gil(gil);
}
#endif
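/* Release the GIL.  With FORCE_SWITCHING, when another thread explicitly
   requested a drop, also wait on switch_cond until some other thread has
   actually taken the GIL, so the releasing thread cannot immediately
   re-acquire it and starve the waiters. */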
static void
drop_gil(struct _ceval_state *ceval, PyThreadState *tstate)
{
struct _gil_runtime_state *gil = ceval->gil;
if (!_Py_atomic_load_relaxed(&gil->locked)) {
Py_FatalError("drop_gil: GIL is not locked");
}
if (tstate != NULL) {
_Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
}
MUTEX_LOCK(gil->mutex);
_Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, 1);
_Py_atomic_store_relaxed(&gil->locked, 0);
COND_SIGNAL(gil->cond);
MUTEX_UNLOCK(gil->mutex);
#ifdef FORCE_SWITCHING
if (tstate != NULL && _Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
MUTEX_LOCK(gil->switch_mutex);
if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
{
assert(is_tstate_valid(tstate));
RESET_GIL_DROP_REQUEST(tstate->interp);
COND_WAIT(gil->switch_cond, gil->switch_mutex);
}
MUTEX_UNLOCK(gil->switch_mutex);
}
#endif
}
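/* Check whether a Python thread must exit immediately rather than take
   the GIL: true when the runtime, or this thread's interpreter, is being
   finalized by some other thread.  Daemon threads hit this path after
   Py_Finalize() has been called. */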
static inline int
tstate_must_exit(PyThreadState *tstate)
{
PyThreadState *finalizing = _PyRuntimeState_GetFinalizing(&_PyRuntime);
if (finalizing == NULL) {
finalizing = _PyInterpreterState_GetFinalizing(tstate->interp);
}
return (finalizing != NULL && finalizing != tstate);
}
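/* Take the GIL.  The function saves errno at entry and restores it at
   exit; if the runtime is finalizing it exits the thread through
   PyThread_exit_thread() instead of returning.  tstate must be
   non-NULL. */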
static void
take_gil(PyThreadState *tstate)
{
int err = errno;
assert(tstate != NULL);
if (tstate_must_exit(tstate)) {
PyThread_exit_thread();
}
assert(is_tstate_valid(tstate));
PyInterpreterState *interp = tstate->interp;
struct _ceval_state *ceval = &interp->ceval;
struct _gil_runtime_state *gil = ceval->gil;
assert(gil_created(gil));
MUTEX_LOCK(gil->mutex);
if (!_Py_atomic_load_relaxed(&gil->locked)) {
goto _ready;
}
int drop_requested = 0;
while (_Py_atomic_load_relaxed(&gil->locked)) {
unsigned long saved_switchnum = gil->switch_number;
unsigned long interval = (gil->interval >= 1 ? gil->interval : 1);
int timed_out = 0;
COND_TIMED_WAIT(gil->cond, gil->mutex, interval, timed_out);
if (timed_out &&
_Py_atomic_load_relaxed(&gil->locked) &&
gil->switch_number == saved_switchnum)
{
if (tstate_must_exit(tstate)) {
MUTEX_UNLOCK(gil->mutex);
if (drop_requested) {
RESET_GIL_DROP_REQUEST(interp);
}
PyThread_exit_thread();
}
assert(is_tstate_valid(tstate));
SET_GIL_DROP_REQUEST(interp);
drop_requested = 1;
}
}
_ready:
#ifdef FORCE_SWITCHING
MUTEX_LOCK(gil->switch_mutex);
#endif
_Py_atomic_store_relaxed(&gil->locked, 1);
_Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, 1);
if (tstate != (PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) {
_Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
++gil->switch_number;
}
#ifdef FORCE_SWITCHING
COND_SIGNAL(gil->switch_cond);
MUTEX_UNLOCK(gil->switch_mutex);
#endif
if (tstate_must_exit(tstate)) {
MUTEX_UNLOCK(gil->mutex);
drop_gil(ceval, tstate);
PyThread_exit_thread();
}
assert(is_tstate_valid(tstate));
if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
RESET_GIL_DROP_REQUEST(interp);
}
else {
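        /* Recompute eval_breaker anyway: for instance, a signal may have
           been received by another thread that cannot handle it.  (In the
           branch above, RESET_GIL_DROP_REQUEST() already recomputes it.) */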
COMPUTE_EVAL_BREAKER(interp, &_PyRuntime.ceval, ceval);
}
if (tstate->async_exc != NULL) {
_PyEval_SignalAsyncExc(tstate->interp);
}
MUTEX_UNLOCK(gil->mutex);
errno = err;
}
void _PyEval_SetSwitchInterval(unsigned long microseconds)
{
PyInterpreterState *interp = _PyInterpreterState_GET();
struct _gil_runtime_state *gil = interp->ceval.gil;
assert(gil != NULL);
gil->interval = microseconds;
}
unsigned long _PyEval_GetSwitchInterval(void)
{
PyInterpreterState *interp = _PyInterpreterState_GET();
struct _gil_runtime_state *gil = interp->ceval.gil;
assert(gil != NULL);
return gil->interval;
}
int
_PyEval_ThreadsInitialized(void)
{
PyInterpreterState *interp = _PyInterpreterState_Main();
if (interp == NULL) {
return 0;
}
struct _gil_runtime_state *gil = interp->ceval.gil;
return gil_created(gil);
}
PyAPI_FUNC(int)
PyEval_ThreadsInitialized(void)
{
return _PyEval_ThreadsInitialized();
}
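/* With a per-interpreter GIL (PEP 684), a subinterpreter either shares
   the main interpreter's GIL or owns its own; ceval.own_gil records
   which, so finalization knows whether the GIL must be destroyed. */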
static inline int
current_thread_holds_gil(struct _gil_runtime_state *gil, PyThreadState *tstate)
{
if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) != tstate) {
return 0;
}
return _Py_atomic_load_relaxed(&gil->locked);
}
static void
init_shared_gil(PyInterpreterState *interp, struct _gil_runtime_state *gil)
{
assert(gil_created(gil));
interp->ceval.gil = gil;
interp->ceval.own_gil = 0;
}
static void
init_own_gil(PyInterpreterState *interp, struct _gil_runtime_state *gil)
{
assert(!gil_created(gil));
create_gil(gil);
assert(gil_created(gil));
interp->ceval.gil = gil;
interp->ceval.own_gil = 1;
}
PyStatus
_PyEval_InitGIL(PyThreadState *tstate, int own_gil)
{
assert(tstate->interp->ceval.gil == NULL);
int locked;
if (!own_gil) {
PyInterpreterState *main_interp = _PyInterpreterState_Main();
assert(tstate->interp != main_interp);
struct _gil_runtime_state *gil = main_interp->ceval.gil;
init_shared_gil(tstate->interp, gil);
locked = current_thread_holds_gil(gil, tstate);
}
else {
PyThread_init_thread();
init_own_gil(tstate->interp, &tstate->interp->_gil);
locked = 0;
}
if (!locked) {
take_gil(tstate);
}
return _PyStatus_OK();
}
void
_PyEval_FiniGIL(PyInterpreterState *interp)
{
struct _gil_runtime_state *gil = interp->ceval.gil;
if (gil == NULL) {
assert(!interp->ceval.own_gil);
return;
}
else if (!interp->ceval.own_gil) {
#ifdef Py_DEBUG
PyInterpreterState *main_interp = _PyInterpreterState_Main();
assert(main_interp != NULL && interp != main_interp);
assert(interp->ceval.gil == main_interp->ceval.gil);
#endif
interp->ceval.gil = NULL;
return;
}
if (!gil_created(gil)) {
return;
}
destroy_gil(gil);
assert(!gil_created(gil));
interp->ceval.gil = NULL;
}
PyAPI_FUNC(void)
PyEval_InitThreads(void)
{
    /* Deprecated no-op: Py_Initialize() has created the GIL since
       Python 3.7, and this function has done nothing since 3.9. */
}
void
_PyEval_Fini(void)
{
#ifdef Py_STATS
_Py_PrintSpecializationStats(1);
#endif
}
PyAPI_FUNC(void)
PyEval_AcquireLock(void)
{
PyThreadState *tstate = _PyThreadState_GET();
_Py_EnsureTstateNotNULL(tstate);
take_gil(tstate);
}
PyAPI_FUNC(void)
PyEval_ReleaseLock(void)
{
PyThreadState *tstate = _PyThreadState_GET();
struct _ceval_state *ceval = &tstate->interp->ceval;
drop_gil(ceval, tstate);
}
void
_PyEval_AcquireLock(PyThreadState *tstate)
{
_Py_EnsureTstateNotNULL(tstate);
take_gil(tstate);
}
void
_PyEval_ReleaseLock(PyInterpreterState *interp, PyThreadState *tstate)
{
assert(tstate == NULL || tstate->interp == interp);
struct _ceval_state *ceval = &interp->ceval;
drop_gil(ceval, tstate);
}
void
PyEval_AcquireThread(PyThreadState *tstate)
{
_Py_EnsureTstateNotNULL(tstate);
take_gil(tstate);
if (_PyThreadState_SwapNoGIL(tstate) != NULL) {
Py_FatalError("non-NULL old thread state");
}
}
void
PyEval_ReleaseThread(PyThreadState *tstate)
{
assert(is_tstate_valid(tstate));
PyThreadState *new_tstate = _PyThreadState_SwapNoGIL(NULL);
if (new_tstate != tstate) {
Py_FatalError("wrong thread state");
}
struct _ceval_state *ceval = &tstate->interp->ceval;
drop_gil(ceval, tstate);
}
#ifdef HAVE_FORK
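/* After fork(), only the calling thread survives in the child: recreate
   the GIL and the pending-calls lock, then drop every other thread
   state. */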
PyStatus
_PyEval_ReInitThreads(PyThreadState *tstate)
{
assert(tstate->interp == _PyInterpreterState_Main());
struct _gil_runtime_state *gil = tstate->interp->ceval.gil;
if (!gil_created(gil)) {
return _PyStatus_OK();
}
recreate_gil(gil);
take_gil(tstate);
struct _pending_calls *pending = &tstate->interp->ceval.pending;
if (_PyThread_at_fork_reinit(&pending->lock) < 0) {
return _PyStatus_ERR("Can't reinitialize pending calls lock");
}
_PyThreadState_DeleteExcept(tstate);
return _PyStatus_OK();
}
#endif
void
_PyEval_SignalAsyncExc(PyInterpreterState *interp)
{
SIGNAL_ASYNC_EXC(interp);
}
PyThreadState *
PyEval_SaveThread(void)
{
PyThreadState *tstate = _PyThreadState_SwapNoGIL(NULL);
_Py_EnsureTstateNotNULL(tstate);
struct _ceval_state *ceval = &tstate->interp->ceval;
assert(gil_created(ceval->gil));
drop_gil(ceval, tstate);
return tstate;
}
void
PyEval_RestoreThread(PyThreadState *tstate)
{
_Py_EnsureTstateNotNULL(tstate);
take_gil(tstate);
_PyThreadState_SwapNoGIL(tstate);
}
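/* A minimal sketch of the save/restore pattern around a blocking call in
   an extension module, equivalent to what the Py_BEGIN_ALLOW_THREADS /
   Py_END_ALLOW_THREADS macros expand to (do_blocking_io() is a
   hypothetical worker):

       PyThreadState *_save = PyEval_SaveThread();  // release the GIL
       do_blocking_io();                            // run without the GIL
       PyEval_RestoreThread(_save);                 // re-take the GIL
*/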
void
_PyEval_SignalReceived(PyInterpreterState *interp)
{
#ifdef MS_WINDOWS
int force = 1;
#else
int force = 0;
#endif
SIGNAL_PENDING_SIGNALS(interp, force);
}
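/* Implementation of "pending calls": a small ring buffer of callbacks
   scheduled through Py_AddPendingCall() / _PyEval_AddPendingCall() and
   run by the eval loop the next time the eval breaker trips.  first and
   last index the ring, which holds at most NPENDINGCALLS - 1 entries. */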
static int
_push_pending_call(struct _pending_calls *pending,
int (*func)(void *), void *arg)
{
int i = pending->last;
int j = (i + 1) % NPENDINGCALLS;
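    /* The ring is full when advancing "last" would collide with "first". */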
if (j == pending->first) {
return -1;
}
pending->calls[i].func = func;
pending->calls[i].arg = arg;
pending->last = j;
return 0;
}
static int
_next_pending_call(struct _pending_calls *pending,
int (**func)(void *), void **arg)
{
int i = pending->first;
if (i == pending->last) {
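        /* Queue empty. */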
assert(pending->calls[i].func == NULL);
return -1;
}
*func = pending->calls[i].func;
*arg = pending->calls[i].arg;
return i;
}
static void
_pop_pending_call(struct _pending_calls *pending,
int (**func)(void *), void **arg)
{
int i = _next_pending_call(pending, func, arg);
if (i >= 0) {
pending->calls[i] = (struct _pending_call){0};
pending->first = (i + 1) % NPENDINGCALLS;
}
}
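/* Schedule a callback to run in the eval loop.  Thread-safe: it may be
   called from any thread, with or without the GIL held, and even from a
   pending callback itself.  With mainthreadonly, the call goes on the
   runtime-wide queue that only the main thread drains. */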
int
_PyEval_AddPendingCall(PyInterpreterState *interp,
int (*func)(void *), void *arg,
int mainthreadonly)
{
assert(!mainthreadonly || _Py_IsMainInterpreter(interp));
struct _pending_calls *pending = &interp->ceval.pending;
if (mainthreadonly) {
assert(_Py_IsMainInterpreter(interp));
pending = &_PyRuntime.ceval.pending_mainthread;
}
assert(pending->lock != NULL);
PyThread_acquire_lock(pending->lock, WAIT_LOCK);
int result = _push_pending_call(pending, func, arg);
PyThread_release_lock(pending->lock);
SIGNAL_PENDING_CALLS(pending, interp);
return result;
}
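/* A sketch of typical use from a non-Python thread; callback_fn and
   callback_arg are hypothetical.  The callback runs later in the main
   thread with the GIL held and must return 0 on success:

       static int callback_fn(void *arg) {
           PySys_WriteStdout("woken up: %s\n", (const char *)arg);
           return 0;
       }
       ...
       if (Py_AddPendingCall(callback_fn, callback_arg) < 0) {
           // Queue full: retry later.
       }
*/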
int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
PyInterpreterState *interp = _PyInterpreterState_Main();
return _PyEval_AddPendingCall(interp, func, arg, 1);
}
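/* Run signal handlers via _PyErr_CheckSignalsTstate() if this thread is
   allowed to (the main thread of the main interpreter).  On failure,
   re-signal so that handle_signals() gets re-scheduled. */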
static int
handle_signals(PyThreadState *tstate)
{
assert(is_tstate_valid(tstate));
if (!_Py_ThreadCanHandleSignals(tstate->interp)) {
return 0;
}
UNSIGNAL_PENDING_SIGNALS(tstate->interp);
if (_PyErr_CheckSignalsTstate(tstate) < 0) {
SIGNAL_PENDING_SIGNALS(tstate->interp, 0);
return -1;
}
return 0;
}
static inline int
maybe_has_pending_calls(PyInterpreterState *interp)
{
struct _pending_calls *pending = &interp->ceval.pending;
if (_Py_atomic_load_relaxed_int32(&pending->calls_to_do)) {
return 1;
}
if (!_Py_IsMainThread() || !_Py_IsMainInterpreter(interp)) {
return 0;
}
pending = &_PyRuntime.ceval.pending_mainthread;
return _Py_atomic_load_relaxed_int32(&pending->calls_to_do);
}
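/* Pop and run queued calls one at a time, holding the lock only around
   each pop.  The loop is bounded by NPENDINGCALLS so that a callback
   which re-schedules itself cannot monopolize the eval loop. */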
static int
_make_pending_calls(struct _pending_calls *pending)
{
for (int i=0; i<NPENDINGCALLS; i++) {
int (*func)(void *) = NULL;
void *arg = NULL;
PyThread_acquire_lock(pending->lock, WAIT_LOCK);
_pop_pending_call(pending, &func, &arg);
PyThread_release_lock(pending->lock);
if (func == NULL) {
break;
}
if (func(arg) != 0) {
return -1;
}
}
return 0;
}
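/* Drain this interpreter's queue and, when running in the main thread of
   the main interpreter, the runtime-wide main-thread-only queue as well.
   The busy flag prevents re-entrant draining; on error, the calls_to_do
   flag is restored so the eval loop will retry. */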
static int
make_pending_calls(PyInterpreterState *interp)
{
struct _pending_calls *pending = &interp->ceval.pending;
struct _pending_calls *pending_main = &_PyRuntime.ceval.pending_mainthread;
PyThread_acquire_lock(pending->lock, WAIT_LOCK);
if (pending->busy) {
PyThread_release_lock(pending->lock);
return 0;
}
pending->busy = 1;
PyThread_release_lock(pending->lock);
UNSIGNAL_PENDING_CALLS(interp);
if (_make_pending_calls(pending) != 0) {
pending->busy = 0;
SIGNAL_PENDING_CALLS(pending, interp);
return -1;
}
if (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)) {
if (_make_pending_calls(pending_main) != 0) {
pending->busy = 0;
SIGNAL_PENDING_CALLS(pending_main, interp);
return -1;
}
}
pending->busy = 0;
return 0;
}
void
_Py_FinishPendingCalls(PyThreadState *tstate)
{
assert(PyGILState_Check());
assert(is_tstate_valid(tstate));
if (make_pending_calls(tstate->interp) < 0) {
PyObject *exc = _PyErr_GetRaisedException(tstate);
PyErr_BadInternalCall();
_PyErr_ChainExceptions1(exc);
_PyErr_Print(tstate);
}
}
int
_PyEval_MakePendingCalls(PyThreadState *tstate)
{
int res;
if (_Py_IsMainThread() && _Py_IsMainInterpreter(tstate->interp)) {
res = handle_signals(tstate);
if (res != 0) {
return res;
}
}
res = make_pending_calls(tstate->interp);
if (res != 0) {
return res;
}
return 0;
}
int
Py_MakePendingCalls(void)
{
assert(PyGILState_Check());
PyThreadState *tstate = _PyThreadState_GET();
assert(is_tstate_valid(tstate));
if (!_Py_IsMainThread() || !_Py_IsMainInterpreter(tstate->interp)) {
return 0;
}
return _PyEval_MakePendingCalls(tstate);
}
void
_PyEval_InitState(PyInterpreterState *interp, PyThread_type_lock pending_lock)
{
_gil_initialize(&interp->_gil);
struct _pending_calls *pending = &interp->ceval.pending;
assert(pending->lock == NULL);
pending->lock = pending_lock;
}
void
_PyEval_FiniState(struct _ceval_state *ceval)
{
struct _pending_calls *pending = &ceval->pending;
if (pending->lock != NULL) {
PyThread_free_lock(pending->lock);
pending->lock = NULL;
}
}
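/* Handle signals, pending calls, a scheduled GC run, a GIL drop request
   and asynchronous exceptions: the slow path taken whenever the eval
   loop sees eval_breaker set. */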
int
_Py_HandlePending(PyThreadState *tstate)
{
_PyRuntimeState * const runtime = &_PyRuntime;
struct _ceval_runtime_state *ceval = &runtime->ceval;
struct _ceval_state *interp_ceval_state = &tstate->interp->ceval;
if (_Py_atomic_load_relaxed_int32(&ceval->signals_pending)) {
if (handle_signals(tstate) != 0) {
return -1;
}
}
if (maybe_has_pending_calls(tstate->interp)) {
if (make_pending_calls(tstate->interp) != 0) {
return -1;
}
}
if (_Py_atomic_load_relaxed_int32(&interp_ceval_state->gc_scheduled)) {
_Py_atomic_store_relaxed(&interp_ceval_state->gc_scheduled, 0);
COMPUTE_EVAL_BREAKER(tstate->interp, ceval, interp_ceval_state);
_Py_RunGC(tstate);
}
if (_Py_atomic_load_relaxed_int32(&interp_ceval_state->gil_drop_request)) {
if (_PyThreadState_SwapNoGIL(NULL) != tstate) {
Py_FatalError("tstate mix-up");
}
drop_gil(interp_ceval_state, tstate);
take_gil(tstate);
if (_PyThreadState_SwapNoGIL(tstate) != NULL) {
Py_FatalError("orphan tstate");
}
}
if (tstate->async_exc != NULL) {
PyObject *exc = tstate->async_exc;
tstate->async_exc = NULL;
UNSIGNAL_ASYNC_EXC(tstate->interp);
_PyErr_SetNone(tstate, exc);
Py_DECREF(exc);
return -1;
}
COMPUTE_EVAL_BREAKER(tstate->interp, ceval, interp_ceval_state);
return 0;
}